In [2]:
import pandas as pd
import fastparquet
# NOTE(review): gensim.models.wrappers was removed in gensim 4.x; this import
# only works on gensim 3.x — confirm the pinned gensim version.
from gensim.models.wrappers import FastText
#import gensim
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
import nltk
# One-time corpus downloads; no-ops (fast) once the data is already present.
nltk.download('punkt')
nltk.download("stopwords")
from nltk.corpus import stopwords
import re
import string
In [3]:
# Globally silence pandas' SettingWithCopyWarning (chained-assignment checks off)
# so the label-concatenation cells below run without warning noise.
pd.options.mode.chained_assignment = None
In [4]:
# load data from precomputed dataframe
# Read the entire parquet file into memory; `df` is kernel state consumed by
# the cells below (expects at least the columns 'sentiment' and 'text_clean1').
pfile = fastparquet.ParquetFile('5col_DFrame.parq')
df = pfile.to_pandas() # all columns
#df2 = pfile.to_pandas(columns=['floats', 'times']) # pick some columns
In [5]:
# Sanity check: display the last five rows of the loaded frame.
df.tail()
Out[5]:
In [7]:
# write data to txt
# Build fastText-formatted supervised training data: one example per line,
# "__label__<sentiment> <text_clean1>".
dftrain = pd.DataFrame()
dftrain.loc[:, 'data'] = "__label__" + df.loc[:,'sentiment'] + " " + df.loc[:,'text_clean1']

# Row index where the train/test split happens (rows [0, SPLIT_AT) -> train,
# the rest -> test). NOTE(review): no shuffle — assumes df order is arbitrary.
SPLIT_AT = 10000

# Write the lines directly instead of via to_csv: the original call used
# csv.QUOTE_NONE without importing csv (NameError on a fresh kernel) and
# abused sep='\n' on a single-column frame. fillna('') preserves to_csv's
# default na_rep='' behavior for missing text.
with open('train.txt', 'w') as f:
    f.write('\n'.join(dftrain['data'].iloc[:SPLIT_AT].fillna('')) + '\n')
with open('test.txt', 'w') as f:
    f.write('\n'.join(dftrain['data'].iloc[SPLIT_AT:].fillna('')) + '\n')
In [ ]: