This is the test file to prove the idea.
I tried to build a JSON-formatted corpus, but it was too hard; then I found that word2vec lets me avoid that hard work.
In [1]:
    
# -*- coding: utf-8 -*-
import os
import re
import time
import codecs
import argparse
# Timestamp format for the START/STOP progress messages printed below.
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# NOTE(review): hardcoded absolute path; prefer the commented-out
# os.path.abspath(os.path.dirname(__file__)) when running outside this machine.
BASE_FOLDER = "C:/Users/sethf/source/repos/chinesepoem/" # os.path.abspath(os.path.dirname(__file__))
DATA_FOLDER = os.path.join(BASE_FOLDER, 'data')
DEFAULT_FIN = os.path.join(DATA_FOLDER, '唐诗语料库.txt')
DEFAULT_FOUT = os.path.join(DATA_FOLDER, 'poem.txt')
# Matches any character OUTSIDE the CJK / full-width range; used to strip noise.
reg_noisy = re.compile('[^\u3000-\uffee]')
# BUG FIX: the original pattern '((.*))' used ASCII parentheses, which are
# regex grouping metacharacters — the effective pattern was just '.*', so
# reg_note.sub('', line) erased the ENTIRE line. Editorial notes in this
# corpus are wrapped in full-width parentheses （…）, which survive reg_noisy;
# match them non-greedily so multiple notes on one line are each removed.
# Still cannot deal with a note whose （ and ） fall on separate lines.
reg_note = re.compile('（.*?）')
# Chinese and full-width punctuation (characters) span
# \u3000-\u301e\ufe10-\ufe19\ufe30-\ufe44\ufe50-\ufe6b\uff01-\uffee
    
In [11]:
    
if __name__ == '__main__':
    # Convert the raw Tang-poetry corpus into poem.txt: titles are taken from
    # 〖…〗 lines, body lines are concatenated after noise removal, and poems
    # are separated by a blank line.
    print('{} START'.format(time.strftime(TIME_FORMAT)))
    reg = re.compile('〖(.*)〗')  # extracts the poem title between 〖 and 〗
    start_flag = False  # becomes True once the first poem has been emitted
    # with-blocks guarantee both files are closed even if an error occurs
    # (the original closed them manually at the end).
    with codecs.open(DEFAULT_FIN, 'r', 'utf-8') as fd, \
         codecs.open(DEFAULT_FOUT, 'w', 'utf-8') as fw:
        for line in fd:
            line = line.strip()
            # Skip blank lines, volume headers, URLs, and lines containing
            # the missing-glyph placeholder □.
            if not line or '《全唐诗》' in line or '<http' in line or '□' in line:
                continue
            elif '〖' in line and '〗' in line:
                # A title line starts a new poem; put a blank line between poems.
                if start_flag:
                    fw.write('\n')
                start_flag = True
                g = reg.search(line)
                if g:
                    fw.write(g.group(1))
                    fw.write('\n')
                else:
                    # BUG FIX: the original read `else:a` — the stray `a` made
                    # the following indented suite a syntax error.
                    # noisy data — print for manual inspection
                    print(line)
            else:
                line = reg_noisy.sub('', line)  # drop non-CJK characters
                line = reg_note.sub('', line)   # drop parenthesized annotations
                line = line.replace(' .', '')
                fw.write(line)
    print('{} STOP'.format(time.strftime(TIME_FORMAT)))
    
    
In [17]:
    
# Word-level segmentation: run every poem line through THULAC and write the
# space-separated tokens to wordsegment.txt (one input line per output line).
print('{} START'.format(time.strftime(TIME_FORMAT)))
import thulac
DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
with codecs.open(DEFAULT_FOUT, 'r', 'utf-8') as poems_in:
    with codecs.open(DEFAULT_Segment, 'w', 'utf-8') as segmented_out:
        segmenter = thulac.thulac(seg_only=True)  # segmentation only, no POS tagging
        for poem_line in poems_in:
            segmented_out.write(segmenter.cut(poem_line, text=True))
            segmented_out.write('\n')
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
    
    
In [19]:
    
# Train a 150-dimensional word2vec model on the word-segmented corpus
# and persist it for the lookup cells below.
print('{} START'.format(time.strftime(TIME_FORMAT)))
from gensim.models import word2vec
DEFAULT_Word2Vec = os.path.join(DATA_FOLDER, 'Word2Vec150.bin')
# NOTE(review): `size` is the gensim 3.x keyword; gensim >= 4.0 renamed it to
# `vector_size` — confirm which version is installed.
model = word2vec.Word2Vec(word2vec.Text8Corpus(DEFAULT_Segment), size=150)
model.save(DEFAULT_Word2Vec)
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
    
    
    
    
In [21]:
    
# Inspect the learned embedding for the character '男'.
# FIX: `model[...]` (Word2Vec.__getitem__) was deprecated and then removed in
# gensim 4.0; `model.wv[...]` works in both 3.x and 4.x and matches the
# `model.wv.most_similar` usage later in this notebook.
model.wv[u'男']
    
    Out[21]:
In [3]:
    
# Re-declare the corpus paths so this cell can run independently of the cells above.
DEFAULT_FIN = os.path.join(DATA_FOLDER, '唐诗语料库.txt')
DEFAULT_FOUT = os.path.join(DATA_FOLDER, 'poem.txt')
DEFAULT_Segment = os.path.join(DATA_FOLDER, 'wordsegment.txt')
def GetFirstNline(filePath, linesNumber):
    """Print the first `linesNumber` lines of the UTF-8 text file at `filePath`.

    BUG FIX: the original used range(1, linesNumber), which iterates only
    linesNumber - 1 times, so it printed one line fewer than requested.
    """
    fd = codecs.open(filePath, 'r', 'utf-8')
    try:
        for _ in range(linesNumber):
            print(fd.readline())
    finally:
        # Close even if printing raises (original closed unconditionally at end).
        fd.close()
# Sanity check: peek at the first lines of the segmented and cleaned corpora.
for preview_path in (DEFAULT_Segment, DEFAULT_FOUT):
    GetFirstNline(preview_path, 3)
    
    
In [10]:
    
# Character-level segmentation: rewrite poem.txt with a space after every
# character so that each character becomes one "word" for word2vec.
print('{} START'.format(time.strftime(TIME_FORMAT)))
DEFAULT_FOUT = os.path.join(DATA_FOLDER, 'poem.txt')
DEFAULT_charSegment = os.path.join(DATA_FOLDER, 'Charactersegment.txt')
# Fixes vs. original: removed the unused `start_flag` variable and the dead
# `len(line) > 0` guard (file iteration never yields an empty string), and
# replaced manual close() calls with with-blocks so files close on error too.
with codecs.open(DEFAULT_FOUT, 'r', 'utf-8') as fd, \
     codecs.open(DEFAULT_charSegment, 'w', 'utf-8') as fw:
    for line in fd:
        # Emit "<char> " for every character except the newline itself.
        for c in line:
            if c != '\n':
                fw.write(c)
                fw.write(' ')
        fw.write('\n')
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
    
    
In [11]:
    
# Preview the character-segmented corpus produced by the previous cell.
GetFirstNline(DEFAULT_charSegment, 3)
    
    
In [10]:
    
# Train a 100-dimensional character-level word2vec model.
print('{} START'.format(time.strftime(TIME_FORMAT)))
from gensim.models import word2vec
DEFAULT_Char2Vec = os.path.join(DATA_FOLDER, 'Char2Vec100.bin')
# BUG FIX: the original `fd.close` (no parentheses) referenced the method but
# never called it, so the file was never closed; a with-block guarantees it.
with codecs.open(DEFAULT_charSegment, 'r', 'utf-8') as fd:
    # BUG FIX: split each line into tokens. Passing raw strings from
    # readlines() makes gensim iterate per character, so spaces and newlines
    # would be treated as vocabulary tokens too.
    sentences = [line.split() for line in fd]
# NOTE(review): gensim >= 4.0 renamed `size` to `vector_size` — confirm version.
model = word2vec.Word2Vec(sentences, size=100)
model.save(DEFAULT_Char2Vec)
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
    
    
In [11]:
    
# Inspect the character-level embedding for '男'.
# FIX: `model[...]` was removed in gensim 4.0; `model.wv[...]` works in both
# 3.x and 4.x and matches the `model.wv.most_similar` usage below.
model.wv[u'男']
    
    Out[11]:
In [3]:
    
# Train a 50-dimensional character-level word2vec model (smaller variant of
# the 100-d model above).
print('{} START'.format(time.strftime(TIME_FORMAT)))
from gensim.models import word2vec
DEFAULT_charSegment = os.path.join(DATA_FOLDER, 'Charactersegment.txt')
DEFAULT_Char2Vec50 = os.path.join(DATA_FOLDER, 'Char2Vec50.bin')
# BUG FIX: the original `fd.close` (no parentheses) never closed the file;
# a with-block guarantees closure.
with codecs.open(DEFAULT_charSegment, 'r', 'utf-8') as fd:
    # BUG FIX: tokenize each line; raw strings would make gensim treat every
    # character — including spaces and newlines — as a token.
    sentences = [line.split() for line in fd]
# NOTE(review): gensim >= 4.0 renamed `size` to `vector_size` — confirm version.
model = word2vec.Word2Vec(sentences, size=50)
model.save(DEFAULT_Char2Vec50)
print('{} STOP'.format(time.strftime(TIME_FORMAT)))
    
    
In [9]:
    
# Show the characters whose embeddings are closest to '好'.
model.wv.most_similar([u'好'])
    
    Out[9]:
In [ ]: