forked from visayang2005/seq2seq_chatterbot
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpreprocessing.py
81 lines (73 loc) · 2.94 KB
/
preprocessing.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
import jieba
import re
class preprocessing():
    """Build vocabularies and integer-vector files for a seq2seq chatbot.

    Pipeline: read the parallel question/answer corpora, segment each line
    with jieba, write a per-side vocabulary file (reserved tokens first),
    then convert the segmented text into whitespace-separated word-index
    files consumed by the model.
    """

    # Reserved token indices; order must match `vocab` below.
    __PAD__ = 0
    __GO__ = 1
    __EOS__ = 2
    __UNK__ = 3
    vocab = ['__PAD__', '__GO__', '__EOS__', '__UNK__']

    def __init__(self):
        self.encoderFile = "./question.txt"
        self.decoderFile = './answer.txt'
        self.dictFile = 'word_dict.txt'
        # Load the user-defined segmentation dictionary into jieba.
        jieba.load_userdict(self.dictFile)
        # Stop-word list path (currently unused by wordToVocabulary —
        # the stop-word filtering code was commented out upstream).
        self.stopwordsFile = "./preprocessing/stopwords.dat"

    def wordToVocabulary(self, originFile, vocabFile, segementFile):
        """Segment `originFile` line by line and build its vocabulary.

        Writes the jieba-segmented corpus (space-separated tokens, one
        sentence per line) to `segementFile`, and the deduplicated,
        first-seen-ordered vocabulary — prefixed by the reserved tokens —
        to `vocabFile`.
        """
        vocabulary = []
        # Text mode with explicit UTF-8: the original opened in 'rb' and
        # relied on jieba decoding raw bytes internally, which is fragile
        # and mixes bytes/str semantics downstream.
        with open(segementFile, "w", encoding="utf-8") as sege, \
             open(originFile, "r", encoding="utf-8") as en:
            for sent in en:
                # NOTE(review): punctuation stripping for the encoder side
                # was commented out upstream; the enc/dec branches were
                # identical except for the debug print, which is kept.
                words = jieba.lcut(sent.strip())
                if "enc" in segementFile:
                    print(words)
                vocabulary.extend(words)
                for word in words:
                    sege.write(word + " ")
                sege.write("\n")
        # Deduplicate while preserving first-seen order. The original used
        # list.sort(key=vocabulary.index), which is O(n^2); dict.fromkeys
        # keeps insertion order in one pass.
        _vocabulary = self.vocab + list(dict.fromkeys(vocabulary))
        with open(vocabFile, "w", encoding="utf-8") as vocab_file:
            for word in _vocabulary:
                vocab_file.write(word + "\n")

    def toVec(self, segementFile, vocabFile, doneFile):
        """Map each segmented line to word indices, written to `doneFile`.

        Each output line mirrors one input line: space-separated indices
        looked up in `vocabFile` (line number = index). Out-of-vocabulary
        words map to the __UNK__ index — the original called dict.get()
        with no default and silently wrote the literal string "None".
        """
        word_dicts = {}
        with open(vocabFile, "r", encoding="utf-8") as dict_f:
            for index, word in enumerate(dict_f):
                word_dicts[word.strip()] = index
        with open(doneFile, "w", encoding="utf-8") as f, \
             open(segementFile, "r", encoding="utf-8") as sege_f:
            for sent in sege_f:
                # Trailing " " before "\n" makes the last split() field
                # empty; [:-1] drops it, matching the segment-file format.
                sents = [i.strip() for i in sent.split(" ")[:-1]]
                for word in sents:
                    # Fall back to __UNK__ for out-of-vocabulary words.
                    f.write(str(word_dicts.get(word, self.__UNK__)) + " ")
                f.write("\n")

    def main(self):
        """Run the full pipeline: vocabulary building, then vectorization."""
        # Build vocabulary and segmented corpus for each side.
        self.wordToVocabulary(self.encoderFile, './preprocessing/enc.vocab', './preprocessing/enc.segement')
        self.wordToVocabulary(self.decoderFile, './preprocessing/dec.vocab', './preprocessing/dec.segement')
        # Convert segmented corpora into index-vector files.
        self.toVec("./preprocessing/enc.segement",
                   "./preprocessing/enc.vocab",
                   "./preprocessing/enc.vec")
        self.toVec("./preprocessing/dec.segement",
                   "./preprocessing/dec.vocab",
                   "./preprocessing/dec.vec")
if __name__ == "__main__":
    # Run the pipeline only when executed as a script, so importing this
    # module (e.g. to reuse the preprocessing class) has no side effects.
    pre = preprocessing()
    pre.main()