#!/usr/bin/python
#
# Implementation of the AR Miner:
# "AR-Miner: Mining Informative Reviews for Developer from Mobile App MarketPlace"
#
# Authors:
# 1) Shanshan Li
# 2) Yingyezhe Jin
# 3) Tianshu Chu
# 4) Xiao Huang
# python imports
import os, numpy, time
# AR Miner imports
from AR_util import AR_parse, AR_loadReviews, AR_writeReviews, AR_tfIdf
from AR_reviewInstance import Review
from AR_classifier import AR_emnb, AR_svm
from AR_lda import AR_lda
from AR_textrank import AR_textrank
# The main method:
def main():
    # 0. Given the application, read the reviews and stem them
    datasetName = "swiftkey" # four apps: facebook, templerun2, swiftkey, tapfish
    rmStopWords = True # removing stop words can lose information and hurt the F-score
    rmRareWords = True # remove words that occur with low frequency
    # trainSet/testSet/unlabelSet: lists of review data
    # vocabulary: dict of length V mapping each term to its positional index in the doc vector
    # set skParse to True to read the already-filtered data directly from file
    skParse = False
    if not skParse:
        # the vocabulary is built over the entire data set
        trainSet, testSet, unlabelSet, vocabulary = AR_parse(datasetName, rmStopWords, rmRareWords)
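    # Illustration of the data layout (toy values, not from the actual corpus):
    # with vocabulary = {"crash": 0, "battery": 1, "love": 2}, a stemmed review
    # "app crash crash" maps to the doc vector [2, 0, 0].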
    # 1. Use EM-NB or SVM to filter out the informative reviews
    # informMat: the informative reviews as an X x V sparse matrix, where X is the number of documents and V the vocabulary size
    # informRev: the corresponding reviews wrapped as a list of Review instances
    useSVM = False # SVM tests much better than EM-NB, but it may not filter the informative reviews as effectively
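    # EM-NB in a nutshell (the standard semi-supervised scheme this classifier
    # follows): train Naive Bayes on the labeled reviews, probabilistically
    # label the unlabeled set (E-step), retrain on all reviews (M-step), and
    # iterate until the assignments stabilize.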
    start_time = time.clock()
    if not skParse:
        if not useSVM:
            informRev, informMat = AR_emnb(trainSet, testSet, unlabelSet, vocabulary, datasetName)
        else:
            informRev, informMat = AR_svm(trainSet, testSet, unlabelSet, vocabulary, datasetName)
        print("%.2f seconds" % (time.clock() - start_time))
        # write the result back to the file (optional)
        # AR_writeReviews(informRev, datasetName)
    else:
        # directly read from the file
        informRev, informMat, vocabulary = AR_loadReviews(datasetName)
    print("Number of informative reviews: " + str(len(informRev)))
    # 2. Use LDA to group the reviews by topic
    # doc_topic: a k x n_topics numpy matrix (k = number of informative reviews), where entry (i, j) is the probability that review i belongs to topic j
    # vocab_list: a list of vocabulary words
    n_topics = 20
    doc_topic, vocab_list = AR_lda(informRev, informMat, vocabulary, n_topics)
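    # For example, the dominant topic of review i can be read off the matrix
    # (illustrative, assuming doc_topic is a dense numpy array):
    #   topic_i = doc_topic[i].argmax()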
    # calculate the tf-idf for the similarity measure between reviews
    AR_tfIdf(informRev)
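    # tf-idf here presumably follows the standard weighting,
    # weight(t, d) = tf(t, d) * log(N / df(t)), where N is the number of
    # informative reviews and df(t) the number of reviews containing term t.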
    # use TextRank to rank the review instances:
    rankedInstance = AR_textrank(doc_topic, informRev)
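    # TextRank sketch (standard formulation; the exact graph construction is
    # inside AR_textrank): run PageRank over a graph whose nodes are reviews
    # and whose edge weights are pairwise similarities, updating
    #   score(i) = (1 - d) + d * sum_j w(j, i) / sum_k w(j, k) * score(j)
    # with damping factor d (typically 0.85) until convergence.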
# call the main
if __name__ == "__main__":
    main()
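# Example invocation (assuming the data set files for "swiftkey" are in place):
#   python AR_main.py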