-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
93 lines (73 loc) · 3.58 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
# --- Imports -----------------------------------------------------------
# NOTE: several of these (GridSearchCV, the multiclass wrappers, RFECV,
# AdaBoost, the extra naive-Bayes variants, matplotlib) are only used by
# the commented-out experiments below; kept so those can be re-enabled.
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.neural_network import MLPClassifier
# BUGFIX: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20 — train_test_split lives in sklearn.model_selection, which this
# file already imports; merged into that line.
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.feature_selection import RFECV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB, GaussianNB
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
''' KAGGLE FILES '''
# Load the Kaggle train/test CSVs (expected in the working directory).
TRAIN_FILE = 'train.csv'
KAGGLE_TEST_FILE = 'test.csv'
df = pd.read_csv(TRAIN_FILE)
kaggle_test_df = pd.read_csv(KAGGLE_TEST_FILE)
''' Renaming Labels '''
# Map each author-initials string in the training frame to an integer
# class id; rows with any other value are left untouched.
for author_initials, class_id in (('EAP', 0), ('HPL', 1), ('MWS', 2)):
    df.loc[df['author'] == author_initials, 'author'] = class_id
''' Extracting Necessary Things '''
# Raw text columns become the features; the encoded author column is the
# target (cast to int — the .loc writes leave the column as object dtype).
X = df['text']
X_kaggle = kaggle_test_df['text']
y = df['author'].astype('int')
''' Transforming Data For Use '''
#### TFIDF Vectorizer Test -- CountVectorization not as good####
# Fit the TF-IDF vocabulary on the training texts only, then reuse that
# fitted vocabulary to transform the Kaggle test texts so both matrices
# share the same feature space.
vectorizer = TfidfVectorizer(stop_words='english')
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .01, random_state=0)
# X_train = vectorizer.fit_transform(X_train)
# X_test = vectorizer.transform(X_test)
X = vectorizer.fit_transform(X)
X_kaggle = vectorizer.transform(X_kaggle)
# ''' Grid Search '''
# clf = MultinomialNB(fit_prior=False, alpha=.1)
# clf = MultinomialNB(fit_prior=False, alpha=.01)
# clf = MultinomialNB(fit_prior=True, alpha=.01)
# # clf.fit(X_train, y_train)
# # print("MNB Solo score: {}".format(clf.score(X_test, y_test)))
# alpha_range = [i for i in np.arange(0.000,1.0,.015)]
# prior = [True, False]
# grid_clf = GridSearchCV(clf, {'alpha': alpha_range},scoring='accuracy', n_jobs=1, cv=100)
# grid_clf.fit(X, y)
# ''' Processing Results '''
# mean_scores = [result.mean_validation_score for result in grid_clf.grid_scores_]
# plt.plot(alpha_range, mean_scores)
# plt.xlabel('Alpha')
# plt.ylabel('Mean Score (CV=100)')
# plt.show()
# # print(mean_scores)
# print(grid_clf.best_score_, grid_clf.best_params_)
# ''' One Vs Rest Classification from Grid Search Result '''
# mnb_ovr = OneVsRestClassifier(grid_clf.best_estimator_, n_jobs=1)
# mnb_ovr.fit(X, y)
# kaggle_results = mnb_ovr.predict_proba(X_kaggle)
# print(kaggle_results)
# ids = kaggle_test_df['id']
# predict_data = pd.DataFrame(kaggle_results, columns=['EAP', 'HPL', 'MWS'])
# conc = pd.concat([ids, predict_data], axis=1)
# conc.to_csv('mySubmission.csv', index=False)
''' Multi Layer Perceptron Tests '''
# Train a four-hidden-layer MLP on the full TF-IDF matrix and emit the
# per-class probability submission Kaggle expects (columns EAP/HPL/MWS).
# NOTE(review): max_iter=4 with warm_start=True looks like leftover manual
# epoch-stepping, and random_state=None makes runs non-reproducible —
# both preserved here; confirm before changing.
# mlp = MLPClassifier(hidden_layer_sizes=(30,15),activation="logistic",solver='adam',alpha=0.0001,batch_size='auto',learning_rate="constant",learning_rate_init=0.001,\
# power_t=0.5, max_iter=25, shuffle=True, random_state=None, tol=1e-4, verbose=True, warm_start=False, momentum=0.9, nesterovs_momentum=True, \
# early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
mlp = MLPClassifier(hidden_layer_sizes=(50, 30, 15, 15), activation="relu",
                    solver='adam', alpha=0.0001, batch_size='auto',
                    learning_rate="adaptive", learning_rate_init=0.001,
                    power_t=0.5, max_iter=4, shuffle=True, random_state=None,
                    tol=1e-4, verbose=True, warm_start=True, momentum=0.9,
                    nesterovs_momentum=True, early_stopping=False,
                    validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
                    epsilon=1e-8)
mlp.fit(X, y)
kaggle_results = mlp.predict_proba(X_kaggle)
# print(kaggle_results)
# Training-set accuracy only (no held-out split), so this overstates
# generalization performance.
print("MLP Score: {}".format(mlp.score(X, y)))
# Assemble the submission frame: test-row ids alongside one probability
# column per author class.
ids = kaggle_test_df['id']
predict_data = pd.DataFrame(kaggle_results, columns=['EAP', 'HPL', 'MWS'])
conc = pd.concat([ids, predict_data], axis=1)
# BUGFIX: `conc` was built but never saved, so the script produced no
# submission file (the commented-out NB pipeline above did write one).
conc.to_csv('mySubmission.csv', index=False)