project.py
# Importing necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelBinarizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
import seaborn as sns
# Setting the URL and loading the IMDB dataset
url = "https://raw.githubusercontent.com/meghjoshii/NSDC_DataScienceProjects_SentimentAnalysis/main/IMDB%20Dataset.csv"
data = pd.read_csv(url)
# Printing the top 5 rows of the dataframe
print(data.head())
# Describing the data
print(data.describe())
# Printing the reviews
print(data['review'])
# Counting the sentiment labels
sentiment_count = data['sentiment'].value_counts()
print(sentiment_count)
import nltk
nltk.download('punkt')
# Tokenizing each review into a list of word tokens
data['review'] = data['review'].apply(word_tokenize)
print(data['review'][1])
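# A quick illustration (added; not in the original script): word_tokenize
# splits punctuation into separate tokens, which is why the isalpha() filter
# below is needed.
print(word_tokenize("This movie was great!"))  # ['This', 'movie', 'was', 'great', '!']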
# Keeping only alphabetic tokens (drops punctuation and numbers)
data['review'] = data['review'].apply(lambda x: [item for item in x if item.isalpha()])
print(" ".join(data['review'][1]))
# Lowercasing every token
data['review'] = data['review'].apply(lambda x: [item.lower() for item in x])
nltk.download('stopwords')
from nltk.corpus import stopwords
# Removing English stopwords
stop_words = set(stopwords.words('english'))
data['review'] = data['review'].apply(lambda x: [item for item in x if item not in stop_words])
# Stemming each token with the Porter stemmer
from nltk.stem import PorterStemmer
ps = PorterStemmer()
data['review'] = data['review'].apply(lambda x: [ps.stem(item) for item in x])
# Rejoining tokens into a single cleaned string per review
data['review'] = data['review'].apply(lambda x: " ".join(x))
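# Optional sanity check (added; not in the original script): inspect one fully
# preprocessed review to confirm that cleaning, stopword removal, and stemming
# were all applied.
print(data['review'][1][:200])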
# Train reviews
train_reviews = data.review[:40000]
# Test reviews
test_reviews = data.review[40000:]
# Train sentiments
train_sentiment = data.sentiment[:40000]
# Test sentiments
test_sentiment = data.sentiment[40000:]
# Count vectorizer for a bag-of-words model over 1-3 grams.
# Note: max_df must be the float 1.0 (keep terms appearing in up to 100% of
# documents); the integer 1 would keep only terms appearing in a single document.
cv = CountVectorizer(min_df=0.0, max_df=1.0, binary=False, ngram_range=(1, 3))
# Transformed train reviews
cv_train_reviews = cv.fit_transform(train_reviews)
# Transformed test reviews
cv_test_reviews = cv.transform(test_reviews)
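# Optional sanity check (added): the matrix shapes show the number of reviews
# in each split and the size of the learned 1-3 gram vocabulary.
print("BOW train matrix:", cv_train_reviews.shape)
print("BOW test matrix:", cv_test_reviews.shape)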
# Binarizing the sentiment labels (positive/negative -> 1/0)
lb = LabelBinarizer()
# Transformed train sentiment data (ravel() flattens the (n, 1) column to 1-D)
lb_train_sentiments = lb.fit_transform(train_sentiment).ravel()
# Transformed test sentiment data: as with the count vectorizer, the test split
# is only transformed, never re-fit
lb_test_sentiments = lb.transform(test_sentiment).ravel()
# Training the model
mnb = MultinomialNB()
# Fitting the model on the bag-of-words features
mnb_bow = mnb.fit(cv_train_reviews, lb_train_sentiments)
# Predicting on the test set for bag of words
mnb_bow_predict = mnb.predict(cv_test_reviews)
# Accuracy score for bag of words
mnb_bow_score = accuracy_score(lb_test_sentiments, mnb_bow_predict)
print("Accuracy :", mnb_bow_score)
# Commented out IPython magic to ensure Python compatibility.
# Word cloud for positive-review words in the entire dataset
from wordcloud import WordCloud
# %matplotlib inline
# Joining all the positive reviews into one string
positive_words = ' '.join(list(data[data['sentiment'] == 'positive']['review']))
# Word cloud for positive words
wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(positive_words)
plt.figure(figsize=(10, 7))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.show()
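# A parallel word cloud for negative-review words (added), mirroring the
# positive-review cloud above:
negative_words = ' '.join(list(data[data['sentiment'] == 'negative']['review']))
wordcloud = WordCloud(width=800, height=500, random_state=21, max_font_size=110).generate(negative_words)
plt.figure(figsize=(10, 7))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis('off')
plt.show()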