This repository has been archived by the owner on Feb 11, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapp.py
101 lines (78 loc) · 2.95 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
# Colorize terminal output (colorama enables ANSI colors on Windows too).
from colorama import init
init()
from termcolor import colored
# Silence noisy framework warnings before importing heavy dependencies.
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=RuntimeWarning)
with warnings.catch_warnings():
    # h5py emits FutureWarnings on import with some numpy versions.
    warnings.filterwarnings('ignore', category=FutureWarning)
    import h5py
# Dependencies
from flask import Flask, request, jsonify
import numpy as np
import traceback
import pickle
from keras.models import model_from_json
from keras.preprocessing.sequence import pad_sequences
import tensorflow as tf
# Change logging level of TensorFlow (TF 1.x API).
tf.logging.set_verbosity(tf.logging.ERROR)
# Model artifacts are laid out as MODEL_PATH/<model_name>/<files>.
MODEL_PATH = 'model/'
model_name = 'simple'
model_file = f'{model_name}.model.json'
weights_file = f'{model_name}.weights.h5'
def load_json_wih_weights_to_model():
    """Load the serialized Keras model (architecture JSON + weights) from disk.

    Reads MODEL_PATH/<model_name>/<model_file> for the architecture and the
    matching weights file, publishes the model and the default TensorFlow
    graph in the module globals ``loaded_model`` / ``graph`` (used by the
    /detox/api/comment endpoint), and returns the model.

    NOTE: the name keeps its historical typo ("wih") so existing callers
    keep working.
    """
    global loaded_model, graph
    # Context manager guarantees the file handle is closed even if reading
    # raises (the original open()/close() pair leaked on exceptions).
    with open(f'{MODEL_PATH}{model_name}/{model_file}', 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(f'{MODEL_PATH}{model_name}/{weights_file}')
    print(colored(f"[INFO] Loaded '{model_name}' model with weights from disk", 'yellow'))
    # Capture the default graph so Flask worker threads can predict against
    # the graph the model was built in (TF 1.x requirement).
    graph = tf.get_default_graph()
    return loaded_model
# Load the Tokenizer fitted at training time so inference applies the
# identical word->index mapping.
# NOTE(review): pickle.load can execute arbitrary code — acceptable only
# because this file ships with the app, never from untrusted input.
with open(f'{MODEL_PATH}tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
print(colored('[INFO] Loaded Tokenizer from disk', 'yellow'))
# Output labels, in the same order as the model's output units.
classes_names = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
def prepare_comment(comment, maxlen=180):
    """Convert raw comment text into a padded index sequence for the model.

    Args:
        comment: iterable of strings (the endpoint passes a 1-element list).
        maxlen: pad/truncate length; the default of 180 matches the length
            the model was trained with (previously hard-coded).

    Returns:
        numpy array of shape (len(comment), maxlen) of token indices.
    """
    tokenized_comment = tokenizer.texts_to_sequences(comment)
    return pad_sequences(tokenized_comment, maxlen=maxlen)
# Flask application instance; routes are registered on it below.
app = Flask(__name__)
@app.route("/")
def hello():
    """Landing / health-check endpoint: returns a static greeting."""
    greeting = "Welcome to Detox API!"
    return greeting
@app.route('/detox/api/comment', methods=['POST'])
def predict():
    """Score a comment against each toxicity class.

    Expects a JSON body ``{"comment": "<text>"}`` and returns a JSON object
    mapping each class name to its probability formatted to two decimals.
    On any failure the traceback is returned under the 'trace' key
    (debug-style behavior kept from the original implementation).
    """
    try:
        comment = request.json['comment']
        print(colored(f"[INFO] Received comment '{comment}'", 'yellow'))
        # TF 1.x: predictions must run inside the graph captured at load time.
        with graph.as_default():
            prediction = loaded_model.predict([prepare_comment([comment])], batch_size=1024)
        scores = prediction[0]
        # enumerate() replaces the original hand-rolled `i = 0 / i += 1` counter.
        output = {label: f'{scores[i]:.2f}' for i, label in enumerate(classes_names)}
        # Flag as toxic when any class probability exceeds 0.5.
        is_toxic = any(scores[i] > 0.5 for i in range(len(classes_names)))
        print(colored('Results:', 'cyan'))
        print(colored(output, 'cyan'))
        if is_toxic:
            print(colored('This comment seems to be toxic!', 'magenta'))
        else:
            print(colored('This comment seems to be harmless!', 'green'))
        return jsonify(output)
    except Exception:
        # The original bare `except:` also trapped SystemExit and
        # KeyboardInterrupt; `Exception` keeps the best-effort error
        # reporting without swallowing interpreter-control exceptions.
        return jsonify({'trace': traceback.format_exc()})
if __name__ == '__main__':
    # Load the model once at startup, then serve.
    loaded_model = load_json_wih_weights_to_model()
    # debug=True enables the Flask reloader/debugger — development only.
    app.run(debug=True, port=8585)