-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathModelPrediction.py
More file actions
110 lines (86 loc) · 3.28 KB
/
ModelPrediction.py
File metadata and controls
110 lines (86 loc) · 3.28 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
## load the library and files
import nltk
from nltk.stem import WordNetLemmatizer
import json
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import random
import os
from keras.models import load_model
## Load the trained model and its companion artifacts from disk.
## Paths are resolved relative to the process working directory, so this
## module must be imported with the project root as CWD — TODO confirm.
current_path = os.getcwd()
# Trained Keras classifier produced by the training script.
model = load_model(os.path.join(current_path,"model_data","model.keras"))
# Raw intents definition (tags + candidate responses) used by getResponse().
intents = json.loads(open(os.path.join(current_path,"training_data","data.json")).read())
# Vocabulary list: position i corresponds to feature i of the model input.
words = pickle.load(open(os.path.join(current_path,"model_data","words.pkl"), 'rb'))
# Class labels: model output index i maps to classes[i].
classes = pickle.load(open(os.path.join(current_path,"model_data","classes.pkl"), 'rb'))
## Lemmatizer shared by the preprocessing helpers below.
lemmatizer = WordNetLemmatizer()
def clean_up_sentence(sentence):
    """Tokenize *sentence* and return its lowercased, lemmatized tokens.

    Parameters
    ----------
    sentence : str
        Raw user input.

    Returns
    -------
    list[str]
        One lemmatized, lowercase string per token.
    """
    return [
        lemmatizer.lemmatize(token.lower())
        for token in nltk.word_tokenize(sentence)
    ]
def bow(sentence, words, show_details=True):
    """Encode *sentence* as a bag-of-words vector over the vocabulary *words*.

    Parameters
    ----------
    sentence : str
        Raw user input; preprocessed via clean_up_sentence().
    words : list[str]
        Ordered vocabulary (loaded from words.pkl); position i is feature i.
    show_details : bool, optional
        When True, print each vocabulary hit (debugging aid).

    Returns
    -------
    numpy.ndarray
        1-D array of 0/1 ints, length len(words); 1 where the vocab word
        occurs in the sentence.
    """
    sentence_words = clean_up_sentence(sentence)
    # Build a word -> [indices] map once instead of rescanning the whole
    # vocabulary for every token (original was O(tokens * vocab)).  A list
    # of indices preserves the original behavior for duplicate vocab words.
    positions = {}
    for i, w in enumerate(words):
        positions.setdefault(w, []).append(i)
    bag = [0] * len(words)
    for s in sentence_words:
        for i in positions.get(s, ()):
            bag[i] = 1
            if show_details:
                print("found in bag: %s" % s)
    # Numeric 1/0 feature vector expected by the model.
    return np.array(bag)
def predict_class(sentence, model):
    """Classify *sentence* into intents using the trained model.

    Parameters
    ----------
    sentence : str
        Raw user input.
    model
        Loaded Keras model whose output index i maps to classes[i].

    Returns
    -------
    list[dict]
        Dicts of the form {"intent": tag, "probability": str}, sorted by
        descending probability; only scores above the 0.60 threshold are
        kept, so the list may be empty.
    """
    # Confidence cut-off below which a prediction is discarded.
    ERROR_THRESHOLD = 0.60
    features = bow(sentence, words, show_details=False)
    scores = model.predict(np.array([features]))[0]
    candidates = sorted(
        ((idx, score) for idx, score in enumerate(scores) if score > ERROR_THRESHOLD),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [
        {"intent": classes[idx], "probability": str(score)}
        for idx, score in candidates
    ]
def getResponse(ints, intents_json):
""""
take the intenst and return the random result as predicted output
"""
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if i['tag'] == tag:
result = random.choice(i['responses'])
break
## return the answer
return result
def chatbot_response(msg):
    """Entry point used by the UI layer: map a user message to a reply.

    Parameters
    ----------
    msg : str
        The user's message.

    Returns
    -------
    str
        A response chosen from the intents file, or a fallback message
        when no intent clears the prediction confidence threshold.
    """
    ints = predict_class(msg, model)
    if not ints:
        # Fixed user-facing typos: "i" -> "I", "learning face" -> "the
        # learning phase".
        res = "Sorry, I am still in the learning phase."
    else:
        res = getResponse(ints, intents)
    return res