# process.py — chatbot text preprocessing and inference helpers.
import json
import random
import nltk
import string
import numpy as np
import pickle
import tensorflow as tf
from nltk.stem import WordNetLemmatizer
from tensorflow import keras
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Module-level shared state, populated by preparation()/load_response().
# NOTE(review): a `global` statement at module scope is a no-op — kept only
# as documentation of which names the functions below read and write.
global responses, lemmatizer, tokenizer, le, model, input_shape
# Fixed input sequence length — presumably what the model was trained with;
# TODO confirm against the training pipeline.
input_shape = 15
# Load the intents dataset and expose the answers per intent tag.
def load_response():
    """Populate the global ``responses`` dict (intent tag -> list of answers)
    from ``dataset/intents.json``."""
    global responses
    with open('dataset/intents.json') as content:
        data = json.load(content)
    responses = {intent['tag']: intent['responses'] for intent in data['intents']}
# Load the model artifacts and download the required NLTK data.
def preparation():
    """One-time setup: load the response dataset, the fitted tokenizer and
    label encoder, the trained Keras model, and the NLTK corpora needed by
    the lemmatizer, storing everything in module globals."""
    load_response()
    global lemmatizer, tokenizer, le, model
    # Use context managers so the pickle file handles are closed
    # (the previous version leaked them via pickle.load(open(...))).
    with open('model/tokenizers.pkl', 'rb') as fh:
        tokenizer = pickle.load(fh)
    with open('model/labelencoder.pkl', 'rb') as fh:
        le = pickle.load(fh)
    model = keras.models.load_model('model/chat_model.h5')
    lemmatizer = WordNetLemmatizer()
    # No-ops if the corpora are already present locally.
    for corpus in ('punkt', 'wordnet', 'omw-1.4'):
        nltk.download(corpus, quiet=True)
# Remove punctuation from the user's message.
def remove_punctuation(text):
    """Return a one-element list holding *text* lowercased with every
    punctuation character removed (the list shape is what the tokenizer's
    ``texts_to_sequences`` expects downstream)."""
    cleaned = ''.join(ch for ch in text if ch not in string.punctuation).lower()
    return [cleaned]
# Convert preprocessed text into a padded integer vector for the model.
def vectorization(texts_p):
    """Turn a list of cleaned strings into a padded sequence of word indices
    of length ``input_shape``, suitable as model input.

    Reuses the tokenizer already loaded by ``preparation()`` instead of
    re-reading the pickle from disk on every call (the previous version also
    leaked the file handle); falls back to loading it on demand so the
    function still works standalone.
    """
    global tokenizer
    if 'tokenizer' not in globals():
        # preparation() was not called yet — load the artifact once.
        with open('model/tokenizers.pkl', 'rb') as fh:
            tokenizer = pickle.load(fh)
    vector = tokenizer.texts_to_sequences(texts_p)
    vector = np.array(vector).reshape(-1)
    vector = pad_sequences([vector], input_shape)
    return vector
# Classify the user's question into an intent tag.
def predict(vector):
    """Return the intent tag string predicted by the model for *vector*.

    Reuses the model and label encoder already loaded by ``preparation()``
    instead of re-loading them from disk on every call (the previous version
    also leaked the label-encoder file handle); falls back to loading them on
    demand so the function still works standalone.
    """
    global model, le
    if 'model' not in globals():
        model = keras.models.load_model('model/chat_model.h5')
    if 'le' not in globals():
        with open('model/labelencoder.pkl', 'rb') as fh:
            le = pickle.load(fh)
    output = model.predict(vector)
    # argmax over class scores -> encoded label -> original tag string.
    response_tag = le.inverse_transform([output.argmax()])[0]
    return response_tag
# Produce an answer to the user's question.
def generate_response(text):
    """Return a randomly chosen canned answer matching the intent predicted
    for *text*.

    Pipeline: strip punctuation -> vectorize -> classify -> pick one of the
    responses registered for the predicted tag. Delegates dataset loading to
    ``load_response()`` instead of duplicating its body inline (the previous
    version re-parsed the JSON here and shadowed the global ``responses``).
    """
    load_response()  # refresh the global tag -> responses mapping
    texts_p = remove_punctuation(text)
    vector = vectorization(texts_p)
    response_tag = predict(vector)
    return random.choice(responses[response_tag])