# -*- coding: utf-8 -*-
"""Chatbot_Model_Intents.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1M7K6X-b88PRIozWBHBc7hvr9ZLT48lPV
"""
import random
import json
import pickle
import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
# Load the intents from the JSON file
with open('intents.json') as file:
    intents = json.load(file)
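# Expected intents.json layout (a minimal sketch; only the keys this script
# reads are shown — real files typically also carry a "responses" list):
#
# {
#   "intents": [
#     {"tag": "greeting", "patterns": ["Hola", "Buenos dias"]},
#     {"tag": "goodbye",  "patterns": ["Adios", "Hasta luego"]}
#   ]
# }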
# Download the NLTK resources needed for tokenization and lemmatization
nltk.download('punkt')
nltk.download('punkt_tab')
nltk.download('wordnet')
nltk.download('omw-1.4')

words = []
classes = []
documents = []
ignore_letters = ['?', '!', '¿', '.', ',']
# Process each intent and its patterns: tokenize every pattern, collect the
# vocabulary, and pair each token list with its intent tag
for intent in intents['intents']:
    for pattern in intent['patterns']:
        word_list = nltk.word_tokenize(pattern)
        words.extend(word_list)
        documents.append((word_list, intent["tag"]))
        if intent["tag"] not in classes:
            classes.append(intent["tag"])
# Lemmatize and lowercase the vocabulary, dropping punctuation, then deduplicate
lemmatizer = WordNetLemmatizer()
words = [lemmatizer.lemmatize(word.lower()) for word in words if word not in ignore_letters]
words = sorted(set(words))

# Persist the vocabulary and class list for reuse at inference time
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))
# Prepare the training data: one bag-of-words vector and one one-hot
# output row per document
training = []
output_empty = [0] * len(classes)
for document in documents:
    bag = []
    word_patterns = [lemmatizer.lemmatize(word.lower()) for word in document[0]]
    for word in words:
        bag.append(1 if word in word_patterns else 0)
    output_row = list(output_empty)
    output_row[classes.index(document[1])] = 1
    training.append([bag, output_row])
# Shuffle, then split into input features and one-hot labels
random.shuffle(training)
train_x = np.array([element[0] for element in training])
train_y = np.array([element[1] for element in training])
# Model: three fully connected layers with dropout between them, and a
# softmax output over the intent classes
model = Sequential()
model.add(Input(shape=(len(train_x[0]),)))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
# Optimizer configuration
adam = Adam(learning_rate=0.001)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])

# Early stopping to avoid overfitting (currently disabled; pass
# callbacks=[early_stopping] to model.fit to enable it)
#early_stopping = EarlyStopping(monitor='accuracy', patience=10, restore_best_weights=True)

# Train the model for 300 epochs
model.fit(train_x, train_y, epochs=300, batch_size=16, verbose=1)
model.save("chatbot_model.keras")
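
# A minimal inference sketch: the predict_class helper and the 0.25 confidence
# threshold are illustrative assumptions, showing how the trained model and the
# pickled vocabulary/classes would be used to classify a sentence.
def predict_class(sentence):
    # Apply the same tokenize/lemmatize/bag-of-words pipeline used for training
    tokens = [lemmatizer.lemmatize(w.lower()) for w in nltk.word_tokenize(sentence)]
    bag = np.array([[1 if word in tokens else 0 for word in words]])
    probabilities = model.predict(bag, verbose=0)[0]
    best = int(np.argmax(probabilities))
    # Return the most likely intent tag, or None if the model is unsure
    return classes[best] if probabilities[best] > 0.25 else None

# Example usage
print(predict_class("Hola, como estas?"))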