|
|
|
"""app.ipynb |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1Z_cMyllUfHf2lYtUtdS1ggVMpLCLg0-j |
|
""" |
|
import gradio as gr
import numpy as np
import nltk

nltk.download('punkt')

from nltk.stem.porter import PorterStemmer

stemmer = PorterStemmer()
|
def tokenize(sentence):
    """
    Split a sentence into an array of words/tokens.
    A token can be a word, a punctuation character, or a number.
    """
    return nltk.word_tokenize(sentence)
|
def stem(word):
    """
    Stemming = finding the root form of a word.
    Example:
        words = ["organize", "organizes", "organizing"]
        words = [stem(w) for w in words]
        -> ["organ", "organ", "organ"]
    """
    return stemmer.stem(word.lower())
|
def bag_of_words(tokenized_sentence, words):
    """
    Return a bag-of-words array:
    1 for each known word that exists in the sentence, 0 otherwise.
    Example:
        sentence = ["hello", "how", "are", "you"]
        words = ["hi", "hello", "I", "you", "bye", "thank", "cool"]
        bag   = [  0,     1,      0,   1,     0,     0,       0  ]
    """
    # Stem each word in the sentence so it matches the stemmed vocabulary.
    sentence_words = [stem(word) for word in tokenized_sentence]

    # Initialize the bag with a 0 for each vocabulary word.
    bag = np.zeros(len(words), dtype=np.float32)
    for idx, w in enumerate(words):
        if w in sentence_words:
            bag[idx] = 1

    return bag
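
# Quick, illustrative sanity check of the helpers above. The sample sentence
# and vocabulary are made-up values, not drawn from the training data.
_demo_vocab = [stem(w) for w in ["hi", "hello", "you"]]
assert bag_of_words(tokenize("hello you"), _demo_vocab).tolist() == [0.0, 1.0, 1.0]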
|
|
import torch
import torch.nn as nn


class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.l1 = nn.Linear(input_size, hidden_size)
        self.l2 = nn.Linear(hidden_size, hidden_size)
        self.l3 = nn.Linear(hidden_size, num_classes)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.l1(x)
        out = self.relu(out)
        out = self.l2(out)
        out = self.relu(out)
        # No softmax on the output: CrossEntropyLoss applies log-softmax itself.
        out = self.l3(out)
        return out
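
# Illustrative shape check with made-up sizes (5 inputs, hidden width 8,
# 3 classes): a batch of 2 vectors maps to one logit per class.
_demo_net = NeuralNet(5, 8, 3)
assert _demo_net(torch.randn(2, 5)).shape == (2, 3)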
|
|
import random
import json

from torch.utils.data import Dataset, DataLoader

# Load the intents file: each intent has a tag, example patterns, and
# canned responses.
path = 'intents.json'
with open(path, 'r') as f:
    intents = json.load(f)
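
# For reference, the structure this code expects from intents.json (the tag,
# pattern, and response values here are illustrative placeholders):
# {
#   "intents": [
#     {
#       "tag": "greeting",
#       "patterns": ["Hi", "Hello", "Hey"],
#       "responses": ["Hello! How can I help you?"]
#     }
#   ]
# }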
|
|
# bag_of_words, tokenize, and stem are defined earlier in this notebook, so
# there is no separate nltk_utils module to import (and the punkt tokenizer
# data is already downloaded above).
|
all_words = []
tags = []
xy = []

# Collect each intent's tag, tokenize its patterns, and keep (tokens, tag)
# pairs for training.
for intent in intents['intents']:
    tag = intent['tag']
    tags.append(tag)
    for pattern in intent['patterns']:
        w = tokenize(pattern)
        all_words.extend(w)
        xy.append((w, tag))

# Punctuation and stray quote tokens to exclude from the vocabulary.
ignore_words = ['(', ')', '-', ':', ',', "'s", '!', "'", "''", '--', '.',
                '?', ';', '[', ']', '``', '‘', '’', '“', '”']
all_words = [stem(w) for w in all_words if w not in ignore_words]

# Deduplicate and sort the vocabulary and the tag list.
all_words = sorted(set(all_words))
tags = sorted(set(tags))

print(len(xy), "patterns")
print(len(tags), "tags:", tags)
print(len(all_words), "unique stemmed words:", all_words)
|
# Build the training matrices: X is one bag-of-words vector per pattern,
# y is the index of the pattern's tag (CrossEntropyLoss expects class indices).
X_train = []
y_train = []
for (pattern_sentence, tag) in xy:
    bag = bag_of_words(pattern_sentence, all_words)
    X_train.append(bag)

    label = tags.index(tag)
    y_train.append(label)

# X_train: (num_patterns, vocab_size); y_train: (num_patterns,)
X_train = np.array(X_train)
y_train = np.array(y_train)
|
# Hyper-parameters
num_epochs = 1000
batch_size = 8
learning_rate = 0.001
input_size = len(X_train[0])
hidden_size = 8
output_size = len(tags)
print(input_size, output_size)
|
class ChatDataset(Dataset):

    def __init__(self):
        self.n_samples = len(X_train)
        self.x_data = X_train
        self.y_data = y_train

    # Support indexing so dataset[i] returns the i-th (bag, label) pair.
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    # DataLoader calls this to get the number of samples.
    def __len__(self):
        return self.n_samples


# NeuralNet is defined earlier in this notebook, so the usual
# `from model import NeuralNet` is not needed here.

dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=2)
|
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = NeuralNet(input_size, hidden_size, output_size).to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
|
# Training loop
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        words = words.to(device)
        labels = labels.to(dtype=torch.long).to(device)

        # Forward pass
        outputs = model(words)
        loss = criterion(outputs, labels)

        # Backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

print(f'final loss: {loss.item():.4f}')
|
# Save everything needed at inference time in a single checkpoint.
data = {
    "model_state": model.state_dict(),
    "input_size": input_size,
    "hidden_size": hidden_size,
    "output_size": output_size,
    "all_words": all_words,
    "tags": tags
}

FILE = "data.pth"
torch.save(data, FILE)

print(f'training complete. file saved to {FILE}')
|
import warnings
warnings.filterwarnings('ignore')
|
# Inference setup. NeuralNet, bag_of_words, and tokenize are all defined
# above, so the usual `from model import ...` / `from nltk_utils import ...`
# lines are unnecessary in this single-notebook version.

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

with open('intents.json', 'r') as json_data:
    intents = json.load(json_data)
|
FILE = "data.pth" |
|
data = torch.load(FILE, map_location=torch.device('cpu')) |
|
|
|
input_size = data["input_size"] |
|
hidden_size = data["hidden_size"] |
|
output_size = data["output_size"] |
|
all_words = data['all_words'] |
|
tags = data['tags'] |
|
model_state = data["model_state"] |
|
|
|
model = NeuralNet(input_size, hidden_size, output_size).to(device) |
|
model.load_state_dict(model_state) |
|
model.eval() |
|
|
|
bot_name = "Sam" |
|
|
|
|
|
|
|
def get_response(msg):
    # Turn the raw message into a bag-of-words vector shaped (1, input_size).
    sentence = tokenize(msg)
    X = bag_of_words(sentence, all_words)
    X = X.reshape(1, X.shape[0])
    X = torch.from_numpy(X).to(device)

    output = model(X)
    _, predicted = torch.max(output, dim=1)

    tag = tags[predicted.item()]

    # Only answer when the model is reasonably confident in the predicted tag.
    probs = torch.softmax(output, dim=1)
    prob = probs[0][predicted.item()]
    if prob.item() > 0.75:
        for intent in intents['intents']:
            if tag == intent["tag"]:
                return random.choice(intent['responses'])

    return "I do not understand..."
|
|
|
print("Let's chat! (type 'quit' to exit)") |
|
while True: |
|
|
|
sentence = input("You: ") |
|
if sentence == "quit": |
|
break |
|
|
|
sentence = tokenize(sentence) |
|
X = bag_of_words(sentence, all_words) |
|
X = X.reshape(1, X.shape[0]) |
|
X = torch.from_numpy(X).to(device) |
|
|
|
output = model(X) |
|
_, predicted = torch.max(output, dim=1) |
|
|
|
tag = tags[predicted.item()] |
|
|
|
probs = torch.softmax(output, dim=1) |
|
prob = probs[0][predicted.item()] |
|
if prob.item() > 0.75: |
|
for intent in intents['intents']: |
|
if tag == intent["tag"]: |
|
print(f"{bot_name}: {random.choice(intent['responses'])}") |
|
else: |
|
print(f"{bot_name}: I do not understand...") |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
title = "ChatBOT" |
|
|
|
chatbot_demo = gr.Interface(fn=get_response, inputs = 'text',outputs='text',title = title,description = 'Chat BOT') |
|
chatbot_demo .launch() |
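
# When running in Colab, passing share=True to launch() serves the demo
# through a temporary public share link:
#   chatbot_demo.launch(share=True)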