Update app.py
app.py CHANGED
@@ -252,21 +252,11 @@ import torch
 import nltk
 nltk.download('punkt')
 
-#from google.colab import drive #commented
-#drive.mount("/content/drive") #commented
-
-# Commented out IPython magic to ensure Python compatibility.
-# %cd "/content/drive/My Drive/Colab Notebooks/NLP/ChatBot/"
-# !ls
-
 import random
 import json
 
 import torch
 
-#from model import NeuralNet
-#from nltk_utils import bag_of_words, tokenize
-
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 with open('intents.json', 'r') as json_data:
@@ -299,13 +289,9 @@ from transformers import MBartForConditionalGeneration, MBart50Tokenizer
 
 
 ################################
-
-
-
-    tokenizer = MBart50Tokenizer.from_pretrained(model_name)
-    return model, tokenizer
-
-model, tokenizer = download_model()
+model_name = "facebook/mbart-large-50-many-to-many-mmt"
+model = MBartForConditionalGeneration.from_pretrained(model_name)
+tokenizer = MBart50Tokenizer.from_pretrained(model_name)
 
 def get_response(input_text):
     model_inputs = tokenizer(input_text, return_tensors="pt")
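This hunk replaces the remnants of the old download_model() helper with module-level loading of the mBART-50 many-to-many checkpoint. For context, a minimal sketch of how this model/tokenizer pair is typically driven for translation; the language codes (en_XX as source, hi_IN as target) are illustrative assumptions, since the rest of get_response is not visible in this diff:

from transformers import MBartForConditionalGeneration, MBart50Tokenizer

model_name = "facebook/mbart-large-50-many-to-many-mmt"
model = MBartForConditionalGeneration.from_pretrained(model_name)
tokenizer = MBart50Tokenizer.from_pretrained(model_name)

# Illustrative usage only; the language pair is an assumption, not from the diff.
tokenizer.src_lang = "en_XX"  # tell the tokenizer the input is English
inputs = tokenizer("How are you?", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"],  # force Hindi output
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))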
@@ -348,12 +334,6 @@ def get_response(input_text):
     else:
         return "I do not understand..."
 
-
-
-#def get_chatbot(sentence):
-
-#return classifier(sentence)
-
 title = "WeASK: ChatBOT"
 description = "Ask your query here"
 chatbot_demo = gr.Interface(fn=get_response, inputs = 'text',outputs='text',title = title, description = description)
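The visible diff ends at the gr.Interface construction and does not show how the demo is started. A minimal sketch of the usual Gradio pattern; the launch() call is an assumption, not part of this commit:

# Start the Gradio server; on Hugging Face Spaces this serves the app.
chatbot_demo.launch()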