Update app.py
app.py (CHANGED)
@@ -310,11 +310,10 @@ model, tokenizer = download_model()
 def get_response(input_text):
     model_inputs = tokenizer(input_text, return_tensors="pt")
     generated_tokens = model.generate(**model_inputs,forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
-    translation
-
-    #
-
-    #print(string2)
+    translation= tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+
+    #string2=" ".join(map(str,translation ))
+



@@ -330,7 +329,7 @@ def get_response(input_text):
     #if sentence== "quit":
     #break

-    sentence= tokenize(
+    sentence= tokenize(translation)
     X = bag_of_words(sentence, all_words)
     X = X.reshape(1, X.shape[0])
     X = torch.from_numpy(X).to(device)
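For context, this change makes get_response() decode the generated tokens into an English string (translation) and hand it to the existing bag-of-words intent pipeline (tokenize, bag_of_words, reshape, torch.from_numpy). Below is a minimal, self-contained sketch of just the translation step; the checkpoint name, the source language, and the example sentence are assumptions for illustration, since the diff only shows that download_model() returns an mBART-style model/tokenizer pair whose tokenizer exposes lang_code_to_id.

from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

# Assumed checkpoint and source language; the repo's download_model() may use
# a different mBART variant.
MODEL_NAME = "facebook/mbart-large-50-many-to-many-mmt"
model = MBartForConditionalGeneration.from_pretrained(MODEL_NAME)
tokenizer = MBart50TokenizerFast.from_pretrained(MODEL_NAME)
tokenizer.src_lang = "hi_IN"  # assumed source language of the user's message

def translate_to_english(input_text: str) -> str:
    model_inputs = tokenizer(input_text, return_tensors="pt")
    generated_tokens = model.generate(
        **model_inputs,
        forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"],  # force English output
    )
    # batch_decode returns a list of strings, one per sequence in the batch.
    translation = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    return translation[0]

print(translate_to_english("आप कैसे हैं?"))  # e.g. "How are you?"

One note on the committed change: because batch_decode returns a list, the new sentence= tokenize(translation) line passes a list rather than a string to tokenize(); if tokenize() expects plain text, joining the list (as the commented-out string2 line suggests) or indexing translation[0] as in the sketch above would be the usual way to get a string first.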
|