research14 committed on
Commit
3036c83
1 Parent(s): ef7e19b

remove exception

Browse files
Files changed (1) hide show
  1. app.py +9 -13
app.py CHANGED
@@ -59,20 +59,16 @@ def vicuna_respond(tab_name, message, chat_history):
59
  formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
60
  print('Vicuna - Prompt + Context:')
61
  print(formatted_prompt)
62
- try:
63
- input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
64
- output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=100, num_beams=5, no_repeat_ngram_size=2)
65
- bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
66
- # Remove formatted prompt from bot_message
67
- bot_message = bot_message.replace(formatted_prompt, '')
68
- print(bot_message)
69
 
70
- chat_history.append((formatted_prompt, bot_message))
71
- time.sleep(2)
72
- return tab_name, "", chat_history
73
- except Exception as e:
74
- print(f"Error in vicuna_respond: {str(e)}")
75
- return tab_name, "", chat_history
76
 
77
  def llama_respond(tab_name, message, chat_history):
78
  formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
 
59
  formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
60
  print('Vicuna - Prompt + Context:')
61
  print(formatted_prompt)
62
+ input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
63
+ output_ids = vicuna_model.generate(input_ids, do_sample=True, max_length=100, num_beams=5, no_repeat_ngram_size=2)
64
+ bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
65
+ # Remove formatted prompt from bot_message
66
+ bot_message = bot_message.replace(formatted_prompt, '')
67
+ print(bot_message)
 
68
 
69
+ chat_history.append((formatted_prompt, bot_message))
70
+ time.sleep(2)
71
+ return tab_name, "", chat_history
 
 
 
72
 
73
  def llama_respond(tab_name, message, chat_history):
74
  formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''