davanstrien committed
Commit fc46fb1
1 Parent(s): 29d980f

chore: Refactor generate_instruction_response function and optimize Gradio demo description

Files changed (1)
  1. app.py +15 -11
app.py CHANGED
```diff
@@ -28,14 +28,14 @@ with open("model_configs.json", "r") as f:
 # Extract instruction
 extract_input = model_config["extract_input"]

+terminators = [
+    tokenizer.eos_token_id,
+    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
+]

-@spaces.GPU
-def generate_instruction_response():
-    terminators = [
-        tokenizer.eos_token_id,
-        tokenizer.convert_tokens_to_ids("<|eot_id|>"),
-    ]

+@spaces.GPU
+def generate_instruction():
     instruction = pipeline(
         extract_input,
         max_new_tokens=2048,
@@ -45,13 +45,11 @@ def generate_instruction_response():
         top_p=1,
     )

-    sanitized_instruction = instruction[0]["generated_text"][
-        len(extract_input) :
-    ].split("\n")[0]
+    return instruction[0]["generated_text"][len(extract_input) :].split("\n")[0]

-    response_template = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"""

-    response = pipeline(
+def generate_response(response_template):
+    return pipeline(
         response_template,
         max_new_tokens=2048,
         eos_token_id=terminators,
@@ -60,7 +58,13 @@ def generate_instruction_response():
         top_p=1,
     )

+
+def generate_instruction_response():
+    sanitized_instruction = generate_instruction()
+    response_template = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"""
+
     user_message = sanitized_instruction
+    response = generate_response(response_template)
     assistant_response = response[0]["generated_text"][len(response_template) :]

     return user_message, assistant_response
```
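
For orientation, here is a minimal, self-contained sketch of how the refactored functions might be wired into the Space's Gradio demo. Only `generate_instruction_response` and the prompt template follow the diff above; the `gr.Interface` wiring, the textbox labels, and the stub helpers are assumptions added so the sketch runs on its own without the model or a GPU.

```python
# Hypothetical wiring for the refactored functions; not part of this commit.
import gradio as gr


# Stubs standing in for the pipeline-backed helpers introduced by this diff,
# so the sketch runs without a GPU; the real versions call the
# text-generation pipeline with the module-level terminators.
def generate_instruction():
    return "Write a haiku about refactoring."


def generate_response(response_template):
    return [{"generated_text": response_template + "Functions split in three."}]


def generate_instruction_response():
    # Sample an instruction, wrap it in the chat template used above, then
    # generate the assistant turn and strip the prompt prefix.
    sanitized_instruction = generate_instruction()
    response_template = (
        "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
        f"{sanitized_instruction}<|eot_id|>"
        "<|start_header_id|>assistant<|end_header_id|>\n\n"
    )
    response = generate_response(response_template)
    assistant_response = response[0]["generated_text"][len(response_template):]
    return sanitized_instruction, assistant_response


# Assumed demo wiring: one click produces an instruction/response pair.
demo = gr.Interface(
    fn=generate_instruction_response,
    inputs=None,
    outputs=[
        gr.Textbox(label="Generated instruction"),
        gr.Textbox(label="Generated response"),
    ],
)

if __name__ == "__main__":
    demo.launch()
```

The split keeps each generation stage in its own function, so the instruction-extraction step and the response-generation step can be changed or reused independently of the combined `generate_instruction_response` entry point.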