cryptocalypse committed on
Commit
16c8c95
1 Parent(s): 90365a3

Update gen.py

Files changed (1): gen.py (+5 −5)
gen.py CHANGED
@@ -3,9 +3,9 @@ import sys
 import sys
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-tokenizer = AutoTokenizer.from_pretrained('stabilityai/stablelm-2-zephyr-1_6b')
+tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b-it')
 model = AutoModelForCausalLM.from_pretrained(
-    'stabilityai/stablelm-2-zephyr-1_6b',
+    'google/gemma-2-2b-it',
     device_map="auto"
 )
 
@@ -153,7 +153,7 @@ prompt = (
 
 def generate(event):
     # Generate the text using the model
-    prompt_msg = [{'role': 'system', 'content': prompt}, {'role': 'user', 'content': event}]
+    prompt_msg = [{'role': 'user', 'content': prompt + "\n\n" + event}]
     inputs = tokenizer.apply_chat_template(
         prompt_msg,
         add_generation_prompt=False,
@@ -162,11 +162,11 @@ def generate(event):
 
     tokens = model.generate(
         inputs.to(model.device),
-        max_new_tokens=14096,
+        max_new_tokens=20096,
         temperature=0.7,
         do_sample=True
     )
 
 
     # Print the generated output
-    return "{".join(tokenizer.decode(tokens[0], skip_special_tokens=True).split("<|user|>")[1].split("{")[1:-1])
+    return tokenizer.decode(tokens[0], skip_special_tokens=True)
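
For context, a minimal self-contained sketch of the post-commit flow. Gemma's chat template does not accept a separate 'system' role, which is presumably why this commit folds the instruction prompt into the single user turn. The `prompt` string below is a placeholder, since its real definition (around line 153 of gen.py) falls outside these hunks, and return_tensors="pt" is assumed from the truncated apply_chat_template call.

import sys

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('google/gemma-2-2b-it')
model = AutoModelForCausalLM.from_pretrained(
    'google/gemma-2-2b-it',
    device_map="auto"
)

# Placeholder: the real instruction prompt is defined around line 153 of gen.py.
prompt = "You are an assistant. Respond to the event described below."

def generate(event):
    # Gemma's template rejects a 'system' role, so the instruction prompt
    # is concatenated into the single user message instead.
    prompt_msg = [{'role': 'user', 'content': prompt + "\n\n" + event}]
    inputs = tokenizer.apply_chat_template(
        prompt_msg,
        add_generation_prompt=False,  # as committed; True is the more common choice when prompting for a reply
        return_tensors="pt"           # assumed from the truncated call; needed for .to(model.device)
    )
    tokens = model.generate(
        inputs.to(model.device),
        max_new_tokens=20096,  # as committed; sampling usually stops at EOS well before this cap
        temperature=0.7,
        do_sample=True
    )
    # skip_special_tokens strips Gemma's <bos>/<end_of_turn> markers from the decoded text
    return tokenizer.decode(tokens[0], skip_special_tokens=True)

if __name__ == "__main__":
    print(generate(sys.argv[1] if len(sys.argv) > 1 else "test event"))

Note the simplified return: the previous version sliced the decoded output on the StableLM-specific "<|user|>" marker and on "{" delimiters, which no longer applies to Gemma's output format, so the new code returns the full decoded text instead.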