XufengDuan committed
Commit: e1b4714 (parent: 9da8cd9)

updated scripts

Files changed (3):
  1. .DS_Store (+0 -0)
  2. src/.DS_Store (+0 -0)
  3. src/backend/model_operations.py (+9 -9)
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
src/.DS_Store CHANGED
Binary files a/src/.DS_Store and b/src/.DS_Store differ
 
src/backend/model_operations.py CHANGED
@@ -299,8 +299,8 @@ class SummaryGenerator:
             payload = {
                 "model": self.model_id,
                 # "max_tokens": 4096,
-                'max_new_tokens': 250,
-                "temperature": 0.0,
+                'max_new_tokens': 50,
+                # "temperature": 0.0,
                 # 'repetition_penalty': 1.1 if 'mixtral' in self.model_id.lower() else 1
             }
             # if 'mixtral' in self.model_id.lower():
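For context: this payload is the JSON body for a hosted text-generation endpoint, with the completion cap lowered to 50 new tokens and the explicit temperature dropped, so the server-side default now applies. A minimal sketch of how such a payload might be posted; the endpoint URL and the `inputs` field are assumptions, not taken from this file:

```python
import requests

# Hypothetical endpoint; the real URL lives elsewhere in model_operations.py.
API_URL = "https://api.example.com/v1/generate"

payload = {
    "model": "some-model-id",  # stands in for self.model_id
    "max_new_tokens": 50,      # new cap introduced by this commit
    # "temperature": 0.0,      # removed here, so the server default applies
}

response = requests.post(API_URL, json={**payload, "inputs": "Summarize: ..."}, timeout=60)
print(response.json())
```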
@@ -343,8 +343,8 @@ class SummaryGenerator:
                 model=self.model_id.replace('openai/',''),
                 messages=[{"role": "system", "content": system_prompt},
                           {"role": "user", "content": user_prompt}],
-                temperature=0.0,
-                max_tokens=250,
+                # temperature=0.0,
+                max_tokens=50,
             )
             result = response['choices'][0]['message']['content']
             # print()
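This hunk is the OpenAI chat path; the `response['choices'][0]['message']['content']` indexing suggests the pre-1.0 `openai` SDK. A sketch of the trimmed call, with the model name and prompts as placeholders:

```python
import openai  # assumes openai<1.0, matching the dict-style response access in this file

def summarize(system_prompt: str, user_prompt: str, model: str = "gpt-3.5-turbo") -> str:
    # With temperature commented out, the API default (1.0) applies;
    # max_tokens=50 hard-caps the completion length.
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "system", "content": system_prompt},
                  {"role": "user", "content": user_prompt}],
        max_tokens=50,
    )
    return response["choices"][0]["message"]["content"]
```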
@@ -358,7 +358,7 @@ class SummaryGenerator:
                 "temperature": 0,
                 "top_p": 0.95,  # cannot change
                 "top_k": 0,
-                "max_output_tokens": 250,
+                "max_output_tokens": 50,
                 # "response_mime_type": "application/json",
             }
             safety_settings = [
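The `max_output_tokens` / `top_k` keys and the `safety_settings` list below suggest this config feeds Google's `google-generativeai` SDK (an assumption). A sketch of the trimmed config in use; the model name is illustrative:

```python
import google.generativeai as genai  # assumed SDK, matching the config keys in this hunk

generation_config = {
    "temperature": 0,
    "top_p": 0.95,  # cannot change
    "top_k": 0,
    "max_output_tokens": 50,  # lowered from 250 by this commit
}

# genai.configure(api_key="...") must run first; the model name is illustrative.
model = genai.GenerativeModel("gemini-pro", generation_config=generation_config)
print(model.generate_content("Summarize: ...").text)
```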
@@ -411,7 +411,7 @@ class SummaryGenerator:
                 # mistralai/Mistral-7B-Instruct-v0.1",
                 messages=[{"role": "system", "content": system_prompt},
                           {"role": "user", "content": user_prompt}],
-                temperature=0.0,
+                # temperature=0.0,
                 max_tokens=1024,
                 api_base="https://api-inference.huggingface.co/models/" + self.model_id)
             print("Model response:", response)
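The `api_base` pointing at the Hugging Face Inference API plus the OpenAI-style `messages=` signature suggests a router such as `litellm` (an assumption; the import sits outside this hunk). Note that this call site keeps `max_tokens=1024` while the others drop to 50; only the explicit temperature is removed. A sketch:

```python
from litellm import completion  # assumed wrapper, inferred from the call shape in this hunk

model_id = "mistralai/Mistral-7B-Instruct-v0.1"  # the example model named in the hunk's comment

# Needs a Hugging Face token in the environment (e.g. HUGGINGFACE_API_KEY).
response = completion(
    model="huggingface/" + model_id,  # litellm's provider prefix; an assumption
    messages=[{"role": "system", "content": "You are a summarizer."},
              {"role": "user", "content": "Summarize: ..."}],
    # temperature=0.0,  # commented out by this commit; the provider default applies
    max_tokens=1024,    # unchanged here, unlike the 50-token caps elsewhere
    api_base="https://api-inference.huggingface.co/models/" + model_id,
)
print(response.choices[0].message.content)
```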
@@ -441,9 +441,9 @@ class SummaryGenerator:
             )
 
             generation_args = {
-                "max_new_tokens": 250,
+                "max_new_tokens": 50,
                 "return_full_text": False,
-                "temperature": 0.0,
+                # "temperature": 0.0,
                 "do_sample": False,
             }
 
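These `generation_args` match the kwargs of a `transformers` text-generation pipeline (an assumption; the pipeline construction is above this hunk). Since `do_sample=False` already forces greedy decoding, the temperature line is a no-op whether commented out or not. A sketch:

```python
from transformers import pipeline  # assumed; the kwargs match the text-generation pipeline

generation_args = {
    "max_new_tokens": 50,       # lowered from 250
    "return_full_text": False,  # return only the completion, not the echoed prompt
    # "temperature": 0.0,       # ignored under greedy decoding anyway
    "do_sample": False,         # greedy decoding
}

# Model choice is illustrative; any chat-tuned causal LM works here.
pipe = pipeline("text-generation", model="microsoft/Phi-3-mini-4k-instruct")
out = pipe([{"role": "user", "content": "Summarize: ..."}], **generation_args)
print(out[0]["generated_text"])
```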
@@ -455,7 +455,7 @@ class SummaryGenerator:
             print(prompt)
             input_ids = self.tokenizer(prompt, return_tensors="pt").to('cuda')
             with torch.no_grad():
-                outputs = self.local_model.generate(**input_ids, max_new_tokens=250, do_sample=True, temperature=0.01, pad_token_id=self.tokenizer.eos_token_id)
+                outputs = self.local_model.generate(**input_ids, max_new_tokens=50, do_sample=True, pad_token_id=self.tokenizer.eos_token_id)
             result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
             result = result.replace(prompt[0], '')
             print(result)
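One behavioral note on this last hunk: removing `temperature=0.01` while keeping `do_sample=True` means `generate` now samples at the default temperature of 1.0, noticeably more random than the near-greedy setting it replaces. A self-contained sketch of the local path; the model id and prompt are placeholders:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Mistral-7B-Instruct-v0.1"  # illustrative; the real id comes from self.model_id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "Summarize: ..."
input_ids = tokenizer(prompt, return_tensors="pt").to("cuda")
with torch.no_grad():
    # do_sample=True with no explicit temperature samples at the default of 1.0;
    # the previous temperature=0.01 was effectively greedy decoding.
    outputs = model.generate(**input_ids, max_new_tokens=50, do_sample=True,
                             pad_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```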
 