Uglevod7 committed on
Commit cc5c5c0
1 Parent(s): 225f2ef
Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -11,7 +11,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
             disable_exllama=True
             )
 
-tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
+tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True,disable_exllama=True)
 
 prompt = "Tell me about AI"
 prompt_template=f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.
@@ -26,7 +26,7 @@ prompt_template=f'''Below is an instruction that describes a task. Write a respo
 print("\n\n*** Generate:")
 
 input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
-output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)
+output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512,disable_exllama=True)
 print(tokenizer.decode(output[0]))
 
 # Inference can also be done using transformers' pipeline
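Note on the change: disable_exllama is a GPTQ quantization option. Passing it to AutoTokenizer.from_pretrained is most likely ignored, and newer transformers releases typically reject unrecognized keywords passed to model.generate, so the two added calls may error out. A minimal sketch of the more usual placement, assuming a GPTQ checkpoint and a transformers version with GPTQConfig support (roughly >= 4.32); the model name below is a hypothetical placeholder, not this repo's actual value:

# A minimal sketch, not this repo's code: disable_exllama usually belongs in the
# quantization config at model load time, not in the tokenizer or generate() calls.
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_name_or_path = "some-user/some-model-GPTQ"  # hypothetical placeholder

model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    device_map="auto",
    # ExLlama kernels are disabled here, at load time
    quantization_config=GPTQConfig(bits=4, disable_exllama=True),
)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

input_ids = tokenizer("Tell me about AI", return_tensors="pt").input_ids.cuda()
output = model.generate(
    inputs=input_ids,
    temperature=0.7,
    do_sample=True,
    top_p=0.95,
    top_k=40,
    max_new_tokens=512,
)
print(tokenizer.decode(output[0]))

Later transformers versions deprecate disable_exllama in favor of use_exllama=False inside GPTQConfig, so the exact keyword depends on the installed version.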