SearchUnify-ML committed
Commit 2e1ba24
1 parent: c7c5d47

Update README.md

Files changed (1):
1. README.md (+10 −8)
README.md CHANGED
@@ -33,7 +33,7 @@ pip install tiktoken
 
 ```
 
-from transformers import AutoTokenizer, pipeline
+from transformers import AutoTokenizer
 from auto_gptq import AutoGPTQForCausalLM
 
 model_name_or_path = "SearchUnify-ML/xgen-7b-8k-open-instruct-gptq"
@@ -41,18 +41,20 @@ model_basename = "gptq_model-4bit-128g"
 
 use_triton = False
 
-tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False, trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,
+                                          use_fast=False,
+                                          trust_remote_code=True)
 
 model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
-model_basename=model_basename,
-use_safetensors=False,
-trust_remote_code=True,
-device="cuda:0",
-use_triton=use_triton)
+                                           model_basename=model_basename,
+                                           use_safetensors=False,
+                                           trust_remote_code=True,
+                                           device="cuda:0",
+                                           use_triton=use_triton)
 
 # Note: check the prompt template is correct for this model.
 prompt = "Explain the rules of field hockey to a novice."
-prompt_template=f'''### Instruction: {prompt}
+prompt_template = f'''### Instruction: {prompt}
 ### Response:'''
 
 print("\n\n*** Generate:")
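
The README snippet in this diff stops at the generate banner, so the decoding step itself is not shown here. For completeness, a minimal sketch of how generation typically continues from the objects defined above (`model`, `tokenizer`, `prompt_template`); the sampling settings `do_sample=True`, `temperature=0.7`, and `max_new_tokens=512` are illustrative assumptions, not values from this commit:

```python
# Sketch only: continues the README snippet above; not part of this commit.
# model, tokenizer, and prompt_template come from the code shown in the diff.
input_ids = tokenizer(prompt_template, return_tensors="pt").input_ids.to("cuda:0")

# AutoGPTQForCausalLM exposes the standard transformers generate() API.
output_ids = model.generate(input_ids=input_ids,
                            do_sample=True,       # assumed sampling setup
                            temperature=0.7,      # illustrative value
                            max_new_tokens=512)   # illustrative value

# Decode only the tokens generated after the prompt.
print(tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True))
```

Slicing off the first `input_ids.shape[1]` tokens keeps the instruction out of the printed output, so only the model's response after `### Response:` is shown.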