iamkhadke committed on
Commit e966be2 • 1 Parent(s): 4dc99bb

Update app.py

Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -15,7 +15,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
 tok = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-3b", device_map="auto", load_in_8bit=True, torch_dtype=torch.float16 )
-m = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-tuned-alpha-3b", device_map= "auto", quantization_config=quantization_config)
+m = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-tuned-alpha-3b", device_map= "auto", quantization_config=quantization_config,
+                                         offload_folder="./")
 generator = pipeline('text-generation', model=m, tokenizer=tok, device=0)
 print(f"Sucessfully loaded the model to the memory")
 
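
For readability, a minimal sketch of the affected loading code as it stands after this commit. This is not a verbatim copy of app.py: as illustrative adjustments, the load_in_8bit flag is moved from the tokenizer call (where it has no effect) onto the BitsAndBytesConfig, the device=0 pipeline argument is dropped because device_map="auto" already places the model, and the log-message typo is fixed. It assumes transformers, accelerate, and bitsandbytes are installed and a CUDA GPU is available.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

# Quantize linear layers to int8; layers that do not fit on the GPU may stay in fp32 on the CPU.
quantization_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_enable_fp32_cpu_offload=True,
)

tok = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-3b")

# offload_folder (the change in this commit) gives accelerate a directory to
# spill weights that fit neither in GPU nor CPU memory.
m = AutoModelForCausalLM.from_pretrained(
    "stabilityai/stablelm-tuned-alpha-3b",
    device_map="auto",
    quantization_config=quantization_config,
    offload_folder="./",
)

# The model is already dispatched across devices by device_map="auto",
# so no explicit device is passed to the pipeline here.
generator = pipeline("text-generation", model=m, tokenizer=tok)
print("Successfully loaded the model to the memory")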