dorogan committed on
Commit
676808b
1 Parent(s): 542774a

Update: hf cache folder was manually provided

Files changed (1)
  1. model.py +6 -2
model.py CHANGED
@@ -1,11 +1,15 @@
  from transformers import AutoTokenizer, AutoModelForCausalLM
  import torch
+ import os
+
+ hf_cache_folder = './huggingface_cache/'
+ os.makedirs(hf_cache_folder, exist_ok=True)

  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  model_id = "CohereForAI/c4ai-command-r-v01-4bit"
- tokenizer = AutoTokenizer.from_pretrained(model_id)
- model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
+ tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=hf_cache_folder)
+ model = AutoModelForCausalLM.from_pretrained(model_id, cache_dir=hf_cache_folder).to(device)
  ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>

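For reference, a minimal usage sketch (not part of this commit) showing how the updated setup could be driven for generation. It assumes the prompt format in the trailing comment of model.py is produced with tokenizer.apply_chat_template; the generation parameters are illustrative assumptions, not taken from this repository.

# Usage sketch, not part of the commit: exercises the tokenizer/model loaded above.
# The chat-template call and generation parameters are assumptions for illustration.
messages = [{"role": "user", "content": "Hello, how are you?"}]

# apply_chat_template renders the <BOS_TOKEN><|START_OF_TURN_TOKEN|>... prompt
# referenced in the comment at the end of model.py.
input_ids = tokenizer.apply_chat_template(
    messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
).to(device)

output = model.generate(input_ids, max_new_tokens=100, do_sample=True, temperature=0.3)
print(tokenizer.decode(output[0], skip_special_tokens=True))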