SpiketheCowboy committed on
Commit
9869274
1 Parent(s): f5e9f18

Update app.py

Files changed (1)
  1. app.py +4 -0
app.py CHANGED
@@ -57,6 +57,9 @@ def apply_delta(base_model_path, target_model_path, delta_path):
     base.load_state_dict(target_weights)
     # base.save_pretrained(target_model_path)
     # delta_tokenizer.save_pretrained(target_model_path)
+
+    delta = None
+
     return base, delta_tokenizer


@@ -64,6 +67,7 @@ base_weights = 'decapoda-research/llama-7b-hf'
 target_weights = 'expertllama' # local path
 delta_weights = 'OFA-Sys/expertllama-7b-delta'
 model, tokenizer = apply_delta(base_weights, target_weights, delta_weights)
+model = model.to(torch.float)

 # tokenizer = transformers.LlamaTokenizer.from_pretrained(expertllama_path)
 # model = transformers.LlamaForCausalLM.from_pretrained(expertllama_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
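For context, a minimal sketch of what the patched section does after this commit. Only the lines visible in the hunks (the load_state_dict call, the new delta = None, the base/target/delta paths, and the final cast to torch.float) come from the commit itself; the delta-merge loop, the from_pretrained calls, and the tokenizer loading below are assumptions about the unchanged parts of app.py, written in the usual delta-weight recovery pattern.

import torch
import transformers

def apply_delta(base_model_path, target_model_path, delta_path):
    # Load the public LLaMA base and the released delta checkpoint (assumed loading code;
    # not shown in this diff).
    base = transformers.LlamaForCausalLM.from_pretrained(
        base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    delta = transformers.LlamaForCausalLM.from_pretrained(
        delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    delta_tokenizer = transformers.LlamaTokenizer.from_pretrained(delta_path)

    # Recover the target weights by adding the delta to the base weights
    # (assumed merge rule; the actual rule lives in the unchanged part of app.py).
    delta_sd = delta.state_dict()
    target_weights = {name: param + delta_sd[name]
                      for name, param in base.state_dict().items()}

    base.load_state_dict(target_weights)      # kept from the diff context
    # base.save_pretrained(target_model_path)
    # delta_tokenizer.save_pretrained(target_model_path)

    delta = None                               # added in this commit: drop the delta reference to free memory
    return base, delta_tokenizer

base_weights = 'decapoda-research/llama-7b-hf'
target_weights = 'expertllama'  # local path
delta_weights = 'OFA-Sys/expertllama-7b-delta'
model, tokenizer = apply_delta(base_weights, target_weights, delta_weights)
model = model.to(torch.float)                  # added in this commit: cast the fp16 weights to float32, presumably for CPU inference in the Space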