LinkangZhan committed
Commit b2dcf43
1 Parent(s): 15fa80b
Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -4,13 +4,13 @@ from transformers_stream_generator.main import NewGenerationMixin, StreamGenerat
 import gradio as gr
 import torch
 
-config = PeftConfig.from_pretrained("Junity/Genshin-World-Model")
-model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan-13B-Base")
-model = PeftModel.from_pretrained(model, "Junity/Genshin-World-Model")
-tokenizer = AutoTokenizer.from_pretrained("Junity/Genshin-World-Model")
+config = PeftConfig.from_pretrained("Junity/Genshin-World-Model", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("../Baichuan/models--baichuan-inc--Baichuan-13B-Base\snapshots\Baichuan-13B-Base", trust_remote_code=True)
+model = PeftModel.from_pretrained(model, r"../Baichuan/r64alpha32dropout0.5loss0.007/checkpoint-5000", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("Junity/Genshin-World-Model", trust_remote_code=True)
 
 history = []
-device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"
 
 
 def respond(role_name, msg, chatbot, character):
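
For context: the change above replaces the hub ID "baichuan-inc/Baichuan-13B-Base" with a local snapshot directory, attaches what appears to be a LoRA adapter checkpoint (judging by the r64/alpha32 naming) through PeftModel, and pins inference to CPU instead of auto-detecting CUDA. A minimal sketch of that loading pattern follows; it is not the exact committed file. The local paths are copied from the diff and assumed to exist on the author's machine, the mixed backslashes in the committed base-model path are normalized to forward slashes, and trust_remote_code=True is kept only on the transformers calls, where it is needed for Baichuan's custom model and tokenizer code.

# Minimal sketch of the loading pattern in the commit above (assumptions noted in comments).
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Local directories taken from the diff; adjust to your own setup.
BASE_MODEL_DIR = "../Baichuan/models--baichuan-inc--Baichuan-13B-Base/snapshots/Baichuan-13B-Base"
ADAPTER_DIR = "../Baichuan/r64alpha32dropout0.5loss0.007/checkpoint-5000"

# Adapter config and tokenizer still come from the hub repo.
config = PeftConfig.from_pretrained("Junity/Genshin-World-Model")
tokenizer = AutoTokenizer.from_pretrained("Junity/Genshin-World-Model", trust_remote_code=True)

# Load the local base model, then wrap it with the fine-tuned adapter weights.
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL_DIR, trust_remote_code=True)
model = PeftModel.from_pretrained(model, ADAPTER_DIR)

# The commit hard-codes CPU rather than checking torch.cuda.is_available().
device = "cpu"
model = model.to(device)

Running a 13B-parameter base model plus adapter on CPU is memory-heavy, so the hard-coded device most likely reflects the hardware this app.py is deployed on rather than a general recommendation.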