codelion committed on
Commit
70674a9
1 Parent(s): bf59298

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -12,12 +12,12 @@ DEFAULT_MAX_NEW_TOKENS = 512
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
- # Chat with Patched Mixture of Experts (MoE) Model
16
  """
17
 
18
  LICENSE = """\
19
  ---
20
- This space is powered by the patched-mix-4x7B model, which was created by [patched](https://patched.codes).
21
  """
22
 
23
  if not torch.cuda.is_available():
@@ -25,7 +25,7 @@ if not torch.cuda.is_available():
25
 
26
 
27
  if torch.cuda.is_available():
28
- model_id = "Qwen/CodeQwen1.5-7B-Chat"
29
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
30
  tokenizer = AutoTokenizer.from_pretrained(model_id)
31
  tokenizer.padding_side = 'right'
 
12
  MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
13
 
14
  DESCRIPTION = """\
15
+ # Chat with Patched Coder
16
  """
17
 
18
  LICENSE = """\
19
  ---
20
+ This space is powered by the patched-coder-7b model, which was created by [patched](https://patched.codes).
21
  """
22
 
23
  if not torch.cuda.is_available():
 
25
 
26
 
27
  if torch.cuda.is_available():
28
+ model_id = "patched-codes/patched-coder-7b"
29
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
30
  tokenizer = AutoTokenizer.from_pretrained(model_id)
31
  tokenizer.padding_side = 'right'