Update app.py
app.py CHANGED
@@ -12,12 +12,12 @@ DEFAULT_MAX_NEW_TOKENS = 512
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-# Chat with Patched
+# Chat with Patched Coder
 """
 
 LICENSE = """\
 ---
-This space is powered by the patched-
+This space is powered by the patched-coder-7b model, which was created by [patched](https://patched.codes).
 """
 
 if not torch.cuda.is_available():
@@ -25,7 +25,7 @@ if not torch.cuda.is_available():
 
 
 if torch.cuda.is_available():
-    model_id = "
+    model_id = "patched-codes/patched-coder-7b"
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.padding_side = 'right'
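For reference, here is a minimal, self-contained sketch of the load-and-generate flow the updated app.py implies. The two from_pretrained calls and the model_id come straight from the diff; the prompt, the generate() call, and the 512-token cap (matching DEFAULT_MAX_NEW_TOKENS from the hunk header) are illustrative assumptions, since the rest of the Space's code is not shown here. Note that load_in_4bit=True additionally requires the bitsandbytes and accelerate packages and a CUDA GPU.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# From the diff: load the patched-coder-7b model in 4-bit precision,
# letting accelerate place layers across available devices.
model_id = "patched-codes/patched-coder-7b"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.padding_side = 'right'

# Illustrative usage (not part of the diff): tokenize a prompt and generate.
prompt = "Write a Python function that reverses a string."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=512)

# Strip the prompt tokens and print only the completion.
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```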