freQuensy23 committed
Commit • 8e47a5a • 1 parent: 0ace51c
Update app.py
app.py
CHANGED
@@ -12,35 +12,16 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
-#
-
-This Space demonstrates model [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta, a Llama 2 model with 7B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
-
-For more details about the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
-
-Looking for an even more powerful model? Check out the [13B version](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat) or the large [70B model demo](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI).
+# MIQU-70B
 """
 
-LICENSE = """
-<p/>
-
----
-As a derivate work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-2-7b-chat) by Meta,
-this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
+LICENSE = """Pirate license
 """
 
 if not torch.cuda.is_available():
-    DESCRIPTION += "
+    DESCRIPTION += ""
 
 
-if torch.cuda.is_available():
-    model_id = "meta-llama/Llama-2-7b-chat-hf"
-    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    tokenizer.use_default_system_prompt = False
-
-
-@spaces.GPU
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
@@ -58,31 +39,17 @@ def generate(
     conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
 
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
-    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
-    input_ids = input_ids.to(model.device)
 
-    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
-    generate_kwargs = dict(
-        {"input_ids": input_ids},
-        streamer=streamer,
-        max_new_tokens=max_new_tokens,
-        do_sample=True,
-        top_p=top_p,
-        top_k=top_k,
-        temperature=temperature,
-        num_beams=1,
-        repetition_penalty=repetition_penalty,
-    )
-    t = Thread(target=model.generate, kwargs=generate_kwargs)
-    t.start()
+    r = requests.post('https://social-warthog-fleet.ngrok-free.app/api/chat', params={
+        "model": "miqu",
+        "messages": conversation,
+        "stream": false,
+        "options": {"num_predict": 128, "temperature": 1, "top_p": 0.95}
+
+    })
+    outputs = [r.json()['message']['content']]
 
-    outputs = []
-    for text in streamer:
-        outputs.append(text)
-        yield "".join(outputs)
+    yield "".join(outputs)
 
 
 chat_interface = gr.ChatInterface(
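The removed code follows the usual `transformers` streaming pattern: `model.generate` runs on a background thread while a `TextIteratorStreamer` is drained by the caller, so the Gradio callback can yield partial text as it is produced. Below is a minimal sketch of that pattern; it reuses the model id from the removed lines, but the generation settings are illustrative rather than the Space's exact ones.

from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "meta-llama/Llama-2-7b-chat-hf"  # same id as in the removed code
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

def stream_reply(conversation: list[dict]):
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # generate() blocks until completion, so it runs on a background thread
    # while the streamer is consumed incrementally on this one.
    Thread(
        target=model.generate,
        kwargs=dict(input_ids=input_ids, streamer=streamer, max_new_tokens=256, do_sample=True),
    ).start()
    partial = []
    for text in streamer:
        partial.append(text)
        yield "".join(partial)  # each partial string can be rendered as it arrives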
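As committed, the new request would not run: `false` is not a Python name, and passing the nested payload via `params=` puts it on the query string rather than in the JSON body that an Ollama-style `/api/chat` endpoint expects. A minimal working sketch of the same non-streaming call, keeping the URL, model name, and options from the diff and assuming the response shape the committed code already reads:

import requests

API_URL = "https://social-warthog-fleet.ngrok-free.app/api/chat"  # endpoint from the diff

def chat_once(conversation: list[dict]) -> str:
    # Send the chat history as a JSON body (not query params) with Python booleans.
    r = requests.post(
        API_URL,
        json={
            "model": "miqu",
            "messages": conversation,  # [{"role": "user"/"assistant", "content": ...}, ...]
            "stream": False,
            "options": {"num_predict": 128, "temperature": 1, "top_p": 0.95},
        },
        timeout=60,
    )
    r.raise_for_status()
    # A non-streaming Ollama-style response carries the reply under message.content.
    return r.json()["message"]["content"]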