Commit 630e57e
zR committed
Parent(s): 9390ab5

init

Files changed:
- .idea/.gitignore +8 -0
- .idea/LongWriter.iml +8 -0
- .idea/inspectionProfiles/Project_Default.xml +21 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- README.md +7 -2
- app.py +112 -59
- requirements.txt +11 -1
.idea/.gitignore  ADDED
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
.idea/LongWriter.iml  ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="PYTHON_MODULE" version="4">
+  <component name="NewModuleRootManager">
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
.idea/inspectionProfiles/Project_Default.xml  ADDED
@@ -0,0 +1,21 @@
+<component name="InspectionProjectProfileManager">
+  <profile version="1.0">
+    <option name="myName" value="Project Default" />
+    <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+      <option name="ignoredPackages">
+        <value>
+          <list size="8">
+            <item index="0" class="java.lang.String" itemvalue="openai" />
+            <item index="1" class="java.lang.String" itemvalue="sse_starlette" />
+            <item index="2" class="java.lang.String" itemvalue="fastapi" />
+            <item index="3" class="java.lang.String" itemvalue="timm" />
+            <item index="4" class="java.lang.String" itemvalue="gradio" />
+            <item index="5" class="java.lang.String" itemvalue="uvicorn" />
+            <item index="6" class="java.lang.String" itemvalue="diffusers" />
+            <item index="7" class="java.lang.String" itemvalue="transformers" />
+          </list>
+        </value>
+      </option>
+    </inspection_tool>
+  </profile>
+</component>
.idea/inspectionProfiles/profiles_settings.xml  ADDED
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
.idea/modules.xml  ADDED
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/LongWriter.iml" filepath="$PROJECT_DIR$/.idea/LongWriter.iml" />
+    </modules>
+  </component>
+</project>
.idea/vcs.xml  ADDED
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="" vcs="Git" />
+  </component>
+</project>
README.md  CHANGED
@@ -4,9 +4,14 @@ emoji: 💬
 colorFrom: yellow
 colorTo: purple
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.41.1
+app_port: 7860
 app_file: app.py
 pinned: false
 ---
 
-
+# LongWriter
+
+```shell
+python app.py
+```
app.py  CHANGED
@@ -1,63 +1,116 @@
+from threading import Thread
+import spaces
 import gradio as gr
-
-
-
-
-
-
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
+import torch
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    StoppingCriteria,
+    StoppingCriteriaList,
+    TextIteratorStreamer
 )
 
+model = AutoModelForCausalLM.from_pretrained("THUDM/longwriter-glm4-9b", trust_remote_code=True, device_map='auto')
+tokenizer = AutoTokenizer.from_pretrained("THUDM/longwriter-glm4-9b", trust_remote_code=True)
 
-
-
+
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = model.config.eos_token_id
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:
+                return True
+        return False
+
+
+@spaces.GPU()
+def predict(history, prompt, max_length, top_p, temperature):
+    stop = StopOnTokens()
+    messages = []
+    if prompt:
+        messages.append({"role": "system", "content": prompt})
+    for idx, (user_msg, model_msg) in enumerate(history):
+        if prompt and idx == 0:
+            continue
+        if idx == len(history) - 1 and not model_msg:
+            query = user_msg
+            break
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if model_msg:
+            messages.append({"role": "assistant", "content": model_msg})
+
+    model_inputs = tokenizer.build_chat_input(query, history=messages, role='user').input_ids.to(
+        next(model.parameters()).device)
+    streamer = TextIteratorStreamer(tokenizer, timeout=600, skip_prompt=True, skip_special_tokens=True)
+    eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"),
+                    tokenizer.get_command("<|observation|>")]
+    generate_kwargs = {
+        "input_ids": model_inputs,
+        "streamer": streamer,
+        "max_new_tokens": max_length,
+        "do_sample": True,
+        "top_p": top_p,
+        "temperature": temperature,
+        "stopping_criteria": StoppingCriteriaList([stop]),
+        "repetition_penalty": 1,
+        "eos_token_id": eos_token_id,
+    }
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+    for new_token in streamer:
+        if new_token == '<|user|>':
+            continue
+        elif new_token:
+            history[-1][1] += new_token
+        yield history
+
+
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
+        longwriter-glm4-9b Huggingface Space🤗
+        </div>
+        <div style="text-align: center;">
+        <a href="https://huggingface.co/THUDM/LongWriter-glm4-9b">🤗 Model Hub</a> |
+        <a href="https://github.com/THUDM/LongWriter">🌐 Github</a> |
+        <a href="https://arxiv.org/pdf/2408.07055">📜 arxiv</a>
+        </div>
+        """
+    )
+    chatbot = gr.Chatbot()
+
+    with gr.Row():
+        with gr.Column(scale=3):
+            with gr.Column(scale=12):
+                user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10, container=False)
+            with gr.Column(min_width=32, scale=1):
+                submitBtn = gr.Button("Submit")
+        with gr.Column(scale=1):
+            prompt_input = gr.Textbox(show_label=False, placeholder="Prompt", lines=10, container=False)
+            pBtn = gr.Button("Set Prompt")
+        with gr.Column(scale=1):
+            emptyBtn = gr.Button("Clear History")
+            max_length = gr.Slider(0, 10000000, value=10000, step=1.0, label="Maximum length", interactive=True)
+            top_p = gr.Slider(0, 1, value=0.8, step=0.01, label="Top P", interactive=True)
+            temperature = gr.Slider(0.01, 1, value=0.6, step=0.01, label="Temperature", interactive=True)
+
+
+    def user(query, history):
+        return "", history + [[query, ""]]
+
+
+    def set_prompt(prompt_text):
+        return [[prompt_text, "Set prompt successfully"]]
+
+
+    pBtn.click(set_prompt, inputs=[prompt_input], outputs=chatbot)
+
+    submitBtn.click(user, [user_input, chatbot], [user_input, chatbot], queue=False).then(
+        predict, [chatbot, prompt_input, max_length, top_p, temperature], chatbot
+    )
+    emptyBtn.click(lambda: (None, None), None, [chatbot, prompt_input], queue=False)
+
+demo.queue()
+demo.launch()
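The heart of the rewritten app.py is the standard Transformers streaming recipe: `model.generate` runs on a worker thread while the Gradio callback drains a `TextIteratorStreamer`, yielding the partial chat history on every chunk. Below is a minimal, self-contained sketch of that pattern; the tiny `sshleifer/tiny-gpt2` checkpoint is an illustrative stand-in for the 9B model, chosen only so the sketch runs quickly on CPU.

```python
# Minimal sketch of the Thread + TextIteratorStreamer pattern used in predict().
# The checkpoint below is a stand-in for THUDM/longwriter-glm4-9b, for illustration only.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "sshleifer/tiny-gpt2"  # illustrative stand-in checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Write a short story:", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until completion, so it runs on a worker thread; the
# streamer is an iterator that yields decoded text chunks as tokens arrive.
thread = Thread(target=model.generate,
                kwargs={**inputs, "streamer": streamer, "max_new_tokens": 32})
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()
```

The `timeout=600` that app.py passes to its streamer makes the iterator raise if no new token arrives within ten minutes, guarding against a stalled generate call; it is omitted here for brevity.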
requirements.txt  CHANGED
@@ -1 +1,11 @@
-
+gradio==4.41.0
+torch==2.2.0
+transformers==4.44.0
+spaces==0.29.2
+accelerate==0.33.0
+sentencepiece==0.2.0
+huggingface-hub==0.24.5
+sentencepiece==0.2.0
+jinja2==3.1.4
+sentence_transformers==3.0.1
+tiktoken==0.7.0
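Since the Space only starts if these pins resolve, a quick sanity check after `pip install -r requirements.txt` can confirm the core packages are present at the expected versions. This is a convenience sketch, not part of the commit:

```python
# Convenience sketch: verify a few of the pinned requirements resolve in the
# active environment. Not part of the committed Space.
from importlib.metadata import PackageNotFoundError, version

pins = {"gradio": "4.41.0", "torch": "2.2.0", "transformers": "4.44.0", "spaces": "0.29.2"}
for name, expected in pins.items():
    try:
        installed = version(name)
        status = "ok" if installed == expected else f"version mismatch: found {installed}"
    except PackageNotFoundError:
        status = "missing"
    print(f"{name}=={expected} -> {status}")
```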