hayas committed
Commit: d0d7843
1 Parent(s): d5697df

gradio==5.1.0

Files changed (6):
  1. .python-version +1 -0
  2. README.md +1 -1
  3. app.py +23 -12
  4. pyproject.toml +20 -0
  5. requirements.txt +261 -8
  6. uv.lock +0 -0
.python-version ADDED
@@ -0,0 +1 @@
+3.10
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐢
 colorFrom: purple
 colorTo: purple
 sdk: gradio
-sdk_version: 4.27.0
+sdk_version: 5.1.0
 app_file: app.py
 pinned: false
 ---
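The `sdk_version` field in a Space's README front matter pins the Gradio release that Hugging Face installs at build time, so this bump to 5.1.0 has to move in lockstep with the `gradio` pins in pyproject.toml and requirements.txt below.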
app.py CHANGED
@@ -7,7 +7,12 @@ from typing import Iterator
 import gradio as gr
 import spaces
 import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from transformers import (
+    AutoModelForCausalLM,
+    AutoTokenizer,
+    BitsAndBytesConfig,
+    TextIteratorStreamer,
+)
 
 DESCRIPTION = """# Swallow-13B instruct"""
 
@@ -18,7 +23,10 @@ if torch.cuda.is_available():
     model_name = "tokyotech-llm/Swallow-13b-instruct-hf"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(
-        model_name, load_in_8bit=True, low_cpu_mem_usage=True, device_map="auto"
+        model_name,
+        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
+        low_cpu_mem_usage=True,
+        device_map="auto",
     )
 
 MAX_INPUT_TOKENS = 2048
@@ -30,7 +38,9 @@ PROMPT_DICT = {
         "### 指示:\n{instruction}\n\n### 入力:\n{input}\n\n### 応答:"
     ),
     "prompt_no_input": (
-        "以下に、あるタスクを説明する指示があります。" "リクエストを適切に完了するための回答を記述してください。\n\n" "### 指示:\n{instruction}\n\n### 応答:"
+        "以下に、あるタスクを説明する指示があります。"
+        "リクエストを適切に完了するための回答を記述してください。\n\n"
+        "### 指示:\n{instruction}\n\n### 応答:"
     ),
 }
 
@@ -94,13 +104,8 @@ def process_example(instruction: str, input_text: str) -> Iterator[str]:
     yield from run(instruction, input_text)
 
 
-with gr.Blocks(css="style.css") as demo:
+with gr.Blocks(css_paths="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(
-        value="Duplicate Space for private use",
-        elem_id="duplicate-button",
-        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
-    )
 
     with gr.Row():
         with gr.Column():
@@ -125,8 +130,14 @@ with gr.Blocks(css="style.css") as demo:
 
     gr.Examples(
         examples=[
-            ["以下のトピックに関する詳細な情報を提供してください。", "東京工業大学の主なキャンパスについて教えてください。"],
-            ["以下のトピックに関する詳細な情報を提供してください。", "夢オチとは何かについて教えてください。"],
+            [
+                "以下のトピックに関する詳細な情報を提供してください。",
+                "東京工業大学の主なキャンパスについて教えてください。",
+            ],
+            [
+                "以下のトピックに関する詳細な情報を提供してください。",
+                "夢オチとは何かについて教えてください。",
+            ],
             ["暴れん坊将軍って誰のことですか?", ""],
         ],
         inputs=[instruction, input_text],
@@ -137,4 +148,4 @@ with gr.Blocks(css="style.css") as demo:
     )
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.launch()
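Two of the app.py changes track upstream API moves. Recent transformers releases deprecate passing `load_in_8bit=True` directly to `from_pretrained()` in favor of a `BitsAndBytesConfig`, and Gradio 5 reserves `css=` for inline CSS strings, with file paths moving to `css_paths=`; queueing is also enabled by default in current Gradio, which is why the explicit `demo.queue(max_size=20)` call goes away. A minimal sketch of the new loading path, assuming a CUDA machine with bitsandbytes installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "tokyotech-llm/Swallow-13b-instruct-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name)

if torch.cuda.is_available():
    # 8-bit weights via bitsandbytes; the config object replaces the
    # deprecated load_in_8bit=True keyword on from_pretrained().
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        quantization_config=BitsAndBytesConfig(load_in_8bit=True),
        low_cpu_mem_usage=True,
        device_map="auto",
    )
```

Note that dropping the `queue()` call also removes the `max_size=20` cap on pending requests; a `demo.queue(max_size=...)` can be reinstated if the Space needs one.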
pyproject.toml ADDED
@@ -0,0 +1,20 @@
+[project]
+name = "swallow-13b-instruct"
+version = "0.1.0"
+description = ""
+readme = "README.md"
+requires-python = ">=3.10"
+dependencies = [
+    "accelerate>=1.0.1",
+    "bitsandbytes>=0.44.1",
+    "blobfile>=3.0.0",
+    "gradio>=5.1.0",
+    "hf-transfer>=0.1.8",
+    "protobuf>=5.28.2",
+    "sentencepiece>=0.2.0",
+    "setuptools>=75.2.0",
+    "spaces>=0.30.4",
+    "tiktoken>=0.8.0",
+    "torch==2.4.0",
+    "transformers>=4.45.2",
+]
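With pyproject.toml as the source of truth, the requirements.txt below is no longer hand-maintained: it is compiled with `uv pip compile pyproject.toml -o requirements.txt`, as recorded in its header. A small, hypothetical sanity check (not part of the commit) for confirming that a running environment matches the new direct pins:

```python
# Hypothetical helper, not part of this commit: print the installed
# versions of the directly pinned packages for comparison against
# pyproject.toml.
from importlib.metadata import PackageNotFoundError, version

for pkg in ("gradio", "torch", "transformers", "bitsandbytes", "accelerate"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg} is not installed")
```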
requirements.txt CHANGED
@@ -1,8 +1,261 @@
-accelerate==0.29.3
-bitsandbytes==0.43.1
-gradio==4.27.0
-scipy==1.13.0
-sentencepiece==0.1.99
-spaces==0.26.1
-torch==2.0.0
-transformers==4.40.0
+# This file was autogenerated by uv via the following command:
+#    uv pip compile pyproject.toml -o requirements.txt
+accelerate==1.0.1
+    # via swallow-13b-instruct (pyproject.toml)
+aiofiles==23.2.1
+    # via gradio
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.6.2.post1
+    # via
+    #   gradio
+    #   httpx
+    #   starlette
+bitsandbytes==0.44.1
+    # via swallow-13b-instruct (pyproject.toml)
+blobfile==3.0.0
+    # via swallow-13b-instruct (pyproject.toml)
+certifi==2024.8.30
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+charset-normalizer==3.4.0
+    # via requests
+click==8.1.7
+    # via
+    #   typer
+    #   uvicorn
+exceptiongroup==1.2.2
+    # via anyio
+fastapi==0.115.2
+    # via gradio
+ffmpy==0.4.0
+    # via gradio
+filelock==3.16.1
+    # via
+    #   blobfile
+    #   huggingface-hub
+    #   torch
+    #   transformers
+    #   triton
+fsspec==2024.9.0
+    # via
+    #   gradio-client
+    #   huggingface-hub
+    #   torch
+gradio==5.1.0
+    # via
+    #   swallow-13b-instruct (pyproject.toml)
+    #   spaces
+gradio-client==1.4.0
+    # via gradio
+h11==0.14.0
+    # via
+    #   httpcore
+    #   uvicorn
+hf-transfer==0.1.8
+    # via swallow-13b-instruct (pyproject.toml)
+httpcore==1.0.6
+    # via httpx
+httpx==0.27.2
+    # via
+    #   gradio
+    #   gradio-client
+    #   spaces
+huggingface-hub==0.26.0
+    # via
+    #   accelerate
+    #   gradio
+    #   gradio-client
+    #   tokenizers
+    #   transformers
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+jinja2==3.1.4
+    # via
+    #   gradio
+    #   torch
+lxml==5.3.0
+    # via blobfile
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==2.1.5
+    # via
+    #   gradio
+    #   jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+mpmath==1.3.0
+    # via sympy
+networkx==3.4.1
+    # via torch
+numpy==2.1.2
+    # via
+    #   accelerate
+    #   bitsandbytes
+    #   gradio
+    #   pandas
+    #   transformers
+nvidia-cublas-cu12==12.1.3.1
+    # via
+    #   nvidia-cudnn-cu12
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-cuda-cupti-cu12==12.1.105
+    # via torch
+nvidia-cuda-nvrtc-cu12==12.1.105
+    # via torch
+nvidia-cuda-runtime-cu12==12.1.105
+    # via torch
+nvidia-cudnn-cu12==9.1.0.70
+    # via torch
+nvidia-cufft-cu12==11.0.2.54
+    # via torch
+nvidia-curand-cu12==10.3.2.106
+    # via torch
+nvidia-cusolver-cu12==11.4.5.107
+    # via torch
+nvidia-cusparse-cu12==12.1.0.106
+    # via
+    #   nvidia-cusolver-cu12
+    #   torch
+nvidia-nccl-cu12==2.20.5
+    # via torch
+nvidia-nvjitlink-cu12==12.6.77
+    # via
+    #   nvidia-cusolver-cu12
+    #   nvidia-cusparse-cu12
+nvidia-nvtx-cu12==12.1.105
+    # via torch
+orjson==3.10.9
+    # via gradio
+packaging==24.1
+    # via
+    #   accelerate
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   spaces
+    #   transformers
+pandas==2.2.3
+    # via gradio
+pillow==10.4.0
+    # via gradio
+protobuf==5.28.2
+    # via swallow-13b-instruct (pyproject.toml)
+psutil==5.9.8
+    # via
+    #   accelerate
+    #   spaces
+pycryptodomex==3.21.0
+    # via blobfile
+pydantic==2.9.2
+    # via
+    #   fastapi
+    #   gradio
+    #   spaces
+pydantic-core==2.23.4
+    # via pydantic
+pydub==0.25.1
+    # via gradio
+pygments==2.18.0
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-multipart==0.0.12
+    # via gradio
+pytz==2024.2
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   accelerate
+    #   gradio
+    #   huggingface-hub
+    #   transformers
+regex==2024.9.11
+    # via
+    #   tiktoken
+    #   transformers
+requests==2.32.3
+    # via
+    #   huggingface-hub
+    #   spaces
+    #   tiktoken
+    #   transformers
+rich==13.9.2
+    # via typer
+ruff==0.7.0
+    # via gradio
+safetensors==0.4.5
+    # via
+    #   accelerate
+    #   transformers
+semantic-version==2.10.0
+    # via gradio
+sentencepiece==0.2.0
+    # via swallow-13b-instruct (pyproject.toml)
+setuptools==75.2.0
+    # via swallow-13b-instruct (pyproject.toml)
+shellingham==1.5.4
+    # via typer
+six==1.16.0
+    # via python-dateutil
+sniffio==1.3.1
+    # via
+    #   anyio
+    #   httpx
+spaces==0.30.4
+    # via swallow-13b-instruct (pyproject.toml)
+starlette==0.40.0
+    # via fastapi
+sympy==1.13.3
+    # via torch
+tiktoken==0.8.0
+    # via swallow-13b-instruct (pyproject.toml)
+tokenizers==0.20.1
+    # via transformers
+tomlkit==0.12.0
+    # via gradio
+torch==2.4.0
+    # via
+    #   swallow-13b-instruct (pyproject.toml)
+    #   accelerate
+    #   bitsandbytes
+tqdm==4.66.5
+    # via
+    #   huggingface-hub
+    #   transformers
+transformers==4.45.2
+    # via swallow-13b-instruct (pyproject.toml)
+triton==3.0.0
+    # via torch
+typer==0.12.5
+    # via gradio
+typing-extensions==4.12.2
+    # via
+    #   anyio
+    #   fastapi
+    #   gradio
+    #   gradio-client
+    #   huggingface-hub
+    #   pydantic
+    #   pydantic-core
+    #   rich
+    #   spaces
+    #   torch
+    #   typer
+    #   uvicorn
+tzdata==2024.2
+    # via pandas
+urllib3==2.2.3
+    # via
+    #   blobfile
+    #   requests
+uvicorn==0.32.0
+    # via gradio
+websockets==12.0
+    # via gradio-client
uv.lock ADDED
The diff for this file is too large to render. See raw diff