twn39 committed
Commit bb159c0
Parent: fb4e9c9

add init files

.env ADDED
File without changes
.gitignore ADDED
@@ -0,0 +1,11 @@
+ # python generated files
+ __pycache__/
+ *.py[oc]
+ build/
+ dist/
+ wheels/
+ *.egg-info
+
+ # venv
+ .venv
+ .idea
.python-version ADDED
@@ -0,0 +1 @@
+ 3.10.14
pyproject.toml ADDED
@@ -0,0 +1,34 @@
+ [project]
+ name = "aitoolkits-webui"
+ version = "0.1.0"
+ description = "Add your description here"
+ authors = [
+     { name = "twn39", email = "[email protected]" }
+ ]
+ dependencies = [
+     "gradio>=4.38.1",
+     "diffusers>=0.29.2",
+     "langchain>=0.2.7",
+     "llama-index>=0.10.55",
+     "pydantic>=2.8.2",
+     "pydantic-settings>=2.3.4",
+     "llama-index-llms-openai>=0.1.25",
+     "langchain-openai>=0.1.16",
+     "attrs>=23.2.0",
+ ]
+ readme = "README.md"
+ requires-python = ">= 3.8"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.rye]
+ managed = true
+ dev-dependencies = []
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/aitoolkits_webui"]
requirements-dev.lock ADDED
@@ -0,0 +1,418 @@
+ # generated by rye
+ # use `rye lock` or `rye sync` to update this lockfile
+ #
+ # last locked with the following flags:
+ # pre: false
+ # features: []
+ # all-features: false
+ # with-sources: false
+ # generate-hashes: false
+
+ -e file:.
+ aiofiles==23.2.1
+ # via gradio
+ aiohttp==3.9.5
+ # via langchain
+ # via llama-index-core
+ # via llama-index-legacy
+ aiosignal==1.3.1
+ # via aiohttp
+ altair==5.3.0
+ # via gradio
+ annotated-types==0.7.0
+ # via pydantic
+ anyio==4.4.0
+ # via httpx
+ # via openai
+ # via starlette
+ # via watchfiles
+ async-timeout==4.0.3
+ # via aiohttp
+ # via langchain
+ attrs==23.2.0
+ # via aiohttp
+ # via aitoolkits-webui
+ # via jsonschema
+ # via referencing
+ beautifulsoup4==4.12.3
+ # via llama-index-readers-file
+ certifi==2024.7.4
+ # via httpcore
+ # via httpx
+ # via requests
+ charset-normalizer==3.3.2
+ # via requests
+ click==8.1.7
+ # via nltk
+ # via typer
+ # via uvicorn
+ colorama==0.4.6
+ # via click
+ # via tqdm
+ # via uvicorn
+ contourpy==1.2.1
+ # via matplotlib
+ cycler==0.12.1
+ # via matplotlib
+ dataclasses-json==0.6.7
+ # via llama-index-core
+ # via llama-index-legacy
+ deprecated==1.2.14
+ # via llama-index-core
+ # via llama-index-legacy
+ diffusers==0.29.2
+ # via aitoolkits-webui
+ dirtyjson==1.0.8
+ # via llama-index-core
+ # via llama-index-legacy
+ distro==1.9.0
+ # via openai
+ dnspython==2.6.1
+ # via email-validator
+ email-validator==2.2.0
+ # via fastapi
+ exceptiongroup==1.2.2
+ # via anyio
+ fastapi==0.111.0
+ # via gradio
+ fastapi-cli==0.0.4
+ # via fastapi
+ ffmpy==0.3.2
+ # via gradio
+ filelock==3.15.4
+ # via diffusers
+ # via huggingface-hub
+ fonttools==4.53.1
+ # via matplotlib
+ frozenlist==1.4.1
+ # via aiohttp
+ # via aiosignal
+ fsspec==2024.6.1
+ # via gradio-client
+ # via huggingface-hub
+ # via llama-index-core
+ # via llama-index-legacy
+ gradio==4.38.1
+ # via aitoolkits-webui
+ gradio-client==1.1.0
+ # via gradio
+ greenlet==3.0.3
+ # via sqlalchemy
+ h11==0.14.0
+ # via httpcore
+ # via uvicorn
+ httpcore==1.0.5
+ # via httpx
+ httptools==0.6.1
+ # via uvicorn
+ httpx==0.27.0
+ # via fastapi
+ # via gradio
+ # via gradio-client
+ # via llama-cloud
+ # via llama-index-core
+ # via llama-index-legacy
+ # via openai
+ huggingface-hub==0.23.4
+ # via diffusers
+ # via gradio
+ # via gradio-client
+ idna==3.7
+ # via anyio
+ # via email-validator
+ # via httpx
+ # via requests
+ # via yarl
+ importlib-metadata==8.0.0
+ # via diffusers
+ importlib-resources==6.4.0
+ # via gradio
+ jinja2==3.1.4
+ # via altair
+ # via fastapi
+ # via gradio
+ joblib==1.4.2
+ # via nltk
+ jsonpatch==1.33
+ # via langchain-core
+ jsonpointer==3.0.0
+ # via jsonpatch
+ jsonschema==4.23.0
+ # via altair
+ jsonschema-specifications==2023.12.1
+ # via jsonschema
+ kiwisolver==1.4.5
+ # via matplotlib
+ langchain==0.2.7
+ # via aitoolkits-webui
+ langchain-core==0.2.18
+ # via langchain
+ # via langchain-openai
+ # via langchain-text-splitters
+ langchain-openai==0.1.16
+ # via aitoolkits-webui
+ langchain-text-splitters==0.2.2
+ # via langchain
+ langsmith==0.1.85
+ # via langchain
+ # via langchain-core
+ llama-cloud==0.0.9
+ # via llama-index-indices-managed-llama-cloud
+ llama-index==0.10.55
+ # via aitoolkits-webui
+ llama-index-agent-openai==0.2.8
+ # via llama-index
+ # via llama-index-program-openai
+ llama-index-cli==0.1.12
+ # via llama-index
+ llama-index-core==0.10.55
+ # via llama-index
+ # via llama-index-agent-openai
+ # via llama-index-cli
+ # via llama-index-embeddings-openai
+ # via llama-index-indices-managed-llama-cloud
+ # via llama-index-llms-openai
+ # via llama-index-multi-modal-llms-openai
+ # via llama-index-program-openai
+ # via llama-index-question-gen-openai
+ # via llama-index-readers-file
+ # via llama-index-readers-llama-parse
+ # via llama-parse
+ llama-index-embeddings-openai==0.1.10
+ # via llama-index
+ # via llama-index-cli
+ llama-index-indices-managed-llama-cloud==0.2.5
+ # via llama-index
+ llama-index-legacy==0.9.48
+ # via llama-index
+ llama-index-llms-openai==0.1.25
+ # via aitoolkits-webui
+ # via llama-index
+ # via llama-index-agent-openai
+ # via llama-index-cli
+ # via llama-index-multi-modal-llms-openai
+ # via llama-index-program-openai
+ # via llama-index-question-gen-openai
+ llama-index-multi-modal-llms-openai==0.1.7
+ # via llama-index
+ llama-index-program-openai==0.1.6
+ # via llama-index
+ # via llama-index-question-gen-openai
+ llama-index-question-gen-openai==0.1.3
+ # via llama-index
+ llama-index-readers-file==0.1.30
+ # via llama-index
+ llama-index-readers-llama-parse==0.1.6
+ # via llama-index
+ llama-parse==0.4.7
+ # via llama-index-readers-llama-parse
+ markdown-it-py==3.0.0
+ # via rich
+ markupsafe==2.1.5
+ # via gradio
+ # via jinja2
+ marshmallow==3.21.3
+ # via dataclasses-json
+ matplotlib==3.9.1
+ # via gradio
+ mdurl==0.1.2
+ # via markdown-it-py
+ multidict==6.0.5
+ # via aiohttp
+ # via yarl
+ mypy-extensions==1.0.0
+ # via typing-inspect
+ nest-asyncio==1.6.0
+ # via llama-index-core
+ # via llama-index-legacy
+ networkx==3.3
+ # via llama-index-core
+ # via llama-index-legacy
+ nltk==3.8.1
+ # via llama-index-core
+ # via llama-index-legacy
+ numpy==1.26.4
+ # via altair
+ # via contourpy
+ # via diffusers
+ # via gradio
+ # via langchain
+ # via llama-index-core
+ # via llama-index-legacy
+ # via matplotlib
+ # via pandas
+ openai==1.35.13
+ # via langchain-openai
+ # via llama-index-agent-openai
+ # via llama-index-core
+ # via llama-index-legacy
+ orjson==3.10.6
+ # via fastapi
+ # via gradio
+ # via langsmith
+ packaging==24.1
+ # via altair
+ # via gradio
+ # via gradio-client
+ # via huggingface-hub
+ # via langchain-core
+ # via marshmallow
+ # via matplotlib
+ pandas==2.2.2
+ # via altair
+ # via gradio
+ # via llama-index-core
+ # via llama-index-legacy
+ pillow==10.4.0
+ # via diffusers
+ # via gradio
+ # via llama-index-core
+ # via matplotlib
+ pydantic==2.8.2
+ # via aitoolkits-webui
+ # via fastapi
+ # via gradio
+ # via langchain
+ # via langchain-core
+ # via langsmith
+ # via llama-cloud
+ # via openai
+ # via pydantic-settings
+ pydantic-core==2.20.1
+ # via pydantic
+ pydantic-settings==2.3.4
+ # via aitoolkits-webui
+ pydub==0.25.1
+ # via gradio
+ pygments==2.18.0
+ # via rich
+ pyparsing==3.1.2
+ # via matplotlib
+ pypdf==4.2.0
+ # via llama-index-readers-file
+ python-dateutil==2.9.0.post0
+ # via matplotlib
+ # via pandas
+ python-dotenv==1.0.1
+ # via pydantic-settings
+ # via uvicorn
+ python-multipart==0.0.9
+ # via fastapi
+ # via gradio
+ pytz==2024.1
+ # via pandas
+ pyyaml==6.0.1
+ # via gradio
+ # via huggingface-hub
+ # via langchain
+ # via langchain-core
+ # via llama-index-core
+ # via uvicorn
+ referencing==0.35.1
+ # via jsonschema
+ # via jsonschema-specifications
+ regex==2024.5.15
+ # via diffusers
+ # via nltk
+ # via tiktoken
+ requests==2.32.3
+ # via diffusers
+ # via huggingface-hub
+ # via langchain
+ # via langsmith
+ # via llama-index-core
+ # via llama-index-legacy
+ # via tiktoken
+ rich==13.7.1
+ # via typer
+ rpds-py==0.19.0
+ # via jsonschema
+ # via referencing
+ ruff==0.5.1
+ # via gradio
+ safetensors==0.4.3
+ # via diffusers
+ semantic-version==2.10.0
+ # via gradio
+ shellingham==1.5.4
+ # via typer
+ six==1.16.0
+ # via python-dateutil
+ sniffio==1.3.1
+ # via anyio
+ # via httpx
+ # via openai
+ soupsieve==2.5
+ # via beautifulsoup4
+ sqlalchemy==2.0.31
+ # via langchain
+ # via llama-index-core
+ # via llama-index-legacy
+ starlette==0.37.2
+ # via fastapi
+ striprtf==0.0.26
+ # via llama-index-readers-file
+ tenacity==8.5.0
+ # via langchain
+ # via langchain-core
+ # via llama-index-core
+ # via llama-index-legacy
+ tiktoken==0.7.0
+ # via langchain-openai
+ # via llama-index-core
+ # via llama-index-legacy
+ tomlkit==0.12.0
+ # via gradio
+ toolz==0.12.1
+ # via altair
+ tqdm==4.66.4
+ # via huggingface-hub
+ # via llama-index-core
+ # via nltk
+ # via openai
+ typer==0.12.3
+ # via fastapi-cli
+ # via gradio
+ typing-extensions==4.12.2
+ # via altair
+ # via anyio
+ # via fastapi
+ # via gradio
+ # via gradio-client
+ # via huggingface-hub
+ # via llama-index-core
+ # via llama-index-legacy
+ # via openai
+ # via pydantic
+ # via pydantic-core
+ # via pypdf
+ # via sqlalchemy
+ # via typer
+ # via typing-inspect
+ # via uvicorn
+ typing-inspect==0.9.0
+ # via dataclasses-json
+ # via llama-index-core
+ # via llama-index-legacy
+ tzdata==2024.1
+ # via pandas
+ ujson==5.10.0
+ # via fastapi
+ urllib3==2.2.2
+ # via gradio
+ # via requests
+ uvicorn==0.30.1
+ # via fastapi
+ # via gradio
+ watchfiles==0.22.0
+ # via uvicorn
+ websockets==11.0.3
+ # via gradio-client
+ # via uvicorn
+ wrapt==1.16.0
+ # via deprecated
+ # via llama-index-core
+ yarl==1.9.4
+ # via aiohttp
+ zipp==3.19.2
+ # via importlib-metadata
requirements.lock ADDED
@@ -0,0 +1,418 @@
+ # generated by rye
+ # use `rye lock` or `rye sync` to update this lockfile
+ #
+ # last locked with the following flags:
+ # pre: false
+ # features: []
+ # all-features: false
+ # with-sources: false
+ # generate-hashes: false
+
+ -e file:.
+ aiofiles==23.2.1
+ # via gradio
+ aiohttp==3.9.5
+ # via langchain
+ # via llama-index-core
+ # via llama-index-legacy
+ aiosignal==1.3.1
+ # via aiohttp
+ altair==5.3.0
+ # via gradio
+ annotated-types==0.7.0
+ # via pydantic
+ anyio==4.4.0
+ # via httpx
+ # via openai
+ # via starlette
+ # via watchfiles
+ async-timeout==4.0.3
+ # via aiohttp
+ # via langchain
+ attrs==23.2.0
+ # via aiohttp
+ # via aitoolkits-webui
+ # via jsonschema
+ # via referencing
+ beautifulsoup4==4.12.3
+ # via llama-index-readers-file
+ certifi==2024.7.4
+ # via httpcore
+ # via httpx
+ # via requests
+ charset-normalizer==3.3.2
+ # via requests
+ click==8.1.7
+ # via nltk
+ # via typer
+ # via uvicorn
+ colorama==0.4.6
+ # via click
+ # via tqdm
+ # via uvicorn
+ contourpy==1.2.1
+ # via matplotlib
+ cycler==0.12.1
+ # via matplotlib
+ dataclasses-json==0.6.7
+ # via llama-index-core
+ # via llama-index-legacy
+ deprecated==1.2.14
+ # via llama-index-core
+ # via llama-index-legacy
+ diffusers==0.29.2
+ # via aitoolkits-webui
+ dirtyjson==1.0.8
+ # via llama-index-core
+ # via llama-index-legacy
+ distro==1.9.0
+ # via openai
+ dnspython==2.6.1
+ # via email-validator
+ email-validator==2.2.0
+ # via fastapi
+ exceptiongroup==1.2.2
+ # via anyio
+ fastapi==0.111.0
+ # via gradio
+ fastapi-cli==0.0.4
+ # via fastapi
+ ffmpy==0.3.2
+ # via gradio
+ filelock==3.15.4
+ # via diffusers
+ # via huggingface-hub
+ fonttools==4.53.1
+ # via matplotlib
+ frozenlist==1.4.1
+ # via aiohttp
+ # via aiosignal
+ fsspec==2024.6.1
+ # via gradio-client
+ # via huggingface-hub
+ # via llama-index-core
+ # via llama-index-legacy
+ gradio==4.38.1
+ # via aitoolkits-webui
+ gradio-client==1.1.0
+ # via gradio
+ greenlet==3.0.3
+ # via sqlalchemy
+ h11==0.14.0
+ # via httpcore
+ # via uvicorn
+ httpcore==1.0.5
+ # via httpx
+ httptools==0.6.1
+ # via uvicorn
+ httpx==0.27.0
+ # via fastapi
+ # via gradio
+ # via gradio-client
+ # via llama-cloud
+ # via llama-index-core
+ # via llama-index-legacy
+ # via openai
+ huggingface-hub==0.23.4
+ # via diffusers
+ # via gradio
+ # via gradio-client
+ idna==3.7
+ # via anyio
+ # via email-validator
+ # via httpx
+ # via requests
+ # via yarl
+ importlib-metadata==8.0.0
+ # via diffusers
+ importlib-resources==6.4.0
+ # via gradio
+ jinja2==3.1.4
+ # via altair
+ # via fastapi
+ # via gradio
+ joblib==1.4.2
+ # via nltk
+ jsonpatch==1.33
+ # via langchain-core
+ jsonpointer==3.0.0
+ # via jsonpatch
+ jsonschema==4.23.0
+ # via altair
+ jsonschema-specifications==2023.12.1
+ # via jsonschema
+ kiwisolver==1.4.5
+ # via matplotlib
+ langchain==0.2.7
+ # via aitoolkits-webui
+ langchain-core==0.2.18
+ # via langchain
+ # via langchain-openai
+ # via langchain-text-splitters
+ langchain-openai==0.1.16
+ # via aitoolkits-webui
+ langchain-text-splitters==0.2.2
+ # via langchain
+ langsmith==0.1.85
+ # via langchain
+ # via langchain-core
+ llama-cloud==0.0.9
+ # via llama-index-indices-managed-llama-cloud
+ llama-index==0.10.55
+ # via aitoolkits-webui
+ llama-index-agent-openai==0.2.8
+ # via llama-index
+ # via llama-index-program-openai
+ llama-index-cli==0.1.12
+ # via llama-index
+ llama-index-core==0.10.55
+ # via llama-index
+ # via llama-index-agent-openai
+ # via llama-index-cli
+ # via llama-index-embeddings-openai
+ # via llama-index-indices-managed-llama-cloud
+ # via llama-index-llms-openai
+ # via llama-index-multi-modal-llms-openai
+ # via llama-index-program-openai
+ # via llama-index-question-gen-openai
+ # via llama-index-readers-file
+ # via llama-index-readers-llama-parse
+ # via llama-parse
+ llama-index-embeddings-openai==0.1.10
+ # via llama-index
+ # via llama-index-cli
+ llama-index-indices-managed-llama-cloud==0.2.5
+ # via llama-index
+ llama-index-legacy==0.9.48
+ # via llama-index
+ llama-index-llms-openai==0.1.25
+ # via aitoolkits-webui
+ # via llama-index
+ # via llama-index-agent-openai
+ # via llama-index-cli
+ # via llama-index-multi-modal-llms-openai
+ # via llama-index-program-openai
+ # via llama-index-question-gen-openai
+ llama-index-multi-modal-llms-openai==0.1.7
+ # via llama-index
+ llama-index-program-openai==0.1.6
+ # via llama-index
+ # via llama-index-question-gen-openai
+ llama-index-question-gen-openai==0.1.3
+ # via llama-index
+ llama-index-readers-file==0.1.30
+ # via llama-index
+ llama-index-readers-llama-parse==0.1.6
+ # via llama-index
+ llama-parse==0.4.7
+ # via llama-index-readers-llama-parse
+ markdown-it-py==3.0.0
+ # via rich
+ markupsafe==2.1.5
+ # via gradio
+ # via jinja2
+ marshmallow==3.21.3
+ # via dataclasses-json
+ matplotlib==3.9.1
+ # via gradio
+ mdurl==0.1.2
+ # via markdown-it-py
+ multidict==6.0.5
+ # via aiohttp
+ # via yarl
+ mypy-extensions==1.0.0
+ # via typing-inspect
+ nest-asyncio==1.6.0
+ # via llama-index-core
+ # via llama-index-legacy
+ networkx==3.3
+ # via llama-index-core
+ # via llama-index-legacy
+ nltk==3.8.1
+ # via llama-index-core
+ # via llama-index-legacy
+ numpy==1.26.4
+ # via altair
+ # via contourpy
+ # via diffusers
+ # via gradio
+ # via langchain
+ # via llama-index-core
+ # via llama-index-legacy
+ # via matplotlib
+ # via pandas
+ openai==1.35.13
+ # via langchain-openai
+ # via llama-index-agent-openai
+ # via llama-index-core
+ # via llama-index-legacy
+ orjson==3.10.6
+ # via fastapi
+ # via gradio
+ # via langsmith
+ packaging==24.1
+ # via altair
+ # via gradio
+ # via gradio-client
+ # via huggingface-hub
+ # via langchain-core
+ # via marshmallow
+ # via matplotlib
+ pandas==2.2.2
+ # via altair
+ # via gradio
+ # via llama-index-core
+ # via llama-index-legacy
+ pillow==10.4.0
+ # via diffusers
+ # via gradio
+ # via llama-index-core
+ # via matplotlib
+ pydantic==2.8.2
+ # via aitoolkits-webui
+ # via fastapi
+ # via gradio
+ # via langchain
+ # via langchain-core
+ # via langsmith
+ # via llama-cloud
+ # via openai
+ # via pydantic-settings
+ pydantic-core==2.20.1
+ # via pydantic
+ pydantic-settings==2.3.4
+ # via aitoolkits-webui
+ pydub==0.25.1
+ # via gradio
+ pygments==2.18.0
+ # via rich
+ pyparsing==3.1.2
+ # via matplotlib
+ pypdf==4.2.0
+ # via llama-index-readers-file
+ python-dateutil==2.9.0.post0
+ # via matplotlib
+ # via pandas
+ python-dotenv==1.0.1
+ # via pydantic-settings
+ # via uvicorn
+ python-multipart==0.0.9
+ # via fastapi
+ # via gradio
+ pytz==2024.1
+ # via pandas
+ pyyaml==6.0.1
+ # via gradio
+ # via huggingface-hub
+ # via langchain
+ # via langchain-core
+ # via llama-index-core
+ # via uvicorn
+ referencing==0.35.1
+ # via jsonschema
+ # via jsonschema-specifications
+ regex==2024.5.15
+ # via diffusers
+ # via nltk
+ # via tiktoken
+ requests==2.32.3
+ # via diffusers
+ # via huggingface-hub
+ # via langchain
+ # via langsmith
+ # via llama-index-core
+ # via llama-index-legacy
+ # via tiktoken
+ rich==13.7.1
+ # via typer
+ rpds-py==0.19.0
+ # via jsonschema
+ # via referencing
+ ruff==0.5.1
+ # via gradio
+ safetensors==0.4.3
+ # via diffusers
+ semantic-version==2.10.0
+ # via gradio
+ shellingham==1.5.4
+ # via typer
+ six==1.16.0
+ # via python-dateutil
+ sniffio==1.3.1
+ # via anyio
+ # via httpx
+ # via openai
+ soupsieve==2.5
+ # via beautifulsoup4
+ sqlalchemy==2.0.31
+ # via langchain
+ # via llama-index-core
+ # via llama-index-legacy
+ starlette==0.37.2
+ # via fastapi
+ striprtf==0.0.26
+ # via llama-index-readers-file
+ tenacity==8.5.0
+ # via langchain
+ # via langchain-core
+ # via llama-index-core
+ # via llama-index-legacy
+ tiktoken==0.7.0
+ # via langchain-openai
+ # via llama-index-core
+ # via llama-index-legacy
+ tomlkit==0.12.0
+ # via gradio
+ toolz==0.12.1
+ # via altair
+ tqdm==4.66.4
+ # via huggingface-hub
+ # via llama-index-core
+ # via nltk
+ # via openai
+ typer==0.12.3
+ # via fastapi-cli
+ # via gradio
+ typing-extensions==4.12.2
+ # via altair
+ # via anyio
+ # via fastapi
+ # via gradio
+ # via gradio-client
+ # via huggingface-hub
+ # via llama-index-core
+ # via llama-index-legacy
+ # via openai
+ # via pydantic
+ # via pydantic-core
+ # via pypdf
+ # via sqlalchemy
+ # via typer
+ # via typing-inspect
+ # via uvicorn
+ typing-inspect==0.9.0
+ # via dataclasses-json
+ # via llama-index-core
+ # via llama-index-legacy
+ tzdata==2024.1
+ # via pandas
+ ujson==5.10.0
+ # via fastapi
+ urllib3==2.2.2
+ # via gradio
+ # via requests
+ uvicorn==0.30.1
+ # via fastapi
+ # via gradio
+ watchfiles==0.22.0
+ # via uvicorn
+ websockets==11.0.3
+ # via gradio-client
+ # via uvicorn
+ wrapt==1.16.0
+ # via deprecated
+ # via llama-index-core
+ yarl==1.9.4
+ # via aiohttp
+ zipp==3.19.2
+ # via importlib-metadata
src/aitoolkits_webui/__init__.py ADDED
@@ -0,0 +1,2 @@
+ def hello() -> str:
+     return "Hello from aitoolkits-webui!"
src/aitoolkits_webui/config.py ADDED
@@ -0,0 +1,17 @@
+ from typing import Any, Callable, Set
+
+ from pydantic import (
+     AliasChoices,
+     AmqpDsn,
+     BaseModel,
+     Field,
+     ImportString,
+     PostgresDsn,
+     RedisDsn,
+ )
+
+ from pydantic_settings import BaseSettings, SettingsConfigDict
+
+
+ class Settings(BaseSettings):
+     model_config = SettingsConfigDict(env_file='.env', env_file_encoding='utf-8')
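The `Settings` class above only sets `model_config` and declares no fields yet. As a minimal sketch of how pydantic-settings would use it, declared fields are populated from the referenced `.env` file on instantiation; the `deepseek_api_key` field and the corresponding `DEEPSEEK_API_KEY` entry in `.env` are assumptions for illustration, not part of this commit.

```python
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    # same model_config as src/aitoolkits_webui/config.py
    model_config = SettingsConfigDict(env_file='.env', env_file_encoding='utf-8')

    # hypothetical field: filled from DEEPSEEK_API_KEY in .env (or the environment)
    deepseek_api_key: str = ''


settings = Settings()
print(settings.deepseek_api_key)
```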
src/aitoolkits_webui/llm.py ADDED
@@ -0,0 +1,35 @@
+ from typing import List
+ from attrs import define, field
+ from abc import ABC, abstractmethod
+
+
+ @define
+ class BaseLLM(ABC):
+     api_key: str = field(default='')
+
+     # abstract read-only properties: subclasses implement these with @property
+     # and callers read them as plain attributes (see DeepSeekLLM and src/app.py)
+     @property
+     @abstractmethod
+     def support_models(self) -> List[str]:
+         pass
+
+     @property
+     @abstractmethod
+     def base_url(self) -> str:
+         pass
+
+
+ @define
+ class DeepSeekLLM(BaseLLM):
+     _support_models = ['deepseek-chat', 'deepseek-coder']
+     _base_url = 'https://api.deepseek.com/v1'
+     _default_model = 'deepseek-chat'
+
+     @property
+     def support_models(self) -> List[str]:
+         return self._support_models
+
+     @property
+     def base_url(self) -> str:
+         return self._base_url
+
+     @property
+     def default_model(self) -> str:
+         return self._default_model
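A brief usage sketch of `DeepSeekLLM`, mirroring how `src/app.py` wires it into LangChain's `ChatOpenAI`; the API key string below is a placeholder.

```python
from langchain_openai import ChatOpenAI
from aitoolkits_webui.llm import DeepSeekLLM

llm = DeepSeekLLM(api_key="YOUR_DEEPSEEK_API_KEY")  # attrs generates the api_key keyword argument
print(llm.support_models)   # ['deepseek-chat', 'deepseek-coder']
print(llm.base_url)         # 'https://api.deepseek.com/v1'

# DeepSeek exposes an OpenAI-compatible endpoint, so ChatOpenAI only needs the base_url swapped
chat = ChatOpenAI(model=llm.default_model, api_key=llm.api_key, base_url=llm.base_url)
```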
src/app.py ADDED
@@ -0,0 +1,57 @@
+ import gradio as gr
+ from langchain_openai import ChatOpenAI
+ from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
+ from aitoolkits_webui.llm import DeepSeekLLM
+
+ api_key = "sk-cd4f2925207a4d78bf6656d952ed46f3"
+ deep_seek_llm = DeepSeekLLM(api_key=api_key)
+ chat = ChatOpenAI(model="deepseek-chat", api_key=deep_seek_llm.api_key, base_url=deep_seek_llm.base_url)
+
+
+ def predict(message, history):
+     history_messages = []
+     for human, assistant in history:
+         history_messages.append(HumanMessage(content=human))
+         history_messages.append(AIMessage(content=assistant))
+     history_messages.append(HumanMessage(content=message))
+
+     response_message = ''
+     for chunk in chat.stream(history_messages):
+         response_message = response_message + chunk.content
+         yield response_message
+
+
+ css = """
+ .container {
+     height: 100vh;
+ }
+ """
+
+ with gr.Blocks(css=css) as app:
+     with gr.Tab('Chat'):
+         with gr.Row():
+             with gr.Column(scale=2, min_width=600):
+                 gr.ChatInterface(predict)
+             with gr.Column(scale=1, min_width=300):
+                 with gr.Accordion('Select Model', open=True):
+                     gr.CheckboxGroup(
+                         choices=deep_seek_llm.support_models,
+                         type="value",
+                         label="Model",
+                         key="model",
+                     )
+                     gr.CheckboxGroup(
+                         choices=deep_seek_llm.support_models,
+                         type="value",
+                         label="Model",
+                         key="model",
+                     )
+                     gr.CheckboxGroup(
+                         choices=deep_seek_llm.support_models,
+                         type="value",
+                         label="Model",
+                         key="model",
+                     )
+
+
+ app.launch(server_name='0.0.0.0')
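`gr.ChatInterface(predict)` calls `predict` with the new user message and the prior history as a list of `(user, assistant)` string pairs, and streams each yielded partial response to the UI. A minimal sketch of driving the generator directly, with placeholder strings:

```python
# exercising the streaming generator outside Gradio
history = [("Hello", "Hi! How can I help you today?")]
last = ""
for partial in predict("Summarize what DeepSeek is in one sentence.", history):
    last = partial  # each yield is the accumulated response so far
print(last)
```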