Gilberto Medrano committed
Commit 181e273
Parent(s): 95d916c

Added relevant files from llm-app project
Files changed:
- Dockerfile       +11 -0
- app.py           +80 -0
- chainlit.md       +3 -0
- requirements.txt  +5 -0
Dockerfile (ADDED)
@@ -0,0 +1,11 @@
+FROM python:3.9
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+WORKDIR $HOME/app
+COPY --chown=user . $HOME/app
+COPY ./requirements.txt $HOME/app/requirements.txt
+RUN pip install -r requirements.txt
+COPY . .
+CMD ["chainlit", "run", "app.py", "--port", "7860"]
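For a quick local smoke test of this image (the tag name below is illustrative, not part of the commit): `docker build -t beyond-chatgpt .`, then `docker run -p 7860:7860 -e OPENAI_API_KEY=<your-key> beyond-chatgpt`. Port 7860 is the port Hugging Face Spaces expects a Docker Space to serve on, which is why the `CMD` passes `--port 7860` to Chainlit.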
app.py (ADDED)
@@ -0,0 +1,80 @@
+# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)
+
+# OpenAI Chat completion
+import os
+from openai import AsyncOpenAI  # importing openai for API usage
+import chainlit as cl  # importing chainlit for our app
+from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
+from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# ChatOpenAI Templates
+system_template = """You are a helpful assistant who always speaks in a pleasant tone!
+"""
+
+user_template = """{input}
+Think through your response step by step.
+"""
+
+
+@cl.on_chat_start  # marks a function that will be executed at the start of a user session
+async def start_chat():
+    settings = {
+        "model": "gpt-3.5-turbo",
+        "temperature": 0,
+        "max_tokens": 500,
+        "top_p": 1,
+        "frequency_penalty": 0,
+        "presence_penalty": 0,
+    }
+
+    cl.user_session.set("settings", settings)
+
+
+@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
+async def main(message: cl.Message):
+    settings = cl.user_session.get("settings")
+
+    client = AsyncOpenAI()
+
+    print(message.content)
+
+    prompt = Prompt(
+        provider=ChatOpenAI.id,
+        messages=[
+            PromptMessage(
+                role="system",
+                template=system_template,
+                formatted=system_template,
+            ),
+            PromptMessage(
+                role="user",
+                template=user_template,
+                formatted=user_template.format(input=message.content),
+            ),
+        ],
+        inputs={"input": message.content},
+        settings=settings,
+    )
+
+    print([m.to_openai() for m in prompt.messages])
+
+    msg = cl.Message(content="")
+
+    # Call OpenAI
+    async for stream_resp in await client.chat.completions.create(
+        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
+    ):
+        token = stream_resp.choices[0].delta.content
+        if not token:
+            token = ""
+        await msg.stream_token(token)
+
+    # Update the prompt object with the completion
+    prompt.completion = msg.content
+    msg.prompt = prompt
+
+    # Send and close the message stream
+    await msg.send()
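For context, the streaming loop above is the standard AsyncOpenAI pattern: `create(..., stream=True)` returns an async iterable of chunks whose `delta.content` carries each token. A minimal standalone sketch of the same pattern, outside Chainlit (assuming OPENAI_API_KEY is available in the environment and using the same model as the settings above):

import asyncio
from openai import AsyncOpenAI

async def demo() -> None:
    # AsyncOpenAI() picks up OPENAI_API_KEY from the environment
    client = AsyncOpenAI()
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        stream=True,
    )
    async for chunk in stream:
        # delta.content can be None for some chunks (e.g. the final one)
        print(chunk.choices[0].delta.content or "", end="", flush=True)

asyncio.run(demo())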
chainlit.md (ADDED)
@@ -0,0 +1,3 @@
+# Beyond ChatGPT
+
+This Chainlit app was created following instructions from [this repository](https://github.com/AI-Maker-Space/Beyond-ChatGPT)!
requirements.txt (ADDED)
@@ -0,0 +1,5 @@
+chainlit==0.7.700
+cohere==4.37
+openai==1.3.5
+tiktoken==0.5.1
+python-dotenv==1.0.0
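A note on the pins: `chainlit.prompt` and `chainlit.playground.providers`, imported in app.py, are Chainlit 0.7.x-era modules, so `chainlit==0.7.700` should be treated as a hard requirement; upgrading Chainlit would likely require reworking the `Prompt`/`PromptMessage` usage. To run outside Docker: `pip install -r requirements.txt`, then `chainlit run app.py`, with `OPENAI_API_KEY` set in a local `.env` file (picked up by `load_dotenv()`).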