Spaces:
Runtime error
Runtime error
File size: 5,513 Bytes
1d52b89 8037acc d1f1735 1d52b89 38bc0ad 1d52b89 0344eb9 38bc0ad 61039b2 38bc0ad 1d52b89 38bc0ad 1d52b89 8f4648b ef1f47d 8f4648b 1d52b89 0344eb9 1d52b89 26a7296 1d52b89 8037acc 1d52b89 26a7296 1d52b89 8037acc 1d52b89 ad90478 1d52b89 b18af55 d1f1735 af66d69 d1f1735 503b13d d1f1735 ad90478 f99ef57 d1f1735 ad90478 d1f1735 1d52b89 d1f1735 1d52b89 d1f1735 1d52b89 1116695 8037acc d1f1735 1d52b89 26a7296 1d52b89 38bc0ad 1d52b89 76225f2 da203a4 38bc0ad |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
import chainlit as cl
from openai import OpenAI
from langsmith.run_helpers import traceable
from langsmith_config import setup_langsmith_config
import base64
import os
# Model identifiers: `model` serves plain text chat, `model_vision` serves
# messages that carry image attachments.
model = "gpt-4-1106-preview"
model_vision = "gpt-4-vision-preview"
# Configure LangSmith tracing before any @traceable-decorated call runs.
# NOTE(review): defined in langsmith_config.py — presumably reads env vars; verify there.
setup_langsmith_config()
def process_images(msg: "cl.Message"):
    """Extract the first image attached to *msg* and return it base64-encoded.

    Args:
        msg: incoming Chainlit message; only ``msg.elements`` is read.

    Returns:
        The base64-encoded image as a ``str``, the sentinel ``"too_large"``
        when the image exceeds the 1 MB limit, or ``None`` when the message
        carries no image attachment.
    """
    # Processing images exclusively — ignore any non-image attachments.
    images = [file for file in msg.elements if "image" in file.mime]
    if not images:
        # Previously this indexed images[0] unconditionally and raised
        # IndexError when only non-image files were attached.
        return None
    image_bytes = images[0].content  # take the first image just for demo purposes
    # Enforce the advertised 1 MB limit (the UI message says "max 1mb";
    # the old threshold of 5,000,000 bytes contradicted it).
    if len(image_bytes) > 1_000_000:
        return "too_large"
    # The vision API expects a base64-encoded image payload.
    return base64.b64encode(image_bytes).decode("utf-8")
async def process_stream(stream, msg: cl.Message):
    """Forward each non-empty streamed completion token into *msg*.

    Iterates the OpenAI streaming response and pushes every token to the
    Chainlit message so the reply renders incrementally.
    """
    for chunk in stream:
        delta_text = chunk.choices[0].delta.content
        if delta_text:
            await msg.stream_token(delta_text)
def handle_vision_call(msg, image_history):
    """Append *msg* (text plus first image) to the vision history and start a vision completion.

    Args:
        msg: incoming Chainlit message with attachments.
        image_history: mutable list of vision-chat messages; appended in place.

    Returns:
        The streaming response from ``gpt_vision_call``, the sentinel
        ``"too_large"`` when the image exceeds the size limit, or ``None``
        when no usable image could be extracted.
    """
    # (Removed a dead `image_base64 = None` that was overwritten immediately.)
    image_base64 = process_images(msg)
    if image_base64 == "too_large":
        return "too_large"
    if not image_base64:
        # Nothing to send to the vision model.
        return None
    # Record the user turn (text + inline base64 image) in the vision history.
    image_history.append(
        {
            "role": "user",
            "content": [
                {"type": "text", "text": msg.content},
                {
                    "type": "image_url",
                    "image_url": {
                        # NOTE(review): mime is hard-coded as jpeg even for
                        # PNG uploads — works in practice, but could use the
                        # attachment's real mime type.
                        "url": f"data:image/jpeg;base64,{image_base64}",
                        # "low" detail keeps token usage (and cost) down.
                        "detail": "low"
                    }
                },
            ],
        }
    )
    return gpt_vision_call(image_history)
@traceable(run_type="llm", name="gpt 3 turbo call")
async def gpt_call(message_history=None):
    """Start a streaming chat completion against the text model.

    Args:
        message_history: list of ``{"role", "content"}`` chat messages;
            defaults to an empty conversation. (Was a mutable default
            ``list = []``, which is shared across calls — fixed to None.)

    Returns:
        The streaming response iterator from the OpenAI client.
    """
    # Build the client per call so the key stored in the user session is used.
    client = OpenAI(api_key=cl.user_session.get("api_key"))
    stream = client.chat.completions.create(
        model=model,
        messages=message_history if message_history is not None else [],
        stream=True,
    )
    return stream
@traceable(run_type="llm", name="gpt 4 turbo vision call")
def gpt_vision_call(image_history=None):
    """Start a streaming chat completion against the vision model.

    Args:
        image_history: list of vision-chat messages (may embed base64
            images); defaults to an empty conversation. (Was a mutable
            default ``list = []``, shared across calls — fixed to None.)

    Returns:
        The streaming response iterator from the OpenAI client.
    """
    # Build the client per call so the key stored in the user session is used.
    client = OpenAI(api_key=cl.user_session.get("api_key"))
    stream = client.chat.completions.create(
        model=model_vision,
        messages=image_history if image_history is not None else [],
        max_tokens=500,
        stream=True,
    )
    return stream
async def wait_for_key():
    """Prompt the user for an OpenAI API key and validate it, retrying until it works.

    Sends a cheap 1-token completion as a validity probe; on failure the error
    is shown and the prompt recurses. On success the key is stored in the user
    session.
    """
    res = await cl.AskUserMessage(content="Send an Openai API KEY to start. [https://platform.openai.com/api-keys](https://platform.openai.com/api-keys). e.g. sk-IY8Wl.....1cXD8", timeout=600).send()
    if not res:
        # Timed out / no answer — ask again.
        return await wait_for_key()
    await cl.Message(content="setting up...", indent=1).send()
    # Check the key by issuing a minimal completion request.
    client = OpenAI(api_key=res["content"])
    try:
        client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": "test"}],
            max_tokens=1,
        )
    except Exception as e:
        # Invalid key (or transient API error): surface the message and retry.
        await cl.Message(content=f"{e}", indent=1).send()
        return await wait_for_key()
    cl.user_session.set("api_key", res["content"])
    # The old code sent the success message twice (once indented inside the
    # try, once here) and misspelled "set" as "setted" — send it once, fixed.
    return await cl.Message(content="API key set, you can start chatting!").send()
@cl.on_chat_start
async def start_chat():
    """Seed both conversation histories with their system prompts, then ask for an API key."""
    text_system_prompt = "You are a helpful assistant. You are made by GPT-3.5-turbo-1106, the latest version developed by Openai. You do not have the ability to receive images, but if the user uploads an image with the message, GPT-4-vision-preview will be used. So if a user asks you if you have the ability to analyze images, you can tell them that. And tell him that at the bottom left (above the text input) he has a button to upload images, or he can drag it to the chat, or he can just copy paste the input"
    vision_system_prompt = "You are a helpful assistant. You are developed with GPT-4-vision-preview, if the user uploads an image, you have the ability to understand it. For normal messages GPT-3.5-turbo-1106 will be used, and for images you will use it. If the user asks about your capabilities you can tell them that."
    # Two parallel histories: one for the text model, one for the vision model.
    cl.user_session.set("message_history", [{"role": "system", "content": text_system_prompt}])
    cl.user_session.set("image_history", [{"role": "system", "content": vision_system_prompt}])
    await wait_for_key()
@cl.on_message
@traceable(run_type="chain", name="message")
async def on_message(msg: cl.Message):
    """Route each user message: vision model when attachments are present, text model otherwise.

    Streams the reply into a fresh Chainlit message and appends the assistant
    turn to BOTH histories so the two conversations stay in sync.
    """
    message_history = cl.user_session.get("message_history")
    image_history = cl.user_session.get("image_history")
    stream_msg = cl.Message(content="")
    stream = None
    if msg.elements:
        # Attachment present -> vision path; only image_history gets this turn.
        stream = handle_vision_call(msg, image_history)
        if stream == "too_large":
            return await cl.Message(content="Image too large, max 1mb").send()
    else:
        # add the message in both to keep the coherence between the two histories
        message_history.append({"role": "user", "content": msg.content})
        image_history.append({"role": "user", "content": msg.content})
        stream = await gpt_call(message_history)
    # stream may be None (e.g. vision path found no usable image) — then the
    # assistant turn recorded below is an empty string.
    if stream:
        await process_stream(stream, msg=stream_msg)
    message_history.append({"role": "assistant", "content": stream_msg.content})
    image_history.append({"role": "assistant", "content": stream_msg.content})
    return stream_msg.content
|