Update app.py
app.py CHANGED
@@ -29,14 +29,26 @@ def create_file(filename, prompt, response, should_save=True):
 def process_text(text_input):
     if text_input:
         st.session_state.messages.append({"role": "user", "content": text_input})
-        st.chat_message("user"
+        with st.chat_message("user"):
+            st.markdown(text_input)
         completion = client.chat.completions.create(model=MODEL, messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages], stream=False)
         return_text = completion.choices[0].message.content
-        st.chat_message("assistant"
+        with st.chat_message("assistant"):
+            st.markdown(return_text)
         filename = generate_filename(text_input, "md")
         create_file(filename, text_input, return_text)
         st.session_state.messages.append({"role": "assistant", "content": return_text})
 
+def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is an imaginary number'):
+    if text_input:
+        st.session_state.messages.append({"role": "user", "content": text_input})
+        completion = client.chat.completions.create(model=MODEL, messages=st.session_state.messages)
+        return_text = completion.choices[0].message.content
+        st.write("Assistant: " + return_text)
+        filename = generate_filename(text_input, "md")
+        create_file(filename, text_input, return_text, should_save=True)
+        return return_text
+
 def save_image(image_input, filename):
     with open(filename, "wb") as f:
         f.write(image_input.getvalue())
@@ -44,12 +56,14 @@ def save_image(image_input, filename):
 
 def process_image(image_input):
     if image_input:
-        st.chat_message("user"
+        with st.chat_message("user"):
+            st.markdown('Processing image: ' + image_input.name)
         base64_image = base64.b64encode(image_input.read()).decode("utf-8")
         st.session_state.messages.append({"role": "user", "content": [{"type": "text", "text": "Help me understand what is in this picture and list ten facts as markdown outline with appropriate emojis that describes what you see."}, {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}]})
         response = client.chat.completions.create(model=MODEL, messages=st.session_state.messages, temperature=0.0)
         image_response = response.choices[0].message.content
-        st.chat_message("assistant"
+        with st.chat_message("assistant"):
+            st.markdown(image_response)
         filename_md, filename_img = generate_filename(image_input.name + '- ' + image_response, "md"), image_input.name
         create_file(filename_md, image_response, '', True)
         with open(filename_md, "w", encoding="utf-8") as f:
@@ -64,7 +78,8 @@ def process_audio(audio_input):
         transcription = client.audio.transcriptions.create(model="whisper-1", file=audio_input)
         response = client.chat.completions.create(model=MODEL, messages=[{"role": "system", "content":"You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."}, {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription.text}"}]}], temperature=0)
         audio_response = response.choices[0].message.content
-        st.chat_message("assistant"
+        with st.chat_message("assistant"):
+            st.markdown(audio_response)
         filename = generate_filename(transcription.text, "md")
         create_file(filename, transcription.text, audio_response, should_save=True)
         st.session_state.messages.append({"role": "assistant", "content": audio_response})
@@ -77,7 +92,8 @@ def process_audio_and_video(video_input):
         st.session_state.messages.append({"role": "user", "content": ["These are the frames from the video.", *map(lambda x: {"type": "image_url", "image_url": {"url": f'data:image/jpg;base64,{x}', "detail": "low"}}, base64Frames), {"type": "text", "text": f"The audio transcription is: {transcript}"}]})
         response = client.chat.completions.create(model=MODEL, messages=st.session_state.messages, temperature=0)
         video_response = response.choices[0].message.content
-        st.chat_message("assistant"
+        with st.chat_message("assistant"):
+            st.markdown(video_response)
         filename = generate_filename(transcript, "md")
         create_file(filename, transcript, video_response, should_save=True)
         st.session_state.messages.append({"role": "assistant", "content": video_response})
@@ -88,7 +104,8 @@ def process_audio_for_video(video_input):
         transcription = client.audio.transcriptions.create(model="whisper-1", file=video_input)
         response = client.chat.completions.create(model=MODEL, messages=[{"role": "system", "content":"You are generating a transcript summary. Create a summary of the provided transcription. Respond in Markdown."}, {"role": "user", "content": [{"type": "text", "text": f"The audio transcription is: {transcription}"}]}], temperature=0)
         video_response = response.choices[0].message.content
-        st.chat_message("assistant"
+        with st.chat_message("assistant"):
+            st.markdown(video_response)
         filename = generate_filename(transcription, "md")
         create_file(filename, transcription, video_response, should_save=True)
         st.session_state.messages.append({"role": "assistant", "content": video_response})
@@ -156,10 +173,11 @@ def main():
 
     if prompt := st.chat_input("GPT-4o Multimodal ChatBot - What can I help you with?"):
         st.session_state.messages.append({"role": "user", "content": prompt})
-        st.chat_message("user"
+        with st.chat_message("user"):
+            st.markdown(prompt)
         with st.chat_message("assistant"):
             completion = client.chat.completions.create(model=MODEL, messages=st.session_state.messages, stream=True)
-            response =
+            response = process_text2(text_input=prompt)
         st.session_state.messages.append({"role": "assistant", "content": response})
 
     filename = save_and_play_audio(audio_recorder)
@@ -167,10 +185,11 @@ def main():
         transcript = transcribe_canary(filename)
         result = search_arxiv(transcript)
         st.session_state.messages.append({"role": "user", "content": transcript})
-        st.chat_message("user"
+        with st.chat_message("user"):
+            st.markdown(transcript)
         with st.chat_message("assistant"):
             completion = client.chat.completions.create(model=MODEL, messages=st.session_state.messages, stream=True)
-            response = process_text2(text_input=prompt)
         st.session_state.messages.append({"role": "assistant", "content": response})
 
 if __name__ == "__main__":
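
The recurring change in this commit is rendering each turn inside a chat-message container before it is appended to the session history. A minimal, self-contained sketch of that Streamlit pattern, with a placeholder echo reply standing in for the model call (not part of app.py):

import streamlit as st

# Keep the running conversation in session state, mirroring app.py.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay earlier turns so the transcript survives Streamlit reruns.
for m in st.session_state.messages:
    with st.chat_message(m["role"]):
        st.markdown(m["content"])

if prompt := st.chat_input("Say something"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):          # container-style rendering used in the diff
        st.markdown(prompt)

    reply = f"You said: {prompt}"          # placeholder reply instead of a model call
    with st.chat_message("assistant"):
        st.markdown(reply)
    st.session_state.messages.append({"role": "assistant", "content": reply})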
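
The new process_text2 helper wraps a plain, non-streaming chat completion over the accumulated message history. A stripped-down sketch of that call outside Streamlit, assuming OPENAI_API_KEY is set in the environment and reusing the default model name from the diff:

import os
from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def ask(messages, model="gpt-4o-2024-05-13"):
    # Send the whole chat history and return the assistant's reply text.
    completion = client.chat.completions.create(model=model, messages=messages)
    return completion.choices[0].message.content

history = [{"role": "user", "content": "What is 2+2 and what is an imaginary number?"}]
answer = ask(history)
history.append({"role": "assistant", "content": answer})
print(answer)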