awacke1 commited on
Commit
7ae4d4b
β€’
1 Parent(s): 38c66b4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +604 -0
app.py CHANGED
@@ -0,0 +1,604 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import anthropic
3
+ import openai
4
+ import base64
5
+ from datetime import datetime
6
+ import plotly.graph_objects as go
7
+ import cv2
8
+ import glob
9
+ import json
10
+ import math
11
+ import os
12
+ import pytz
13
+ import random
14
+ import re
15
+ import requests
16
+ import streamlit.components.v1 as components
17
+ import textract
18
+ import time
19
+ import zipfile
20
+ from audio_recorder_streamlit import audio_recorder
21
+ from bs4 import BeautifulSoup
22
+ from collections import deque
23
+ from dotenv import load_dotenv
24
+ from gradio_client import Client, handle_file
25
+ from huggingface_hub import InferenceClient
26
+ from io import BytesIO
27
+ from moviepy.editor import VideoFileClip
28
+ from PIL import Image
29
+ from PyPDF2 import PdfReader
30
+ from urllib.parse import quote
31
+ from xml.etree import ElementTree as ET
32
+ from openai import OpenAI
33
+
34
# 1. Configuration and Setup
# App identity strings; `title`/`icons`/URLs are reused by st.set_page_config below.
Site_Name = 'πŸ€–πŸ§ Claude35πŸ“πŸ”¬'
title = "πŸ€–πŸ§ Claude35πŸ“πŸ”¬"
helpURL = 'https://huggingface.co/awacke1'
bugURL = 'https://huggingface.co/spaces/awacke1'
icons = 'πŸ€–πŸ§ πŸ”¬πŸ“'

# Streamlit page setup — must be the first st.* call in the script.
st.set_page_config(
    page_title=title,
    page_icon=icons,
    layout="wide",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': helpURL,
        'Report a bug': bugURL,
        'About': title
    }
)
52
+
53
# 2. Load environment variables and initialize clients
load_dotenv()

# OpenAI setup — environment variable first, Streamlit secrets as fallback.
openai.api_key = os.getenv('OPENAI_API_KEY')
if openai.api_key is None:  # identity check, not `== None` (PEP 8)
    openai.api_key = st.secrets['OPENAI_API_KEY']

openai_client = OpenAI(
    api_key=os.getenv('OPENAI_API_KEY'),
    organization=os.getenv('OPENAI_ORG_ID')
)

# Claude setup — same env-then-secrets fallback pattern.
anthropic_key = os.getenv("ANTHROPIC_API_KEY_3")
if anthropic_key is None:
    anthropic_key = st.secrets["ANTHROPIC_API_KEY"]
claude_client = anthropic.Anthropic(api_key=anthropic_key)

# HuggingFace setup — API_URL/HF_KEY come from the environment;
# MODEL1/MODEL2 are fixed model ids.
API_URL = os.getenv('API_URL')
HF_KEY = os.getenv('HF_KEY')
MODEL1 = "meta-llama/Llama-2-7b-chat-hf"
MODEL2 = "openai/whisper-small.en"

headers = {
    "Authorization": f"Bearer {HF_KEY}",
    "Content-Type": "application/json"
}
82
+
83
# Initialize session states
# chat_history: Claude turns (user/claude dicts); messages: GPT chat
# transcript (role/content dicts); openai_model: model id used for GPT calls.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-4o-2024-05-13"
if "messages" not in st.session_state:
    st.session_state.messages = []
90
+
91
# Custom CSS
# Dark-gradient theme plus card/gallery styles; .bike-card is referenced by
# the HTML rendered in the scene-generator tab of the media gallery.
st.markdown("""
    <style>
    .main {
        background: linear-gradient(to right, #1a1a1a, #2d2d2d);
        color: #ffffff;
    }
    .stMarkdown {
        font-family: 'Helvetica Neue', sans-serif;
    }
    .category-header {
        background: linear-gradient(45deg, #2b5876, #4e4376);
        padding: 20px;
        border-radius: 10px;
        margin: 10px 0;
    }
    .scene-card {
        background: rgba(0,0,0,0.3);
        padding: 15px;
        border-radius: 8px;
        margin: 10px 0;
        border: 1px solid rgba(255,255,255,0.1);
    }
    .media-gallery {
        display: grid;
        gap: 1rem;
        padding: 1rem;
    }
    .bike-card {
        background: rgba(255,255,255,0.05);
        border-radius: 10px;
        padding: 15px;
        transition: transform 0.3s;
    }
    .bike-card:hover {
        transform: scale(1.02);
    }
    </style>
""", unsafe_allow_html=True)
130
+
131
# Bike Collections
# Static catalog used by the scene-generator tab: collection name ->
# bike name -> {"prompt": cinematic image-generation prompt, "emoji": icon}.
bike_collections = {
    "Celestial Collection 🌌": {
        "Eclipse Vaulter": {
            "prompt": """Cinematic shot of a sleek black mountain bike silhouetted against a total solar eclipse.
            The corona creates an ethereal halo effect, with lens flares accentuating key points of the frame.
            Dynamic composition shows the bike mid-leap, with stardust particles trailing behind.
            Camera angle: Low angle, wide shot
            Lighting: Dramatic rim lighting from eclipse
            Color palette: Deep purples, cosmic blues, corona gold""",
            "emoji": "πŸŒ‘"
        },
        "Starlight Leaper": {
            "prompt": """A black bike performing an epic leap under a vast Milky Way galaxy.
            Shimmering stars blanket the sky while the bike's wheels leave a trail of stardust.
            Camera angle: Wide-angle upward shot
            Lighting: Natural starlight with subtle rim lighting
            Color palette: Deep blues, silver highlights, cosmic purples""",
            "emoji": "✨"
        },
        "Moonlit Hopper": {
            "prompt": """A sleek black bike mid-hop over a moonlit meadow,
            the full moon illuminating the misty surroundings. Fireflies dance around the bike,
            and soft shadows create a serene yet dynamic atmosphere.
            Camera angle: Side profile with slight low angle
            Lighting: Soft moonlight with atmospheric fog
            Color palette: Silver blues, soft whites, deep shadows""",
            "emoji": "πŸŒ™"
        }
    },
    "Nature-Inspired Collection 🌲": {
        "Shadow Grasshopper": {
            "prompt": """A black bike jumping between forest paths,
            with dappled sunlight streaming through the canopy. Shadows dance on the bike's frame
            as it soars above mossy logs.
            Camera angle: Through-the-trees tracking shot
            Lighting: Natural forest lighting with sun rays
            Color palette: Forest greens, golden sunlight, deep shadows""",
            "emoji": "πŸ¦—"
        },
        "Onyx Leapfrog": {
            "prompt": """A bike with obsidian-black finish jumping over a sparkling creek,
            the reflection on the water broken into ripples by the leap. The surrounding forest
            is vibrant with greens and browns.
            Camera angle: Low angle from water level
            Lighting: Golden hour side lighting
            Color palette: Deep blacks, water blues, forest greens""",
            "emoji": "🐸"
        }
    }
}
182
+
183
# Helper Functions
def generate_filename(prompt, file_type):
    """Build a filesystem-safe filename: a US/Central timestamp plus the
    cleaned prompt text, capped at 240 characters, with *file_type* as the
    extension."""
    central = pytz.timezone('US/Central')
    timestamp = datetime.now(central).strftime("%m%d_%H%M")
    # Replace characters that are illegal in filenames, then collapse runs
    # of whitespace and trim before truncating.
    without_illegal = re.sub(r'[<>:"/\\|?*\n]', ' ', prompt)
    cleaned = re.sub(r'\s+', ' ', without_illegal).strip()[:240]
    return f"{timestamp}_{cleaned}.{file_type}"
191
+
192
+
193
+
194
+
195
# Function to create and save a file (and avoid the black hole of lost data πŸ•³)
def create_file(filename, prompt, response, should_save=True):
    """Persist a prompt/response pair to *filename* (UTF-8), unless saving
    is disabled via *should_save*."""
    if should_save:
        with open(filename, 'w', encoding='utf-8') as handle:
            handle.write(prompt + "\n\n" + response)
201
+
202
+
203
+
204
def create_and_save_file(content, file_type="md", prompt=None, is_image=False, should_save=True):
    """Create and save a file with proper handling of different payload types.

    Args:
        content: Body to write — str, or bytes when *is_image* is True
            (callers pass raw audio/image data with the flag set).
        file_type: Extension used by the generated filename.
        prompt: Optional prompt; used for the filename and prepended to
            text content.
        is_image: Write *content* verbatim as raw media, no prompt prefix.
        should_save: When False, nothing is written and None is returned.

    Returns:
        The generated filename, or None when should_save is False.
    """
    if not should_save:
        return None
    filename = generate_filename(prompt if prompt else content, file_type)
    if is_image:
        # Raw media may arrive as bytes; writing bytes through a text-mode
        # handle raises TypeError, so choose the mode from the payload type.
        if isinstance(content, bytes):
            with open(filename, "wb") as f:
                f.write(content)
        else:
            with open(filename, "w", encoding="utf-8") as f:
                f.write(content)
    else:
        with open(filename, "w", encoding="utf-8") as f:
            f.write(prompt + "\n\n" + content if prompt else content)
    return filename
215
+
216
def get_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* inlined as base64."""
    with open(file_path, "rb") as fh:
        payload = fh.read()
    encoded = base64.b64encode(payload).decode()
    name = os.path.basename(file_path)
    return f'<a href="data:file/txt;base64,{encoded}" download="{name}">Download {name}πŸ“‚</a>'
222
+
223
@st.cache_resource
def SpeechSynthesis(result):
    """Render an HTML5 widget that reads *result* aloud via the browser's
    speechSynthesis API.

    NOTE(review): st.cache_resource memoizes per distinct *result*, so a
    second call with the same text skips re-rendering — confirm intended.
    """
    import html  # local import: only needed for escaping here

    # Escape the text so '<', '&', or a literal '</textarea>' in the model
    # output cannot break out of the textarea and corrupt the widget markup.
    safe_result = html.escape(result)
    documentHTML5 = f'''
    <!DOCTYPE html>
    <html>
    <head>
        <title>Read It Aloud</title>
        <script type="text/javascript">
            function readAloud() {{
                const text = document.getElementById("textArea").value;
                const speech = new SpeechSynthesisUtterance(text);
                window.speechSynthesis.speak(speech);
            }}
        </script>
    </head>
    <body>
        <h1>πŸ”Š Read It Aloud</h1>
        <textarea id="textArea" rows="10" cols="80">{safe_result}</textarea>
        <br>
        <button onclick="readAloud()">πŸ”Š Read Aloud</button>
    </body>
    </html>
    '''
    components.html(documentHTML5, width=1280, height=300)
248
+
249
# Media Processing Functions
def process_image(image_input, user_prompt):
    """Send an image (path or raw bytes) plus *user_prompt* to the vision
    model and return its markdown answer."""
    if isinstance(image_input, str):
        # A string argument is treated as a path on disk.
        with open(image_input, "rb") as image_file:
            image_input = image_file.read()

    encoded = base64.b64encode(image_input).decode("utf-8")

    user_content = [
        {"type": "text", "text": user_prompt},
        {"type": "image_url", "image_url": {
            "url": f"data:image/png;base64,{encoded}"
        }},
    ]
    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
            {"role": "user", "content": user_content},
        ],
        temperature=0.0,
    )
    return response.choices[0].message.content
273
+
274
def process_audio(audio_input, text_input=''):
    """Transcribe audio with Whisper, echo/speak the result, and save a copy.

    Args:
        audio_input: Path to an audio file, raw bytes, or an open file object.
        text_input: Unused; kept for interface compatibility.

    Returns:
        The transcription text.
    """
    if isinstance(audio_input, str):
        with open(audio_input, "rb") as file:
            audio_input = file.read()

    # The OpenAI SDK needs a named file-like object; raw bytes are rejected,
    # so wrap them in a BytesIO with a filename attribute.
    if isinstance(audio_input, bytes):
        audio_bytes = audio_input
        audio_input = BytesIO(audio_bytes)
        audio_input.name = "audio.wav"
    else:
        audio_bytes = None

    transcription = openai_client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_input,
    )

    st.session_state.messages.append({"role": "user", "content": transcription.text})

    with st.chat_message("assistant"):
        st.markdown(transcription.text)
        SpeechSynthesis(transcription.text)

    # Save the raw audio under a transcript-derived name; write in binary
    # mode directly (text-mode helpers cannot take a bytes payload).
    if audio_bytes is not None:
        filename = generate_filename(transcription.text, "wav")
        with open(filename, "wb") as f:
            f.write(audio_bytes)
    return transcription.text
293
+
294
def process_video(video_path, seconds_per_frame=1):
    """Sample frames from a video and extract its audio track.

    Args:
        video_path: Path to the video file.
        seconds_per_frame: Sampling interval between captured frames.

    Returns:
        (base64Frames, audio_path): JPEG frames as base64 strings, and the
        path of the extracted MP3, or None when no audio track exists.
    """
    base64Frames = []
    video = cv2.VideoCapture(video_path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # Clamp to 1: fps * seconds_per_frame can round to 0, and range() with
    # step 0 raises ValueError.
    frames_to_skip = max(1, int(fps * seconds_per_frame))

    for frame_idx in range(0, total_frames, frames_to_skip):
        video.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        success, frame = video.read()
        if not success:
            break
        _, buffer = cv2.imencode(".jpg", frame)
        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))

    video.release()

    # Extract audio with moviepy; a clip without audio has clip.audio == None.
    base_video_path = os.path.splitext(video_path)[0]
    audio_path = f"{base_video_path}.mp3"
    try:
        video_clip = VideoFileClip(video_path)
        try:
            if video_clip.audio is None:
                raise ValueError("no audio track")
            video_clip.audio.write_audiofile(audio_path)
        finally:
            # Close the clip even when extraction fails.
            video_clip.close()
    except Exception:  # narrowed from bare `except:` so KeyboardInterrupt propagates
        st.warning("No audio track found in video")
        audio_path = None

    return base64Frames, audio_path
324
+
325
def process_video_with_gpt(video_input, user_prompt):
    """Describe a video by sending its sampled frames to the vision model."""
    base64Frames, audio_path = process_video(video_input)

    frame_parts = [
        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame}"}}
        for frame in base64Frames
    ]
    user_content = [{"type": "text", "text": user_prompt}] + frame_parts

    response = openai_client.chat.completions.create(
        model=st.session_state["openai_model"],
        messages=[
            {"role": "system", "content": "Analyze the video frames and provide a detailed description."},
            {"role": "user", "content": user_content},
        ]
    )
    return response.choices[0].message.content
342
+
343
# ArXiv Search Functions
def search_arxiv(query):
    """Search ArXiv papers through the hosted RAG Gradio Space and return
    the LLM's answer."""
    rag_client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
    return rag_client.predict(
        query,
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        True,
        api_name="/ask_llm",
    )
354
+
355
# Chat Processing Functions
def process_with_gpt(text_input):
    """Run one chat turn through GPT-4o, persist it, and return the reply
    (None when *text_input* is empty)."""
    if not text_input:
        return None

    st.session_state.messages.append({"role": "user", "content": text_input})

    with st.chat_message("user"):
        st.markdown(text_input)

    with st.chat_message("assistant"):
        # Replay the full transcript so the model sees conversation context.
        history = [
            {"role": m["role"], "content": m["content"]}
            for m in st.session_state.messages
        ]
        completion = openai_client.chat.completions.create(
            model=st.session_state["openai_model"],
            messages=history,
            stream=False,
        )
        return_text = completion.choices[0].message.content
        st.write("GPT-4o: " + return_text)

        create_file(generate_filename(text_input, "md"), text_input, return_text)
        st.session_state.messages.append({"role": "assistant", "content": return_text})
    return return_text
380
+
381
def process_with_claude(text_input):
    """Run one chat turn through Claude, persist it, and return the reply
    (None when *text_input* is empty)."""
    if not text_input:
        return None

    response = claude_client.messages.create(
        model="claude-3-sonnet-20240229",
        max_tokens=1000,
        messages=[{"role": "user", "content": text_input}],
    )
    response_text = response.content[0].text
    st.write("Claude: " + response_text)

    create_file(generate_filename(text_input, "md"), text_input, response_text)

    st.session_state.chat_history.append({
        "user": text_input,
        "claude": response_text,
    })
    return response_text
402
+
403
# File Management Functions
def load_file(file_name):
    """Return the full text content of *file_name* (UTF-8)."""
    with open(file_name, "r", encoding='utf-8') as handle:
        return handle.read()
409
+
410
def create_zip_of_files(files):
    """Bundle *files* into a fixed-name zip archive and return its filename."""
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as archive:
        for path in files:
            archive.write(path)
    return zip_name
417
+
418
+
419
+
420
def get_media_html(media_path, media_type="video", width="100%"):
    """Generate an HTML5 player tag with the media inlined as a base64 data URI.

    Args:
        media_path: Path to the media file on disk.
        media_type: "video" for an mp4 <video> tag; anything else yields <audio>.
        width: CSS width for the player element.

    Returns:
        HTML snippet embedding the file content.
    """
    # Context manager closes the handle deterministically (the original
    # `open(...).read()` leaked it until garbage collection).
    with open(media_path, 'rb') as media_file:
        media_data = base64.b64encode(media_file.read()).decode()
    if media_type == "video":
        return f'''
            <video width="{width}" controls autoplay muted loop>
                <source src="data:video/mp4;base64,{media_data}" type="video/mp4">
                Your browser does not support the video tag.
            </video>
        '''
    else:  # audio
        return f'''
            <audio controls style="width: {width};">
                <source src="data:audio/mpeg;base64,{media_data}" type="audio/mpeg">
                Your browser does not support the audio element.
            </audio>
        '''
437
+
438
def create_media_gallery():
    """Create the media gallery interface: images, audio, video, and the
    bike scene-generator, each in its own tab. Media is discovered by
    globbing the working directory."""
    st.header("🎬 Media Gallery")

    tabs = st.tabs(["πŸ–ΌοΈ Images", "🎡 Audio", "πŸŽ₯ Video", "🎨 Scene Generator"])

    # Images tab: grid of local png/jpg files with optional GPT vision analysis.
    with tabs[0]:
        image_files = glob.glob("*.png") + glob.glob("*.jpg")
        if image_files:
            num_cols = st.slider("Number of columns", 1, 5, 3)
            cols = st.columns(num_cols)
            for idx, image_file in enumerate(image_files):
                with cols[idx % num_cols]:
                    img = Image.open(image_file)
                    st.image(img, use_container_width=True)

                    # Add GPT vision analysis option
                    if st.button(f"Analyze {os.path.basename(image_file)}"):
                        analysis = process_image(image_file,
                                                 "Describe this image in detail and identify key elements.")
                        st.markdown(analysis)

    # Audio tab: inline players plus Whisper transcription per file.
    with tabs[1]:
        audio_files = glob.glob("*.mp3") + glob.glob("*.wav")
        for audio_file in audio_files:
            with st.expander(f"🎡 {os.path.basename(audio_file)}"):
                st.markdown(get_media_html(audio_file, "audio"), unsafe_allow_html=True)
                if st.button(f"Transcribe {os.path.basename(audio_file)}"):
                    with open(audio_file, "rb") as f:
                        transcription = process_audio(f)
                        st.write(transcription)

    # Video tab: inline players plus GPT frame analysis per file.
    with tabs[2]:
        video_files = glob.glob("*.mp4")
        for video_file in video_files:
            with st.expander(f"πŸŽ₯ {os.path.basename(video_file)}"):
                st.markdown(get_media_html(video_file, "video"), unsafe_allow_html=True)
                if st.button(f"Analyze {os.path.basename(video_file)}"):
                    analysis = process_video_with_gpt(video_file,
                                                      "Describe what's happening in this video.")
                    st.markdown(analysis)

    # Scene generator tab: one styled card per bike from bike_collections.
    with tabs[3]:
        for collection_name, bikes in bike_collections.items():
            st.subheader(collection_name)
            cols = st.columns(len(bikes))

            for idx, (bike_name, details) in enumerate(bikes.items()):
                with cols[idx]:
                    st.markdown(f"""
                    <div class='bike-card'>
                        <h3>{details['emoji']} {bike_name}</h3>
                        <p>{details['prompt']}</p>
                    </div>
                    """, unsafe_allow_html=True)

                    if st.button(f"Generate {bike_name} Scene"):
                        prompt = details['prompt']
                        # Here you could integrate with image generation API
                        st.write(f"Generated scene description for {bike_name}:")
                        st.write(prompt)
499
+
500
def display_file_manager():
    """Display the file-management sidebar: bulk delete/download, plus a
    view / download / edit / delete row for every local .md file."""
    st.sidebar.title("πŸ“ File Management")

    # Reverse sort puts newest first — filenames start with MMDD_HHMM.
    all_files = glob.glob("*.md")
    all_files.sort(reverse=True)

    if st.sidebar.button("πŸ—‘ Delete All"):
        for file in all_files:
            os.remove(file)
        st.rerun()

    if st.sidebar.button("⬇️ Download All"):
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_download_link(zip_file), unsafe_allow_html=True)

    # One row per file; keys include the filename so widget state is unique.
    for file in all_files:
        col1, col2, col3, col4 = st.sidebar.columns([1,3,1,1])
        with col1:
            # View: loads the file into session state for the File Editor tab.
            if st.button("🌐", key="view_"+file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col2:
            st.markdown(get_download_link(file), unsafe_allow_html=True)
        with col3:
            # Edit: same state hand-off as view.
            if st.button("πŸ“‚", key="edit_"+file):
                st.session_state.current_file = file
                st.session_state.file_content = load_file(file)
        with col4:
            if st.button("πŸ—‘", key="delete_"+file):
                os.remove(file)
                st.rerun()
532
+
533
def main():
    """Top-level Streamlit page: chat (GPT/Claude/both), media gallery,
    ArXiv search, and a file editor, with the file manager always in the
    sidebar."""
    st.title("🚲 Bike Cinematic Universe & AI Assistant")

    # Main navigation
    tab_main = st.radio("Choose Action:",
                        ["πŸ’¬ Chat", "πŸ“Έ Media Gallery", "πŸ” Search ArXiv", "πŸ“ File Editor"],
                        horizontal=True)

    if tab_main == "πŸ’¬ Chat":
        # Model Selection
        model_choice = st.sidebar.radio(
            "Choose AI Model:",
            ["GPT-4o", "Claude-3", "Both"]
        )

        # Chat Interface
        user_input = st.text_area("Message:", height=100)

        if st.button("Send πŸ“¨"):
            if user_input:
                if model_choice == "GPT-4o":
                    gpt_response = process_with_gpt(user_input)
                elif model_choice == "Claude-3":
                    claude_response = process_with_claude(user_input)
                else:  # Both — render the two replies side by side.
                    col1, col2 = st.columns(2)
                    with col1:
                        st.subheader("GPT-4o Response")
                        gpt_response = process_with_gpt(user_input)
                    with col2:
                        st.subheader("Claude-3 Response")
                        claude_response = process_with_claude(user_input)

        # Display Chat History
        st.subheader("Chat History πŸ“œ")
        tab1, tab2 = st.tabs(["Claude History", "GPT-4o History"])

        with tab1:
            for chat in st.session_state.chat_history:
                st.text_area("You:", chat["user"], height=100, disabled=True)
                st.text_area("Claude:", chat["claude"], height=200, disabled=True)
                st.markdown("---")

        with tab2:
            for message in st.session_state.messages:
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

    elif tab_main == "πŸ“Έ Media Gallery":
        create_media_gallery()

    elif tab_main == "πŸ” Search ArXiv":
        query = st.text_input("Enter your research query:")
        if query:
            with st.spinner("Searching ArXiv..."):
                results = search_arxiv(query)
                st.markdown(results)

    elif tab_main == "πŸ“ File Editor":
        # current_file / file_content are set by the sidebar file manager's
        # view/edit buttons; hasattr guards the first run before a selection.
        if hasattr(st.session_state, 'current_file'):
            st.subheader(f"Editing: {st.session_state.current_file}")
            new_content = st.text_area("Content:", st.session_state.file_content, height=300)
            if st.button("Save Changes"):
                with open(st.session_state.current_file, 'w', encoding='utf-8') as file:
                    file.write(new_content)
                st.success("File updated successfully!")

    # Always show file manager in sidebar
    display_file_manager()
602
+
603
# Script entry point.
if __name__ == "__main__":
    main()