PierreBrunelle committed
Commit • 65c1b40
Parent(s): a7f9e10
Create app.py

app.py ADDED
@@ -0,0 +1,532 @@
import gradio as gr
import pixeltable as pxt
from pixeltable.iterators import DocumentSplitter, FrameIterator, StringSplitter
from pixeltable.functions.huggingface import sentence_transformer, clip_image, clip_text
from pixeltable.functions.video import extract_audio
from pixeltable.functions.audio import get_metadata
from pixeltable.functions import openai
import numpy as np
import PIL.Image
import os
import getpass
import requests
import tempfile
from datetime import datetime

# Configuration
PIXELTABLE_MEDIA_DIR = os.path.expanduser("~/.pixeltable/media")
MAX_TOKENS_DEFAULT = 300
TEMPERATURE_DEFAULT = 0.7
CHUNK_SIZE_DEFAULT = 300

# Initialize API keys
def init_api_keys():
    if 'OPENAI_API_KEY' not in os.environ:
        os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API key:')

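# The expr_udf wrappers below register pretrained embedding models for use in
# Pixeltable embedding indexes: E5 (e5-large-v2) for text retrieval, and CLIP
# (clip-vit-base-patch32) for joint image/text search over video frames.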
# Embedding Functions
@pxt.expr_udf
def e5_embed(text: str) -> np.ndarray:
    return sentence_transformer(text, model_id='intfloat/e5-large-v2')

@pxt.expr_udf
def embed_image(img: PIL.Image.Image):
    return clip_image(img, model_id='openai/clip-vit-base-patch32')

@pxt.expr_udf
def str_embed(s: str):
    return clip_text(s, model_id='openai/clip-vit-base-patch32')

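# Note: initialize_pixeltable() drops and recreates the whole 'unified_app'
# directory, so each processing run starts from a clean slate and discards
# previously indexed data.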
# Common Utilities
def initialize_pixeltable(dir_name='unified_app'):
    """Initialize Pixeltable directory"""
    pxt.drop_dir(dir_name, force=True)
    pxt.create_dir(dir_name)

@pxt.udf
def create_prompt(top_k_list: list[dict], question: str) -> str:
    """Create a standardized prompt format"""
    concat_top_k = '\n\n'.join(elt['text'] for elt in reversed(top_k_list))
    return f'''
PASSAGES:
{concat_top_k}
QUESTION:
{question}'''

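# generate_audio() calls the OpenAI text-to-speech endpoint directly over HTTP
# and writes the returned MP3 to a local ./temp file; a failed request (non-200
# status) falls through and yields None.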
@pxt.udf(return_type=pxt.AudioType())
def generate_audio(script: str, voice: str, api_key: str):
    """Generate audio from text using OpenAI's API"""
    if not script or not voice:
        return None

    try:
        response = requests.post(
            "https://api.openai.com/v1/audio/speech",
            headers={"Authorization": f"Bearer {api_key}"},
            json={"model": "tts-1", "input": script, "voice": voice}
        )

        if response.status_code == 200:
            temp_dir = os.path.join(os.getcwd(), "temp")
            os.makedirs(temp_dir, exist_ok=True)
            temp_file = os.path.join(temp_dir, f"audio_{os.urandom(8).hex()}.mp3")

            with open(temp_file, 'wb') as f:
                f.write(response.content)
            return temp_file
    except Exception as e:
        print(f"Error in audio synthesis: {e}")
        return None

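# Document pipeline: insert PDFs into a table, split them into chunks with
# DocumentSplitter, and build an E5 embedding index over the chunk text so
# questions can be answered via similarity search (RAG).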
# Document Processing
class DocumentProcessor:
    @staticmethod
    def process_documents(pdf_files, chunk_limit, chunk_separator):
        """Process uploaded documents for chatbot functionality"""
        initialize_pixeltable()

        docs = pxt.create_table(
            'unified_app.documents',
            {'document': pxt.DocumentType(nullable=True)}
        )

        docs.insert({'document': file.name} for file in pdf_files if file.name.endswith('.pdf'))

        chunks = pxt.create_view(
            'unified_app.chunks',
            docs,
            iterator=DocumentSplitter.create(
                document=docs.document,
                separators=chunk_separator,
                limit=chunk_limit if chunk_separator in ["token_limit", "char_limit"] else None
            )
        )

        chunks.add_embedding_index('text', string_embed=e5_embed)
        return "Documents processed successfully. You can start asking questions."

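    # Answering works in two steps: retrieve the top-5 most similar chunks,
    # then pass them as context to a chat completion via a throwaway table
    # whose computed 'response' column holds the model output.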
    @staticmethod
    def get_document_answer(question):
        """Get answer from processed documents"""
        try:
            chunks = pxt.get_table('unified_app.chunks')
            sim = chunks.text.similarity(question)
            relevant_chunks = chunks.order_by(sim, asc=False).limit(5).select(chunks.text).collect()
            context = "\n\n".join(chunk['text'] for chunk in relevant_chunks)

            temp_table = pxt.create_table(
                'unified_app.temp_response',
                {
                    'question': pxt.StringType(),
                    'context': pxt.StringType()
                }
            )

            temp_table.insert([{'question': question, 'context': context}])

            temp_table['response'] = openai.chat_completions(
                messages=[
                    {
                        'role': 'system',
                        'content': 'Answer the question based only on the provided context. If the context doesn\'t contain enough information, say so.'
                    },
                    {
                        'role': 'user',
                        'content': f"Context:\n{context}\n\nQuestion: {question}"
                    }
                ],
                model='gpt-4o-mini-2024-07-18'
            )

            answer = temp_table.select(
                answer=temp_table.response.choices[0].message.content
            ).tail(1)['answer'][0]

            pxt.drop_table('unified_app.temp_response', force=True)
            return answer

        except Exception as e:
            return f"Error: {str(e)}"

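# Call pipeline: computed columns chain video -> extracted audio -> Whisper
# transcription -> LLM insights, so inserting a single video row triggers the
# entire analysis.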
# Call Analysis
class CallAnalyzer:
    @staticmethod
    def process_call(video_file):
        """Process and analyze call recordings"""
        try:
            calls = pxt.create_table(
                'unified_app.calls',
                {"video": pxt.VideoType(nullable=True)}
            )

            calls['audio'] = extract_audio(calls.video, format='mp3')
            calls['transcription'] = openai.transcriptions(audio=calls.audio, model='whisper-1')
            calls['text'] = calls.transcription.text

            sentences = pxt.create_view(
                'unified_app.sentences',
                calls,
                iterator=StringSplitter.create(text=calls.text, separators='sentence')
            )

            sentences.add_embedding_index('text', string_embed=e5_embed)

            @pxt.udf
            def generate_insights(text: str) -> list[dict]:
                return [
                    {'role': 'system', 'content': 'Analyze this call transcript and provide key insights:'},
                    {'role': 'user', 'content': text}
                ]

            calls['insights_prompt'] = generate_insights(calls.text)
            calls['insights'] = openai.chat_completions(
                messages=calls.insights_prompt,
                model='gpt-4o-mini-2024-07-18'
            ).choices[0].message.content

            calls.insert([{"video": video_file}])

            result = calls.select(calls.text, calls.audio, calls.insights).tail(1)
            return result['text'][0], result['audio'][0], result['insights'][0]

        except Exception as e:
            return f"Error processing call: {str(e)}", None, None

|
194 |
+
|
195 |
+
# Video Search
|
196 |
+
class VideoSearcher:
|
197 |
+
@staticmethod
|
198 |
+
def process_video(video_file):
|
199 |
+
"""Process video for searching"""
|
200 |
+
try:
|
201 |
+
initialize_pixeltable()
|
202 |
+
videos = pxt.create_table('unified_app.videos', {'video': pxt.VideoType()})
|
203 |
+
|
204 |
+
frames = pxt.create_view(
|
205 |
+
'unified_app.frames',
|
206 |
+
videos,
|
207 |
+
iterator=FrameIterator.create(video=videos.video, fps=1)
|
208 |
+
)
|
209 |
+
|
210 |
+
frames.add_embedding_index('frame', string_embed=str_embed, image_embed=embed_image)
|
211 |
+
videos.insert([{'video': video_file.name}])
|
212 |
+
|
213 |
+
return "Video processed and indexed for search."
|
214 |
+
except Exception as e:
|
215 |
+
return f"Error processing video: {str(e)}"
|
216 |
+
|
217 |
+
@staticmethod
|
218 |
+
def search_video(search_type, text_query=None, image_query=None):
|
219 |
+
"""Search processed video frames"""
|
220 |
+
try:
|
221 |
+
frames = pxt.get_table('unified_app.frames')
|
222 |
+
|
223 |
+
if search_type == "Text" and text_query:
|
224 |
+
sim = frames.frame.similarity(text_query)
|
225 |
+
elif search_type == "Image" and image_query is not None:
|
226 |
+
sim = frames.frame.similarity(image_query)
|
227 |
+
else:
|
228 |
+
return []
|
229 |
+
|
230 |
+
results = frames.order_by(sim, asc=False).limit(5).select(frames.frame).collect()
|
231 |
+
return [row['frame'] for row in results]
|
232 |
+
except Exception as e:
|
233 |
+
print(f"Search error: {str(e)}")
|
234 |
+
return []
|
235 |
+
|
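# The interface below wires the three pipelines into tabs; the component
# variables defined here are captured by the event handlers connected at the
# end of create_interface().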
# Gradio Interface
def create_interface():
    with gr.Blocks(theme=gr.themes.Base()) as demo:
        # Header
        gr.HTML(
            """
            <div style="text-align: left; margin-bottom: 1rem;">
                <img src="https://raw.githubusercontent.com/pixeltable/pixeltable/main/docs/source/data/pixeltable-logo-large.png" alt="Pixeltable" style="max-width: 150px;" />
            </div>
            """
        )

        gr.Markdown(
            """
            # Multimodal Powerhouse
            """
        )

        gr.HTML(
            """
            <p>
                <a href="https://github.com/pixeltable/pixeltable" target="_blank" style="color: #F25022; text-decoration: none; font-weight: bold;">Pixeltable</a>
                is a declarative interface for working with text, images, embeddings, and video, enabling you to store, transform, index, and iterate on data.
            </p>

            <div style="background-color: #E5DDD4; border: 1px solid #e9ecef; border-radius: 8px; padding: 15px; margin: 15px 0;">
                <strong>⚠️ Note:</strong> This app runs best with a GPU. For optimal performance, consider
                <a href="https://huggingface.co/spaces/Pixeltable/Multimodal-Processing-Suite?duplicate=true" target="_blank" style="color: #F25022; text-decoration: none; font-weight: bold;">duplicating this space</a>
                to run locally or with better computing resources.
            </div>
            """
        )

        # Documentation Sections
        with gr.Row():
            with gr.Column():
                with gr.Accordion("🎯 What This App Does", open=False):
                    gr.Markdown("""
                    1. 📄 **Document Processing**
                       * Chat with your documents using RAG
                       * Process multiple document formats
                       * Extract key insights

                    2. 🎥 **Video Analysis**
                       * Text and image-based video search
                       * Frame extraction and indexing
                       * Visual content discovery

                    3. 🎙️ **Call Analysis**
                       * Automatic transcription
                       * Key insight extraction
                       * Audio processing
                    """)

            with gr.Column():
                with gr.Accordion("⚙️ How It Works", open=False):
                    gr.Markdown("""
                    1. 📊 **Data Processing**
                       * Chunking and indexing documents
                       * Embedding generation for search
                       * Multi-modal data handling

                    2. 🤖 **AI Integration**
                       * LLM-powered analysis
                       * Speech-to-text conversion
                       * Semantic search capabilities

                    3. 📚 **Storage & Retrieval**
                       * Efficient data organization
                       * Quick content retrieval
                       * Structured data management
                    """)

        with gr.Tabs():
            # Document Chat Tab
            with gr.TabItem("📄 Document Chat"):
                with gr.Row():
                    with gr.Column():
                        doc_files = gr.File(label="Upload Documents", file_count="multiple")
                        chunk_size = gr.Slider(
                            minimum=100,
                            maximum=500,
                            value=CHUNK_SIZE_DEFAULT,
                            label="Chunk Size"
                        )
                        chunk_type = gr.Dropdown(
                            choices=["token_limit", "char_limit", "sentence", "paragraph"],
                            value="token_limit",
                            label="Chunking Method"
                        )
                        process_docs_btn = gr.Button("Process Documents")
                        process_status = gr.Textbox(label="Status")
                    with gr.Column():
                        chatbot = gr.Chatbot(label="Document Chat")
                        msg = gr.Textbox(label="Ask a question")
                        send_btn = gr.Button("Send")

            # Call Analysis Tab
            with gr.TabItem("🎙️ Call Analysis"):
                with gr.Row():
                    with gr.Column():
                        call_upload = gr.Video(label="Upload Call Recording")
                        analyze_btn = gr.Button("Analyze Call")
                    with gr.Column():
                        with gr.Tabs():
                            with gr.TabItem("📝 Transcript"):
                                transcript = gr.Textbox(label="Transcript", lines=10)
                            with gr.TabItem("💡 Insights"):
                                insights = gr.Textbox(label="Key Insights", lines=10)
                            with gr.TabItem("🔊 Audio"):
                                audio_output = gr.Audio(label="Extracted Audio")

            # Video Search Tab
            with gr.TabItem("🎥 Video Search"):
                with gr.Row():
                    with gr.Column():
                        video_upload = gr.File(label="Upload Video")
                        process_video_btn = gr.Button("Process Video")
                        video_status = gr.Textbox(label="Processing Status")
                        search_type = gr.Radio(
                            choices=["Text", "Image"],
                            label="Search Type",
                            value="Text"
                        )
                        text_input = gr.Textbox(label="Text Query")
                        image_input = gr.Image(label="Image Query", type="pil", visible=False)
                        search_btn = gr.Button("Search")
                    with gr.Column():
                        results_gallery = gr.Gallery(label="Search Results")

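        # document_chat() returns an empty string as its first output to clear
        # the message box after each turn; update_search_type() toggles which
        # query widget is visible based on the selected search mode.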
        # Event Handlers
        def document_chat(message, chat_history):
            bot_message = DocumentProcessor.get_document_answer(message)
            chat_history.append((message, bot_message))
            return "", chat_history

        def update_search_type(choice):
            return {
                text_input: gr.update(visible=choice == "Text"),
                image_input: gr.update(visible=choice == "Image")
            }

        # Connect Events
        process_docs_btn.click(
            DocumentProcessor.process_documents,
            inputs=[doc_files, chunk_size, chunk_type],
            outputs=[process_status]
        )

        send_btn.click(
            document_chat,
            inputs=[msg, chatbot],
            outputs=[msg, chatbot]
        )

        analyze_btn.click(
            CallAnalyzer.process_call,
            inputs=[call_upload],
            outputs=[transcript, audio_output, insights]
        )

        process_video_btn.click(
            VideoSearcher.process_video,
            inputs=[video_upload],
            outputs=[video_status]
        )

        search_type.change(
            update_search_type,
            search_type,
            [text_input, image_input]
        )

        search_btn.click(
            VideoSearcher.search_video,
            inputs=[search_type, text_input, image_input],
            outputs=[results_gallery]
        )

        # Related Pixeltable Spaces
        gr.Markdown("## 🚀 Explore More Pixeltable Apps")

        with gr.Row():
            with gr.Column():
                gr.HTML(
                    """
                    <div style="border: 1px solid #ddd; padding: 15px; border-radius: 8px; margin-bottom: 10px;">
                        <h3>📄 Document & Text Processing</h3>
                        <ul style="list-style-type: none; padding-left: 0;">
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/Multi-LLM-RAG-with-Groundtruth-Comparison" target="_blank" style="color: #F25022; text-decoration: none;">
                                    🤖 Multi-LLM RAG Comparison
                                </a>
                            </li>
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/Document-to-Audio-Synthesis" target="_blank" style="color: #F25022; text-decoration: none;">
                                    🔊 Document to Audio Synthesis
                                </a>
                            </li>
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/Prompt-Engineering-and-LLM-Studio" target="_blank" style="color: #F25022; text-decoration: none;">
                                    💡 Prompt Engineering Studio
                                </a>
                            </li>
                        </ul>
                    </div>
                    """
                )

            with gr.Column():
                gr.HTML(
                    """
                    <div style="border: 1px solid #ddd; padding: 15px; border-radius: 8px; margin-bottom: 10px;">
                        <h3>🎥 Video & Audio Processing</h3>
                        <ul style="list-style-type: none; padding-left: 0;">
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/video-to-social-media-post-generator" target="_blank" style="color: #F25022; text-decoration: none;">
                                    📱 Social Media Post Generator
                                </a>
                            </li>
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/Call-Analysis-AI-Tool" target="_blank" style="color: #F25022; text-decoration: none;">
                                    🎙️ Call Analysis Tool
                                </a>
                            </li>
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/object-detection-in-videos-with-yolox" target="_blank" style="color: #F25022; text-decoration: none;">
                                    🔍 Video Object Detection
                                </a>
                            </li>
                        </ul>
                    </div>
                    """
                )

            with gr.Column():
                gr.HTML(
                    """
                    <div style="border: 1px solid #ddd; padding: 15px; border-radius: 8px; margin-bottom: 10px;">
                        <h3>🎮 Interactive Applications</h3>
                        <ul style="list-style-type: none; padding-left: 0;">
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/AI-RPG-Adventure" target="_blank" style="color: #F25022; text-decoration: none;">
                                    🎲 AI RPG Adventure
                                </a>
                            </li>
                            <li style="margin-bottom: 10px;">
                                <a href="https://huggingface.co/spaces/Pixeltable/AI-Financial-Analysis-Platform" target="_blank" style="color: #F25022; text-decoration: none;">
                                    📈 Financial Analysis Platform
                                </a>
                            </li>
                        </ul>
                    </div>
                    """
                )

        gr.HTML(
            """
            <div style="margin-top: 2rem; padding-top: 1rem; border-top: 1px solid #e5e7eb;">
                <div style="display: flex; justify-content: space-between; align-items: center; flex-wrap: wrap; gap: 1rem;">
                    <div style="flex: 1;">
                        <h4 style="margin: 0; color: #374151;">🚀 Built with Pixeltable</h4>
                        <p style="margin: 0.5rem 0; color: #6b7280;">
                            Open-source AI data infrastructure.
                        </p>
                    </div>
                    <div style="flex: 1;">
                        <h4 style="margin: 0; color: #374151;">🔗 Resources</h4>
                        <div style="display: flex; gap: 1.5rem; margin-top: 0.5rem;">
                            <a href="https://github.com/pixeltable/pixeltable" target="_blank" style="color: #4F46E5; text-decoration: none;">
                                💻 GitHub
                            </a>
                            <a href="https://docs.pixeltable.com" target="_blank" style="color: #4F46E5; text-decoration: none;">
                                📚 Documentation
                            </a>
                            <a href="https://huggingface.co/Pixeltable" target="_blank" style="color: #4F46E5; text-decoration: none;">
                                🤗 Hugging Face
                            </a>
                        </div>
                    </div>
                </div>
                <p style="margin: 1rem 0 0; text-align: center; color: #9CA3AF; font-size: 0.875rem;">
                    © 2024 Pixeltable | Apache License 2.0
                </p>
            </div>
            """
        )

    return demo

if __name__ == "__main__":
    init_api_keys()
    demo = create_interface()
    demo.launch(
        allowed_paths=[PIXELTABLE_MEDIA_DIR],
        show_api=False
    )