swyx commited on
Commit
bc2848b
1 Parent(s): 893a228

working initial version

Browse files
Files changed (3) hide show
  1. .cursorrules +6 -1
  2. app.py +241 -37
  3. requirements.txt +3 -1
.cursorrules CHANGED
@@ -1,4 +1,5 @@
1
  You use Python 3.12 and Frameworks: gradio.
 
2
  Here are some best practices and rules you must follow:
3
 
4
 
@@ -15,4 +16,8 @@ Here are some best practices and rules you must follow:
15
  11. **Minimize Global Variables**: Limit the use of global variables to reduce side effects and improve code maintainability. Use function parameters and return values to pass data instead.
16
  12. **Use Context Managers**: Employ context managers (with statements) for resource management, such as file operations or database connections, to ensure proper cleanup.
17
 
18
- These rules will help you write clean, efficient, and maintainable Python code.
 
 
 
 
 
1
  You use Python 3.12 and Frameworks: gradio.
2
+
3
  Here are some best practices and rules you must follow:
4
 
5
 
 
16
  11. **Minimize Global Variables**: Limit the use of global variables to reduce side effects and improve code maintainability. Use function parameters and return values to pass data instead.
17
  12. **Use Context Managers**: Employ context managers (with statements) for resource management, such as file operations or database connections, to ensure proper cleanup.
18
 
19
+ These rules will help you write clean, efficient, and maintainable Python code.
20
+
21
+ Common mistakes you make:
22
+
23
+ 1. You hallucinate Gradio APIs. Double-check the docs, or list a few options and ask the user to pick.
app.py CHANGED
@@ -1,27 +1,152 @@
1
  import gradio as gr
 
 
 
2
  from huggingface_hub import InferenceClient
 
 
 
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
 
9
 
10
  def respond(
11
- message,
12
- history: list[tuple[str, str]],
13
- system_message,
14
- max_tokens,
15
- temperature,
16
- top_p,
17
- ):
18
- messages = [{"role": "system", "content": system_message}]
19
-
20
- for val in history:
21
- if val[0]:
22
- messages.append({"role": "user", "content": val[0]})
23
- if val[1]:
24
- messages.append({"role": "assistant", "content": val[1]})
 
 
25
 
26
  messages.append({"role": "user", "content": message})
27
 
@@ -35,30 +160,109 @@ def respond(
35
  top_p=top_p,
36
  ):
37
  token = message.choices[0].delta.content
38
-
39
  response += token
40
  yield response
41
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
- )
61
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
  if __name__ == "__main__":
64
  demo.launch()
 
1
  import gradio as gr
2
+ import asyncio
3
+ from typing import List, Dict, Any, Tuple, Generator
4
+ from beeai import Bee
5
  from huggingface_hub import InferenceClient
6
+ import logging
7
+ from datetime import datetime
8
+ import pytz
9
+ import pandas as pd
10
+ from functools import partial
11
 
12
# Configure file-based logging; filemode='w' truncates app.log on every start.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    filename='app.log',
                    filemode='w')

# Module-level pagination state shared by list_conversations / change_page.
current_page = 1
total_pages = 1
21
+
22
async def fetch_conversations(api_key: str, page: int = 1) -> Dict[str, Any]:
    """Retrieve one page of the current user's conversations from the Bee API."""
    client = Bee(api_key)
    logging.info(f"Fetching conversations for user 'me', page {page}")
    return await client.get_conversations("me", page=page)
27
+
28
def format_end_time(end_time: str) -> str:
    """Convert an ISO-8601 UTC timestamp (with optional 'Z' suffix) to a US/Pacific display string."""
    parsed = datetime.fromisoformat(end_time.replace('Z', '+00:00'))
    localized = parsed.astimezone(pytz.timezone('US/Pacific'))
    return localized.strftime("%Y-%m-%d %I:%M:%S %p PT")
32
+
33
async def fetch_conversation(api_key: str, conversation_id: int) -> Dict[str, Any]:
    """Fetch a single conversation by id.

    On failure, returns a dict with an 'error' key instead of raising, so
    callers can surface the message in the UI.
    """
    client = Bee(api_key)
    try:
        logging.info(f"Fetching conversation with ID: {conversation_id}")
        payload = await client.get_conversation("me", conversation_id)
        logging.debug(f"Raw conversation data: {payload}")
        return payload
    except Exception as e:
        logging.error(f"Error fetching conversation {conversation_id}: {str(e)}")
        return {"error": f"Failed to fetch conversation: {str(e)}"}
43
+
44
def format_conversation(data: Dict[str, Any]) -> str:
    """Render a conversation payload as a Markdown document.

    Expects ``data["conversation"]`` to be a dict with at least ``id`` and
    ``summary``; ``start_time``/``end_time``, ``short_summary`` and
    ``transcriptions`` are optional.  Any failure is reported as an error
    string (including the raw payload) rather than raised.
    """
    # Bound before the try so the except handler can always reference it
    # (previously a failure in data.get() left it unbound -> NameError).
    conversation: Dict[str, Any] = {}
    try:
        conversation = data.get("conversation", {})
        logging.debug(f"Conversation keys: {conversation.keys()}")
        formatted = f"# Conversation Details {conversation['id']}\n\n"

        # Render start/end times in US/Pacific for display.
        start_time = conversation.get('start_time')
        end_time = conversation.get('end_time')
        if start_time and end_time:
            start_dt = datetime.fromisoformat(start_time.replace('Z', '+00:00'))
            end_dt = datetime.fromisoformat(end_time.replace('Z', '+00:00'))
            pacific_tz = pytz.timezone('US/Pacific')
            start_pacific = start_dt.astimezone(pacific_tz)
            end_pacific = end_dt.astimezone(pacific_tz)

            # Collapse to a single range when both ends fall on the same day.
            if start_pacific.date() == end_pacific.date():
                formatted += f"**Time**: {start_pacific.strftime('%I:%M %p')} - {end_pacific.strftime('%I:%M %p')} PT\n"
            else:
                formatted += f"**Start Time**: {start_pacific.strftime('%Y-%m-%d %I:%M %p')} PT\n"
                formatted += f"**End Time**: {end_pacific.strftime('%Y-%m-%d %I:%M %p')} PT\n"
        elif start_time:
            formatted += f"**Start Time**: {format_end_time(start_time)}\n"
        elif end_time:
            formatted += f"**End Time**: {format_end_time(end_time)}\n"

        if 'short_summary' in conversation:
            formatted += f"\n## Short Summary\n\n{conversation['short_summary']}\n"

        formatted += "\n"  # spacing before the full summary

        # 'summary' is required; a missing key is reported via the except path.
        formatted += f"\n{conversation['summary']}"

        # Only the first transcription's utterances are rendered — TODO confirm
        # multiple transcriptions per conversation never occur.
        if 'transcriptions' in conversation and conversation['transcriptions']:
            formatted += "\n\n## Transcriptions\n\n"
            for utterance in conversation['transcriptions'][0].get('utterances', []):
                spoken_at = utterance.get('spoken_at')
                speaker = utterance.get('speaker')
                text = utterance.get('text')
                formatted += f"Speaker **[{speaker}]({spoken_at})**: {text}\n\n"

        return formatted
    except Exception as e:
        logging.error(f"Error formatting conversation: {str(e)}")
        return f"Error formatting conversation: {str(e)}\n\nRaw data: {conversation}"
96
+
97
async def list_conversations(api_key: str) -> Tuple[pd.DataFrame, str, int, int]:
    """Fetch the current page of conversations and shape them for the UI table.

    Returns (dataframe, page-info string, current page, total pages) and
    refreshes the module-level pagination state.
    """
    global current_page, total_pages
    conversations_data = await fetch_conversations(api_key, current_page)
    conversations = conversations_data.get("conversations", [])
    # NOTE(review): assumes the API reports pagination as 'totalPages' — confirm.
    total_pages = conversations_data.get("totalPages", 1)
    rows = [
        {
            "ID": c['id'],
            # [:50], not [1:50] — the old slice silently dropped the first character.
            "Summary": c['short_summary'][:50] + "...",
            "End Time": format_end_time(c['end_time']),
        }
        for c in conversations
    ]
    # Passing columns= pins column order (ID first) even when there are no rows,
    # which previously raised a KeyError on the reorder step.
    df = pd.DataFrame(rows, columns=["ID", "End Time", "Summary"])
    info = f"Page {current_page} of {total_pages}"
    return df, info, current_page, total_pages
113
+
114
async def display_conversation(api_key: str, conversation_id: int) -> str:
    """Fetch one conversation and return it rendered as Markdown (or an error string)."""
    payload = await fetch_conversation(api_key, conversation_id)
    if "error" in payload:
        logging.error(f"Error in full_conversation: {payload['error']}")
        return payload["error"]
    return format_conversation(payload)
121
+
122
async def delete_conversation(api_key: str, conversation_id: int) -> str:
    """Delete a conversation via the Bee API; return a human-readable status message."""
    client = Bee(api_key)
    try:
        await client.delete_conversation("me", conversation_id)
    except Exception as e:
        logging.error(f"Error deleting conversation {conversation_id}: {str(e)}")
        return f"Failed to delete conversation: {str(e)}"
    return f"Conversation {conversation_id} deleted successfully."
130
 
# Shared Hugging Face Inference API client used by the chat tab's `respond` callback.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
132
 
133
  def respond(
134
+ message: str,
135
+ history: List[Tuple[str, str]],
136
+ system_message: str,
137
+ max_tokens: int,
138
+ temperature: float,
139
+ top_p: float,
140
+ conversation_context: str
141
+ ) -> Generator[str, None, None]:
142
+ messages = [
143
+ {"role": "system", "content": system_message},
144
+ {"role": "system", "content": f"Here's the context of the conversation: {conversation_context}"}
145
+ ]
146
+
147
+ for human, assistant in history:
148
+ messages.append({"role": "user", "content": human})
149
+ messages.append({"role": "assistant", "content": assistant})
150
 
151
  messages.append({"role": "user", "content": message})
152
 
 
160
  top_p=top_p,
161
  ):
162
  token = message.choices[0].delta.content
 
163
  response += token
164
  yield response
165
 
166
def get_selected_conversation_id(table_data):
    """Return the conversation ID from the first row of the table data.

    The ID is assumed to live in the first column; returns None when the
    table is empty or missing.
    """
    if not table_data:
        return None
    return table_data[0][0]
172
+
173
async def delete_selected_conversation(api_key: str, conversation_id: int):
    """Delete the selected conversation and refresh the conversation table.

    Returns (status message, dataframe, info text, delete-button update,
    details text) matching the click handler's output components.
    """
    if not api_key or not conversation_id:
        return "No conversation selected or API key missing", None, None, gr.update(visible=False), ""

    logging.info(f"Deleting conversation with ID: {conversation_id}")

    try:
        status = await delete_conversation(api_key, conversation_id)
        df, info, _, _ = await list_conversations(api_key)
        return status, df, info, gr.update(visible=False), ""
    except Exception as e:
        message = f"Error deleting conversation: {str(e)}"
        logging.error(message)
        return message, None, None, gr.update(visible=False), ""
187
+
188
# --- UI layout -------------------------------------------------------------
# Left column: API key entry plus a paginated conversation table.
# Right column: rendered conversation details plus a delete button.
# Below: a ChatInterface that receives the rendered details as extra context.
with gr.Blocks() as demo:
    gr.Markdown("# Bee AI Conversation Viewer and Chat")

    with gr.Row():
        with gr.Column(scale=1):
            api_key = gr.Textbox(label="Enter your Bee API Key", type="password")
            load_button = gr.Button("Load Conversations")
            conversation_table = gr.Dataframe(label="Select a conversation (CLICK ON THE ID)", interactive=True)
            info_text = gr.Textbox(label="Info", interactive=False)
            prev_page = gr.Button("Previous Page")
            next_page = gr.Button("Next Page")

        with gr.Column(scale=2):
            conversation_details = gr.Markdown(
                label="Conversation Details",
                value="Enter your Bee API Key, click 'Load Conversations', then select a conversation to view details here."
            )
            delete_button = gr.Button("Delete Conversation", visible=False)

    # Holds the ID of the row the user last clicked in the table.
    selected_conversation_id = gr.State(None)

    async def load_conversations(api_key):
        """Load the current page; returns table, info text, delete-button
        visibility, and enabled/disabled updates for the pager buttons."""
        try:
            df, info, current_page, total_pages = await list_conversations(api_key)
            prev_disabled = current_page == 1
            next_disabled = current_page == total_pages
            return df, info, gr.update(visible=True), gr.update(interactive=not prev_disabled), gr.update(interactive=not next_disabled)
        except Exception as e:
            error_message = f"Error loading conversations: {str(e)}"
            logging.error(error_message)
            return None, error_message, gr.update(visible=False), gr.update(interactive=False), gr.update(interactive=False)

    load_button.click(load_conversations, inputs=[api_key], outputs=[conversation_table, info_text, delete_button, prev_page, next_page])

    async def update_conversation(api_key, evt: gr.SelectData):
        """Fires on table-cell selection; evt.value is expected to be the ID
        (the table label tells the user to click the ID column)."""
        try:
            logging.info(f"SelectData event: index={evt.index}, value={evt.value}")
            conversation_id = int(evt.value)
            logging.info(f"Updating conversation with ID: {conversation_id}")
            formatted_conversation = await display_conversation(api_key, conversation_id)
            return formatted_conversation, gr.update(visible=True), conversation_id
        except Exception as e:
            error_message = f"Error updating conversation: {str(e)}"
            logging.error(error_message)
            return error_message, gr.update(visible=False), None

    conversation_table.select(update_conversation, inputs=[api_key], outputs=[conversation_details, delete_button, selected_conversation_id])

    # NOTE(review): conversation_details appears twice in outputs — the final ""
    # return value overwrites the status message mapped to it first; confirm intended.
    delete_button.click(
        delete_selected_conversation,
        inputs=[api_key, selected_conversation_id],
        outputs=[conversation_details, conversation_table, info_text, delete_button, conversation_details]
    )

    async def change_page(api_key: str, direction: int) -> Tuple[pd.DataFrame, str, gr.update, gr.update]:
        """Step the module-level page counter by `direction` and reload."""
        global current_page, total_pages
        current_page += direction
        current_page = max(1, min(current_page, total_pages))  # Ensure page is within bounds
        df, info, current_page, total_pages = await list_conversations(api_key)
        prev_disabled = current_page == 1
        next_disabled = current_page == total_pages
        return df, info, gr.update(interactive=not prev_disabled), gr.update(interactive=not next_disabled)

    # partial() pins the direction; gradio supplies api_key from inputs.
    prev_page.click(partial(change_page, direction=-1), inputs=[api_key], outputs=[conversation_table, info_text, prev_page, next_page])
    next_page.click(partial(change_page, direction=1), inputs=[api_key], outputs=[conversation_table, info_text, prev_page, next_page])

    gr.Markdown("## Chat about the conversation")

    # The rendered details Markdown is forwarded to `respond` as its
    # conversation_context argument.
    chat_interface = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Textbox(value="You are a friendly Chatbot. Analyze and discuss the given conversation context.", label="System message"),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
            conversation_details
        ],
    )
266
 
267
if __name__ == "__main__":
    # Launch the Gradio app when executed directly.
    demo.launch()
requirements.txt CHANGED
@@ -1,4 +1,6 @@
1
  huggingface_hub==0.25.2
2
  beeai==1.0.1
3
  gradio==5.1.0
4
- gradio-tools==0.0.9
 
 
 
1
  huggingface_hub==0.25.2
2
  beeai==1.0.1
3
  gradio==5.1.0
4
+ gradio-tools==0.0.9
5
+ pytz==2024.2
6
+ pandas==2.2.3