Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .gitignore +3 -0
- README.md +30 -8
- chatgpt.py +505 -0
- interview_protocol.py +21 -0
- requirements.txt +67 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
.idea/
|
3 |
+
__pycache__/
|
README.md
CHANGED
@@ -1,12 +1,34 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: yellow
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
|
|
11 |
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
title: llm-autobiography
|
3 |
+
app_file: chatgpt.py
|
|
|
|
|
4 |
sdk: gradio
|
5 |
+
sdk_version: 3.50.2
|
|
|
|
|
6 |
---
|
7 |
+
# Chatbot Frontend
|
8 |
|
9 |
+
## Prerequisites
|
10 |
+
- Python 3.x
|
11 |
+
- pip
|
12 |
+
|
13 |
+
## Installation
|
14 |
+
1. Clone the repository:
|
15 |
+
```
|
16 |
+
git clone https://github.com/Zhuoxuan-Zhang/chatbot-ui.git
|
17 |
+
```
|
18 |
+
2. Navigate to the project directory:
|
19 |
+
```
|
20 |
+
cd chatbot-ui
|
21 |
+
```
|
22 |
+
3. Install the required packages:
|
23 |
+
```
|
24 |
+
pip install -r requirements.txt
|
25 |
+
```
|
26 |
+
|
27 |
+
## Running the Application
|
28 |
+
Start the chatbot by running:
|
29 |
+
```
|
30 |
+
python chatgpt.py
|
31 |
+
```
|
32 |
+
|
33 |
+
## Usage
|
34 |
+
- Open Chrome and visit `http://localhost:7860` to interact with the chatbot.
|
chatgpt.py
ADDED
@@ -0,0 +1,505 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import whisper
|
3 |
+
import asyncio
|
4 |
+
import httpx
|
5 |
+
import tempfile
|
6 |
+
import os
|
7 |
+
import requests
|
8 |
+
import time
|
9 |
+
import threading
|
10 |
+
from datetime import datetime, timedelta
|
11 |
+
|
12 |
+
# Shared requests session — NOTE(review): unused by the handlers below, which
# all use httpx; presumably left over from an earlier version — confirm it
# can be removed.
session = requests.Session()

from interview_protocol import protocols as interview_protocols

# Whisper speech-to-text model used to transcribe microphone recordings.
model = whisper.load_model("base")

# Backend API root; switch to the commented localhost URL for local dev.
base_url = "https://llm4socialisolation-fd4082d0a518.herokuapp.com"
# base_url = "http://localhost:8080"

timeout = 60  # HTTP timeout (seconds) for every backend call below
concurrency_count=10  # NOTE(review): defined but not referenced in this file

# mapping between display names and internal chatbot_type values
display_to_value = {
    'Echo': 'enhanced',
    'Breeze': 'baseline'
}

value_to_display = {
    'enhanced': 'Echo',
    'baseline': 'Breeze'
}
|
34 |
+
|
35 |
+
def get_method_index(chapter, method):
    """Return the position of *method* within the flattened topic list.

    The index is global across all chapters (the *chapter* argument is
    accepted for interface compatibility but not used).  Raises ValueError
    if the topic is not present in any chapter.
    """
    flattened = [topic for topics in interview_protocols.values() for topic in topics]
    return flattened.index(method)
|
41 |
+
|
42 |
+
async def initialization(api_key, chapter_name, topic_name, username, prompts, chatbot_type):
    """Register a session with the backend's /api/initialization endpoint.

    Args:
        api_key: OpenAI key forwarded to the backend.
        chapter_name / topic_name: selected interview-protocol entries.
        username: session owner.
        prompts: dict of prompt overrides, merged into the request body.
        chatbot_type: internal type string ('enhanced' or 'baseline').

    Returns:
        A human-readable status string describing success or failure.
    """
    url = f"{base_url}/api/initialization"
    headers = {'Content-Type': 'application/json'}
    data = {
        'api_key': api_key,
        'chapter_name': chapter_name,
        'topic_name': topic_name,
        'username': username,
        'chatbot_type': chatbot_type,
        **prompts
    }
    async with httpx.AsyncClient(timeout=timeout) as client:
        try:
            response = await client.post(url, json=data, headers=headers)
            if response.status_code == 200:
                return "Initialization successful."
            else:
                return f"Initialization failed: {response.text}"
        # FIX: httpx raises httpx.TimeoutException on timeout, not
        # asyncio.TimeoutError, so the original branch could never fire and
        # timeouts fell through to the generic handler.
        except httpx.TimeoutException:
            print("The request timed out")
            return "Request timed out during initialization."
        except Exception as e:
            return f"Error in initialization: {str(e)}"
|
65 |
+
|
66 |
+
def fetch_default_prompts(chatbot_type):
    """Synchronously fetch the default prompt texts for a chatbot type.

    Returns the backend's JSON dict of prompts, or {} on any failure.

    NOTE(review): the request targets the service root with only a query
    string rather than a named endpoint — confirm the backend actually
    serves prompts at "/".
    """
    url = f"{base_url}?chatbot_type={chatbot_type}"
    try:
        response = httpx.get(url, timeout=timeout)
        if response.status_code == 200:
            prompts = response.json()
            print(prompts)  # debug: log the fetched prompt payload
            return prompts
        else:
            print(f"Failed to fetch prompts: {response.status_code} - {response.text}")
            return {}
    except Exception as e:
        print(f"Error fetching prompts: {str(e)}")
        return {}
|
80 |
+
|
81 |
+
async def get_backend_response(api_key, patient_prompt, username, chatbot_type):
    """POST the user's message to the backend doctor endpoint.

    Returns the parsed JSON payload on success, or an error string on any
    failure.  The api_key parameter is accepted for interface compatibility
    but is not forwarded in the request body.
    """
    payload = {
        'username': username,
        'patient_prompt': patient_prompt,
        'chatbot_type': chatbot_type
    }
    async with httpx.AsyncClient(timeout=timeout) as client:
        try:
            reply = await client.post(
                f"{base_url}/responses/doctor",
                json=payload,
                headers={'Content-Type': 'application/json'},
            )
            if reply.status_code != 200:
                return f"Failed to fetch response from backend: {reply.text}"
            return reply.json()
        except Exception as e:
            return f"Error contacting backend service: {str(e)}"
|
99 |
+
|
100 |
+
async def save_conversation_and_memory(username, chatbot_type):
    """Ask the backend to persist the current conversation and memory graph.

    Returns the backend's status message string, or an error string when the
    request fails.
    """
    payload = {'username': username, 'chatbot_type': chatbot_type}
    async with httpx.AsyncClient(timeout=timeout) as client:
        try:
            reply = await client.post(
                f"{base_url}/save/end_and_save",
                json=payload,
                headers={'Content-Type': 'application/json'},
            )
            if reply.status_code != 200:
                return f"Failed to save conversations and memory graph: {reply.text}"
            return reply.json().get('message', 'Saving Error!')
        except Exception as e:
            return f"Error contacting backend service: {str(e)}"
|
117 |
+
|
118 |
+
async def get_conversation_histories(username, chatbot_type):
    """Download every stored conversation for *username* from the backend.

    Returns the backend's JSON list of conversation entries; any failure
    (HTTP error, network error, bad JSON) yields [] so callers degrade
    gracefully.
    """
    payload = {'username': username, 'chatbot_type': chatbot_type}
    async with httpx.AsyncClient(timeout=timeout) as client:
        try:
            reply = await client.post(
                f"{base_url}/save/download_conversations",
                json=payload,
                headers={'Content-Type': 'application/json'},
            )
            if reply.status_code == 200:
                return reply.json()
            return []
        except Exception:
            return []
|
135 |
+
|
136 |
+
def download_conversations(username, chatbot_type):
    """Fetch all saved conversations and write each one to a temp .txt file.

    Returns a list of file paths suitable for a Gradio Files output; an empty
    list when nothing could be fetched.
    """
    conversation_histories = asyncio.run(get_conversation_histories(username, chatbot_type))
    files = []
    temp_dir = tempfile.mkdtemp()
    for conversation_entry in conversation_histories:
        file_name = conversation_entry.get('file_name', f"Conversation_{len(files)+1}.txt")
        conversation = conversation_entry.get('conversation', [])
        conversation_text = ""
        for message_pair in conversation:
            if isinstance(message_pair, list) and len(message_pair) == 2:
                speaker, message = message_pair
                conversation_text += f"{speaker.capitalize()}: {message}\n\n"
            else:
                # defensive: keep malformed entries visible instead of dropping them
                conversation_text += f"Unknown format: {message_pair}\n\n"
        temp_file_path = os.path.join(temp_dir, file_name)
        # FIX: write UTF-8 explicitly — transcripts may contain non-ASCII and
        # the platform default encoding (e.g. cp1252 on Windows) would raise
        # UnicodeEncodeError.
        with open(temp_file_path, 'w', encoding='utf-8') as temp_file:
            temp_file.write(conversation_text)
        files.append(temp_file_path)
    return files
|
155 |
+
|
156 |
+
async def get_biography(username, chatbot_type):
    """Request a generated autobiography for *username* from the backend.

    Returns the biography text on success, otherwise an error message string.
    """
    payload = {'username': username, 'chatbot_type': chatbot_type}
    async with httpx.AsyncClient(timeout=timeout) as client:
        try:
            reply = await client.post(
                f"{base_url}/save/generate_autobiography",
                json=payload,
                headers={'Content-Type': 'application/json'},
            )
            if reply.status_code != 200:
                return "Failed to generate biography."
            return reply.json().get('biography', '')
        except Exception as e:
            return f"Error contacting backend service: {str(e)}"
|
174 |
+
|
175 |
+
def download_biography(username, chatbot_type):
    """Generate the biography, write it to a temp file, and show it in the UI.

    Returns (file path or hidden-file update, textbox update).  On failure the
    file output is hidden and the textbox displays the error text.
    """
    biography_text = asyncio.run(get_biography(username, chatbot_type))
    # substring checks mirror the error strings produced by get_biography
    if not biography_text or "Failed" in biography_text or "Error" in biography_text:
        return gr.update(value=None, visible=False), gr.update(value=biography_text, visible=True)
    temp_dir = tempfile.mkdtemp()
    temp_file_path = os.path.join(temp_dir, "biography.txt")
    # FIX: write UTF-8 explicitly — the biography may contain non-ASCII and
    # the platform default encoding (e.g. cp1252 on Windows) would raise
    # UnicodeEncodeError.
    with open(temp_file_path, 'w', encoding='utf-8') as temp_file:
        temp_file.write(biography_text)
    return temp_file_path, gr.update(value=biography_text, visible=True)
|
184 |
+
|
185 |
+
def transcribe_audio(audio_file):
    """Transcribe *audio_file* (a path) with the module-level Whisper model."""
    transcription = model.transcribe(audio_file)["text"]
    return transcription
|
188 |
+
|
189 |
+
def submit_text_and_respond(edited_text, api_key, username, history, chatbot_type):
    """Submit the (possibly edited) transcription and append the bot's reply.

    Returns (updated chat history, cleared transcription box, memory-table
    rows).  The backend helpers return a plain string on failure; that string
    is surfaced directly in the chat.
    """
    response = asyncio.run(get_backend_response(api_key, edited_text, username, chatbot_type))
    if isinstance(response, str):
        # error path: show the backend error message as the bot's turn
        history.append((edited_text, response))
        return history, "", []
    # FIX: guard the nested lookups — the original indexed
    # response['doctor_response']['response'] directly and crashed with a
    # KeyError on any malformed backend payload.
    doctor_response = response.get('doctor_response', {}).get('response', '')
    memory_event = response.get('memory_events', [])
    history.append((edited_text, doctor_response))
    memory_graph = update_memory_graph(memory_event)
    return history, "", memory_graph
|
201 |
+
|
202 |
+
def set_initialize_button(api_key_input, chapter_name, topic_name, username_input,
                          system_prompt_text, conv_instruction_prompt_text, therapy_prompt_text, autobio_prompt_text, chatbot_display_name):
    """Click handler for the Initialize button.

    Resolves the display name to the internal chatbot_type, bundles the four
    prompt overrides, and runs the async backend initialization synchronously.
    Returns (status message, api_key, chatbot_type) to populate the status box
    and the two session States.

    NOTE(review): sends the autobiography prompt under key 'autobio_prompt'
    while update_prompts reads 'autobio_generation_prompt' from the backend —
    confirm the backend expects this asymmetry.
    """
    chatbot_type = display_to_value.get(chatbot_display_name, 'enhanced')
    prompts = {
        'system_prompt': system_prompt_text,
        'conv_instruction_prompt': conv_instruction_prompt_text,
        'therapy_prompt': therapy_prompt_text,
        'autobio_prompt': autobio_prompt_text
    }
    message = asyncio.run(initialization(api_key_input, chapter_name, topic_name, username_input, prompts, chatbot_type))
    print(message)
    return message, api_key_input, chatbot_type
|
214 |
+
|
215 |
+
def save_conversation(username, chatbot_type):
    """Blocking wrapper around save_conversation_and_memory for Gradio callbacks."""
    return asyncio.run(save_conversation_and_memory(username, chatbot_type))
|
218 |
+
|
219 |
+
def start_recording(audio_file):
    """Transcribe a finished recording; empty/None input yields "".

    Transcription failures are returned as an error string rather than
    raised, so the UI textbox always gets a value.
    """
    if not audio_file:
        return ""
    try:
        return transcribe_audio(audio_file)
    except Exception as e:
        return f"Failed to transcribe: {str(e)}"
|
227 |
+
|
228 |
+
def update_methods(chapter):
    """Repopulate the topic dropdown when the chapter changes.

    Selects the chapter's first topic as the new value.
    """
    return gr.update(choices=interview_protocols[chapter], value=interview_protocols[chapter][0])
|
230 |
+
|
231 |
+
def update_memory_graph(memory_data):
    """Flatten backend memory-event dicts into rows for the Memory Events table.

    Each row is [date, topic, description, people involved]; any missing
    field becomes the empty string.
    """
    columns = ('date', 'topic', 'event_description', 'people_involved')
    return [[event.get(column, '') for column in columns] for event in memory_data]
|
241 |
+
|
242 |
+
def update_prompts(chatbot_display_name):
    """Refresh the four prompt textboxes with the backend defaults for the
    selected chatbot.

    Returns gr.update payloads for the (system, conversation-instruction,
    therapy, autobiography-generation) prompts, in that order.  Missing keys
    fall back to empty strings.
    """
    chatbot_type = display_to_value.get(chatbot_display_name, 'enhanced')
    prompts = fetch_default_prompts(chatbot_type)
    return (
        gr.update(value=prompts.get('system_prompt', '')),
        gr.update(value=prompts.get('conv_instruction_prompt', '')),
        gr.update(value=prompts.get('therapy_prompt', '')),
        gr.update(value=prompts.get('autobio_generation_prompt', '')),
    )
|
251 |
+
|
252 |
+
def update_chatbot_type(chatbot_display_name):
    """Map a UI display name ('Echo'/'Breeze') to its internal chatbot_type,
    defaulting to 'enhanced' for unknown names."""
    return display_to_value.get(chatbot_display_name, 'enhanced')
|
255 |
+
|
256 |
+
# Function to start the periodic toggle
|
257 |
+
def start_timer():
    """Arm the session countdown.

    Returns (running_flag, deadline), where the deadline is 8 minutes from now.
    """
    deadline = datetime.now() + timedelta(minutes=8)
    return True, deadline
|
260 |
+
|
261 |
+
def reset_timer():
    """Stop the countdown and restore the idle timer label.

    FIX: the label now matches the timer display's initial value
    "Time remaining: 08:00" — the original returned "Timer remaining: 8:00",
    which both misspelled the prefix and dropped the zero-padding used by
    periodic_call's MM:SS output.
    """
    is_running = False
    return is_running, "Time remaining: 08:00"
|
264 |
+
|
265 |
+
|
266 |
+
# Async function to manage periodic updates, running every second
|
267 |
+
def periodic_call(is_running, target_timestamp):
    """Tick handler invoked every second while the timer is active.

    Args:
        is_running: whether the countdown has been started.
        target_timestamp: datetime at which the countdown reaches zero.

    Returns the label text "Time remaining: MM:SS", clamped at 00:00 once
    the deadline passes; when not running, returns the idle label.
    """
    if not is_running:
        # FIX: zero-padded to match the initial display value and the MM:SS
        # format produced below (was 'Time remaining: 8:00').
        return 'Time remaining: 08:00'
    time_difference = target_timestamp - datetime.now()
    seconds_left = max(0, int(round(time_difference.total_seconds())))
    minutes, seconds = divmod(seconds_left, 60)
    return f'Time remaining: {minutes:02}:{seconds:02}'
|
281 |
+
|
282 |
+
# initialize prompts with empty strings
# NOTE(review): initial_prompts does not appear to be referenced elsewhere in
# this file — confirm whether it can be removed.
initial_prompts = {'system_prompt': '', 'conv_instruction_prompt': '', 'therapy_prompt': '', 'autobio_generation_prompt': ''}

# CSS to keep the buttons small (applied to the timer Start/Reset buttons
# via their elem_id values)
css = """
#start_button, #reset_button {
    padding: 4px 10px !important;
    font-size: 12px !important;
    width: auto !important;
}
"""
|
293 |
+
|
294 |
+
# ---------------------------------------------------------------------------
# Gradio UI.  Left column: settings (chatbot choice, interview protocol,
# prompt overrides, user credentials).  Right column: session timer, chat
# window, recording/transcription inputs, and save/download actions.
# ---------------------------------------------------------------------------
with gr.Blocks(css=css) as app:
    chatbot_type_state = gr.State('enhanced')   # internal value: 'enhanced' | 'baseline'
    api_key_state = gr.State()                  # API key captured at initialization
    prompt_visibility_state = gr.State(False)   # whether the prompt editor is shown

    # countdown-timer state
    is_running = gr.State()
    target_timestamp = gr.State()

    with gr.Row():
        with gr.Column(scale=1, min_width=250):
            gr.Markdown("## Settings")

            # chatbot Type Selection
            with gr.Box():
                gr.Markdown("### Chatbot Selection")
                chatbot_type_dropdown = gr.Dropdown(
                    label="Select Chatbot Type",
                    choices=['Echo', 'Breeze'],
                    value='Echo',
                )
                chatbot_type_dropdown.change(
                    fn=update_chatbot_type,
                    inputs=[chatbot_type_dropdown],
                    outputs=[chatbot_type_state]
                )

            # fetch initial prompts based on the default chatbot type
            system_prompt_value, conv_instruction_prompt_value, therapy_prompt_value, autobio_prompt_value = update_prompts('Echo')

            # interview protocol selection
            with gr.Box():
                gr.Markdown("### Interview Protocol")
                chapter_dropdown = gr.Dropdown(
                    label="Select Chapter",
                    choices=list(interview_protocols.keys()),
                    value=list(interview_protocols.keys())[1],
                )
                method_dropdown = gr.Dropdown(
                    label="Select Topic",
                    choices=interview_protocols[chapter_dropdown.value],
                    value=interview_protocols[chapter_dropdown.value][3],
                )

                chapter_dropdown.change(
                    fn=update_methods,
                    inputs=[chapter_dropdown],
                    outputs=[method_dropdown]
                )

            # Update states when selections change
            def update_chapter(chapter):
                return chapter

            def update_method(method):
                return method

            chapter_state = gr.State()
            method_state = gr.State()

            chapter_dropdown.change(
                fn=update_chapter,
                inputs=[chapter_dropdown],
                outputs=[chapter_state]
            )

            method_dropdown.change(
                fn=update_method,
                inputs=[method_dropdown],
                outputs=[method_state]
            )

            # customize Prompts
            with gr.Box():
                toggle_prompts_button = gr.Button("Show Prompts")

                # wrap the prompts in a component with initial visibility set to False
                with gr.Column(visible=False) as prompt_section:
                    gr.Markdown("### Customize Prompts")
                    system_prompt = gr.Textbox(
                        label="System Prompt",
                        placeholder="Enter the system prompt here.",
                        value=system_prompt_value['value']
                    )
                    conv_instruction_prompt = gr.Textbox(
                        label="Conversation Instruction Prompt",
                        placeholder="Enter the instruction for each conversation here.",
                        value=conv_instruction_prompt_value['value']
                    )
                    therapy_prompt = gr.Textbox(
                        label="Therapy Prompt",
                        placeholder="Enter the instruction for reminiscence therapy.",
                        value=therapy_prompt_value['value']
                    )
                    autobio_prompt = gr.Textbox(
                        label="Autobiography Generation Prompt",
                        placeholder="Enter the instruction for autobiography generation.",
                        value=autobio_prompt_value['value']
                    )

                # update prompts when chatbot_type changes
                chatbot_type_dropdown.change(
                    fn=update_prompts,
                    inputs=[chatbot_type_dropdown],
                    outputs=[system_prompt, conv_instruction_prompt, therapy_prompt, autobio_prompt]
                )

            with gr.Box():
                gr.Markdown("### User Information")
                username_input = gr.Textbox(
                    label="Username", placeholder="Enter your username"
                )

                api_key_input = gr.Textbox(
                    label="OpenAI API Key",
                    placeholder="Enter your openai api key",
                    # SECURITY FIX: a live OpenAI API key was hard-coded here
                    # and shipped with the app.  Read it from the environment
                    # instead; the leaked key must be revoked in the OpenAI
                    # dashboard regardless.
                    value=os.environ.get("OPENAI_API_KEY", ""),
                    type="password"
                )

                initialize_button = gr.Button("Initialize", variant="primary", size="large")
                initialization_status = gr.Textbox(
                    label="Status", interactive=False, placeholder="Initialization status will appear here."
                )

                initialize_button.click(
                    fn=set_initialize_button,
                    inputs=[api_key_input, chapter_dropdown, method_dropdown, username_input,
                            system_prompt, conv_instruction_prompt, therapy_prompt, autobio_prompt, chatbot_type_dropdown],
                    outputs=[initialization_status, api_key_state, chatbot_type_state],
                )

            # define the function to toggle prompts visibility
            def toggle_prompts(visibility):
                new_visibility = not visibility
                button_text = "Hide Prompts" if new_visibility else "Show Prompts"
                return gr.update(value=button_text), gr.update(visible=new_visibility), new_visibility

            toggle_prompts_button.click(
                fn=toggle_prompts,
                inputs=[prompt_visibility_state],
                outputs=[toggle_prompts_button, prompt_section, prompt_visibility_state]
            )

        with gr.Column(scale=3):
            with gr.Row():
                timer_display = gr.Textbox(value="Time remaining: 08:00", label="")
                start_button = gr.Button("Start Timer", elem_id="start_button")

            # start the countdown, then refresh the label once per second
            start_button.click(start_timer, outputs=[is_running, target_timestamp]).then(
                periodic_call, inputs=[is_running, target_timestamp], outputs=timer_display, every=1)

            chatbot = gr.Chatbot(label="Chat here for autobiography generation", height=500)

            with gr.Row():
                transcription_box = gr.Textbox(
                    label="Transcription (You can edit this)", lines=3
                )
                audio_input = gr.Audio(
                    source="microphone", type="filepath", label="🎤 Record Audio"
                )

            with gr.Row():
                submit_button = gr.Button("Submit", variant="primary", size="large")
                save_conversation_button = gr.Button("End and Save Conversation", variant="secondary")
                download_button = gr.Button("Download Conversations", variant="secondary")
                download_biography_button = gr.Button("Download Biography", variant="secondary")

            memory_graph_table = gr.Dataframe(
                headers=["Date", "Topic", "Description", "People Involved"],
                datatype=["str", "str", "str", "str"],
                interactive=False,
                label="Memory Events",
                max_rows=5
            )

            biography_textbox = gr.Textbox(label="Autobiography", visible=False)

            # auto-transcribe as soon as a recording is finished
            audio_input.change(
                fn=start_recording,
                inputs=[audio_input],
                outputs=[transcription_box]
            )

            state = gr.State([])  # chat history: list of (user, bot) pairs

            submit_button.click(
                submit_text_and_respond,
                inputs=[transcription_box, api_key_state, username_input, state, chatbot_type_state],
                outputs=[chatbot, transcription_box, memory_graph_table]
            )

            download_button.click(
                fn=download_conversations,
                inputs=[username_input, chatbot_type_state],
                outputs=gr.Files()
            )

            download_biography_button.click(
                fn=download_biography,
                inputs=[username_input, chatbot_type_state],
                outputs=[gr.File(label="Biography.txt"), biography_textbox]
            )

            save_conversation_button.click(
                fn=save_conversation,
                inputs=[username_input, chatbot_type_state],
                outputs=None
            )


app.queue()
app.launch(share=True, max_threads=10)
|
interview_protocol.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Life Story Interview protocol: maps each chapter shown in the "Select
# Chapter" dropdown to the list of topic prompts offered in "Select Topic".
protocols = {
    "Life Chapters": ["Life As A Book"],
    "Key Scenes in the Life Story": [
        "High Point",
        "Low Point",
        "Turning Point",
        "Positive Childhood Memory",
        "Negative Childhood Memory",
        "Vivid Adult Memory",
        "Religious, Spiritual, or Mystical Experience",
        "Wisdom Event"
    ],
    "Future": ["Next Chapter", "Dreams and Plans", "Life Project"],
    "Challenges": ["Life Challenge", "Health", "Loss", "Failure or Regret"],
    "Personal Ideology": [
        "Religious and Ethical Values",
        "Political and Social Values",
        "Change, Development Of Religious And Political Views",
        "Key Value"
    ]
}
|
requirements.txt
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==23.2.1
|
2 |
+
altair==5.4.1
|
3 |
+
annotated-types==0.7.0
|
4 |
+
anyio==4.5.0
|
5 |
+
attrs==24.2.0
|
6 |
+
certifi==2024.8.30
|
7 |
+
charset-normalizer==3.3.2
|
8 |
+
click==8.1.7
|
9 |
+
contourpy==1.3.0
|
10 |
+
cycler==0.12.1
|
11 |
+
exceptiongroup==1.2.2
|
12 |
+
fastapi==0.115.0
|
13 |
+
ffmpy==0.4.0
|
14 |
+
filelock==3.16.1
|
15 |
+
fonttools==4.53.1
|
16 |
+
fsspec==2024.9.0
|
17 |
+
gradio==3.50.2
|
18 |
+
gradio_client==0.6.1
|
19 |
+
h11==0.14.0
|
20 |
+
httpcore==1.0.5
|
21 |
+
httpx==0.27.2
|
22 |
+
huggingface-hub==0.25.0
|
23 |
+
idna==3.10
|
24 |
+
importlib_resources==6.4.5
|
25 |
+
Jinja2==3.1.4
|
26 |
+
jsonschema==4.23.0
|
27 |
+
jsonschema-specifications==2023.12.1
|
28 |
+
kiwisolver==1.4.7
|
29 |
+
llvmlite==0.43.0
|
30 |
+
MarkupSafe==2.1.5
|
31 |
+
matplotlib==3.9.2
|
32 |
+
more-itertools==10.5.0
|
33 |
+
mpmath==1.3.0
|
34 |
+
narwhals==1.8.1
|
35 |
+
networkx==3.3
|
36 |
+
numba==0.60.0
|
37 |
+
numpy==1.26.4
|
38 |
+
openai-whisper==20231117
|
39 |
+
orjson==3.10.7
|
40 |
+
packaging==24.1
|
41 |
+
pandas==2.2.2
|
42 |
+
pillow==10.4.0
|
43 |
+
pydantic==2.9.2
|
44 |
+
pydantic_core==2.23.4
|
45 |
+
pydub==0.25.1
|
46 |
+
pyparsing==3.1.4
|
47 |
+
python-dateutil==2.9.0.post0
|
48 |
+
python-multipart==0.0.9
|
49 |
+
pytz==2024.2
|
50 |
+
PyYAML==6.0.2
|
51 |
+
referencing==0.35.1
|
52 |
+
regex==2024.9.11
|
53 |
+
requests==2.32.3
|
54 |
+
rpds-py==0.20.0
|
55 |
+
semantic-version==2.10.0
|
56 |
+
six==1.16.0
|
57 |
+
sniffio==1.3.1
|
58 |
+
starlette==0.38.5
|
59 |
+
sympy==1.13.3
|
60 |
+
tiktoken==0.7.0
|
61 |
+
torch
|
62 |
+
tqdm==4.66.5
|
63 |
+
typing_extensions==4.12.2
|
64 |
+
tzdata==2024.1
|
65 |
+
urllib3==2.2.3
|
66 |
+
uvicorn==0.30.6
|
67 |
+
websockets==11.0.3
|