oceansweep committed
Commit • 4e1f4a3
1 Parent(s): e15e1c7
Upload 13 files
- App_Function_Libraries/Gradio_UI/Audio_ingestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Character_Interaction_tab.py +797 -0
- App_Function_Libraries/Gradio_UI/Chat_ui.py +1 -0
- App_Function_Libraries/Gradio_UI/Embeddings_tab.py +365 -0
- App_Function_Libraries/Gradio_UI/Live_Recording.py +123 -125
- App_Function_Libraries/Gradio_UI/Llamafile_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/Podcast_tab.py +1 -1
- App_Function_Libraries/Gradio_UI/RAG_Chat_tab.py +1 -174
- App_Function_Libraries/Gradio_UI/Utilities.py +1 -1
- App_Function_Libraries/Gradio_UI/Video_transcription_tab.py +10 -9
- App_Function_Libraries/Gradio_UI/Website_scraping_tab.py +24 -10
- App_Function_Libraries/Gradio_UI/Writing_tab.py +1 -325
App_Function_Libraries/Gradio_UI/Audio_ingestion_tab.py
CHANGED
@@ -7,7 +7,7 @@
 import gradio as gr
 #
 # Local Imports
-from App_Function_Libraries.Audio_Files import process_audio_files
+from App_Function_Libraries.Audio.Audio_Files import process_audio_files
 from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
 from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
 from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models
App_Function_Libraries/Gradio_UI/Character_Interaction_tab.py
ADDED
@@ -0,0 +1,797 @@
1 |
+
# Character_Interaction_tab.py
|
2 |
+
# Description: This file contains the functions that are used for Character Interactions in the Gradio UI.
|
3 |
+
#
|
4 |
+
# Imports
|
5 |
+
import base64
|
6 |
+
import io
|
7 |
+
from datetime import datetime as datetime
|
8 |
+
import logging
|
9 |
+
import json
|
10 |
+
import os
|
11 |
+
from typing import List, Dict, Tuple, Union
|
12 |
+
|
13 |
+
#
|
14 |
+
# External Imports
|
15 |
+
import gradio as gr
|
16 |
+
from PIL import Image
|
17 |
+
#
|
18 |
+
# Local Imports
|
19 |
+
from App_Function_Libraries.Chat import chat, load_characters
|
20 |
+
from App_Function_Libraries.Gradio_UI.Chat_ui import chat_wrapper
|
21 |
+
from App_Function_Libraries.Gradio_UI.Writing_tab import generate_writing_feedback
|
22 |
+
#
|
23 |
+
########################################################################################################################
|
24 |
+
#
|
25 |
+
# Single-Character chat Functions:
|
26 |
+
|
27 |
+
|
28 |
+
def chat_with_character(user_message, history, char_data, api_name_input, api_key):
|
29 |
+
if char_data is None:
|
30 |
+
return history, "Please import a character card first."
|
31 |
+
|
32 |
+
bot_message = generate_writing_feedback(user_message, char_data['name'], "Overall", api_name_input,
|
33 |
+
api_key)
|
34 |
+
history.append((user_message, bot_message))
|
35 |
+
return history, ""
|
36 |
+
|
37 |
+
|
38 |
+
def import_character_card(file):
|
39 |
+
if file is None:
|
40 |
+
logging.warning("No file provided for character card import")
|
41 |
+
return None
|
42 |
+
try:
|
43 |
+
if file.name.lower().endswith(('.png', '.webp')):
|
44 |
+
logging.info(f"Attempting to import character card from image: {file.name}")
|
45 |
+
json_data = extract_json_from_image(file)
|
46 |
+
if json_data:
|
47 |
+
logging.info("JSON data extracted from image, attempting to parse")
|
48 |
+
card_data = import_character_card_json(json_data)
|
49 |
+
if card_data:
|
50 |
+
# Save the image data
|
51 |
+
with Image.open(file) as img:
|
52 |
+
img_byte_arr = io.BytesIO()
|
53 |
+
img.save(img_byte_arr, format='PNG')
|
54 |
+
card_data['image'] = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
|
55 |
+
return card_data
|
56 |
+
else:
|
57 |
+
logging.warning("No JSON data found in the image")
|
58 |
+
else:
|
59 |
+
logging.info(f"Attempting to import character card from JSON file: {file.name}")
|
60 |
+
content = file.read().decode('utf-8')
|
61 |
+
return import_character_card_json(content)
|
62 |
+
except Exception as e:
|
63 |
+
logging.error(f"Error importing character card: {e}")
|
64 |
+
return None
|
65 |
+
|
66 |
+
|
67 |
+
def import_character_card_json(json_content):
|
68 |
+
try:
|
69 |
+
# Remove any leading/trailing whitespace
|
70 |
+
json_content = json_content.strip()
|
71 |
+
|
72 |
+
# Log the first 100 characters of the content
|
73 |
+
logging.debug(f"JSON content (first 100 chars): {json_content[:100]}...")
|
74 |
+
|
75 |
+
card_data = json.loads(json_content)
|
76 |
+
logging.debug(f"Parsed JSON data keys: {list(card_data.keys())}")
|
77 |
+
if 'spec' in card_data and card_data['spec'] == 'chara_card_v2':
|
78 |
+
logging.info("Detected V2 character card")
|
79 |
+
return card_data['data']
|
80 |
+
else:
|
81 |
+
logging.info("Assuming V1 character card")
|
82 |
+
return card_data
|
83 |
+
except json.JSONDecodeError as e:
|
84 |
+
logging.error(f"JSON decode error: {e}")
|
85 |
+
logging.error(f"Problematic JSON content: {json_content[:500]}...")
|
86 |
+
except Exception as e:
|
87 |
+
logging.error(f"Unexpected error parsing JSON: {e}")
|
88 |
+
return None
|
89 |
+
|
90 |
+
|
91 |
+
def extract_json_from_image(image_file):
|
92 |
+
logging.debug(f"Attempting to extract JSON from image: {image_file.name}")
|
93 |
+
try:
|
94 |
+
with Image.open(image_file) as img:
|
95 |
+
logging.debug("Image opened successfully")
|
96 |
+
metadata = img.info
|
97 |
+
if 'chara' in metadata:
|
98 |
+
logging.debug("Found 'chara' in image metadata")
|
99 |
+
chara_content = metadata['chara']
|
100 |
+
logging.debug(f"Content of 'chara' metadata (first 100 chars): {chara_content[:100]}...")
|
101 |
+
try:
|
102 |
+
decoded_content = base64.b64decode(chara_content).decode('utf-8')
|
103 |
+
logging.debug(f"Decoded content (first 100 chars): {decoded_content[:100]}...")
|
104 |
+
return decoded_content
|
105 |
+
except Exception as e:
|
106 |
+
logging.error(f"Error decoding base64 content: {e}")
|
107 |
+
|
108 |
+
logging.debug("'chara' not found in metadata, checking for base64 encoded data")
|
109 |
+
raw_data = img.tobytes()
|
110 |
+
possible_json = raw_data.split(b'{', 1)[-1].rsplit(b'}', 1)[0]
|
111 |
+
if possible_json:
|
112 |
+
try:
|
113 |
+
decoded = base64.b64decode(possible_json).decode('utf-8')
|
114 |
+
if decoded.startswith('{') and decoded.endswith('}'):
|
115 |
+
logging.debug("Found and decoded base64 JSON data")
|
116 |
+
return '{' + decoded + '}'
|
117 |
+
except Exception as e:
|
118 |
+
logging.error(f"Error decoding base64 data: {e}")
|
119 |
+
|
120 |
+
logging.warning("No JSON data found in the image")
|
121 |
+
except Exception as e:
|
122 |
+
logging.error(f"Error extracting JSON from image: {e}")
|
123 |
+
return None
|
124 |
+
|
125 |
+
|
126 |
+
def load_chat_history(file):
|
127 |
+
try:
|
128 |
+
content = file.read().decode('utf-8')
|
129 |
+
chat_data = json.loads(content)
|
130 |
+
return chat_data['history'], chat_data['character']
|
131 |
+
except Exception as e:
|
132 |
+
logging.error(f"Error loading chat history: {e}")
|
133 |
+
return None, None
|
134 |
+
|
135 |
+
|
136 |
+
def create_character_card_interaction_tab():
|
137 |
+
with gr.TabItem("Chat with a Character Card"):
|
138 |
+
gr.Markdown("# Chat with a Character Card")
|
139 |
+
with gr.Row():
|
140 |
+
with gr.Column(scale=1):
|
141 |
+
character_image = gr.Image(label="Character Image", type="filepath")
|
142 |
+
character_card_upload = gr.File(label="Upload Character Card")
|
143 |
+
import_card_button = gr.Button("Import Character Card")
|
144 |
+
load_characters_button = gr.Button("Load Existing Characters")
|
145 |
+
from App_Function_Libraries.Chat import get_character_names
|
146 |
+
character_dropdown = gr.Dropdown(label="Select Character", choices=get_character_names())
|
147 |
+
user_name_input = gr.Textbox(label="Your Name", placeholder="Enter your name here")
|
148 |
+
api_name_input = gr.Dropdown(
|
149 |
+
choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
|
150 |
+
"OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace",
|
151 |
+
"Custom-OpenAI-API"],
|
152 |
+
value=None,
|
153 |
+
# FIXME - make it so the user can't click `Send Message` without first setting an API + Chatbot
|
154 |
+
label="API for Interaction(Mandatory)"
|
155 |
+
)
|
156 |
+
api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)",
|
157 |
+
placeholder="Enter your API key here", type="password")
|
158 |
+
temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
|
159 |
+
import_chat_button = gr.Button("Import Chat History")
|
160 |
+
chat_file_upload = gr.File(label="Upload Chat History JSON", visible=False)
|
161 |
+
|
162 |
+
with gr.Column(scale=2):
|
163 |
+
chat_history = gr.Chatbot(label="Conversation", height=800)
|
164 |
+
user_input = gr.Textbox(label="Your message")
|
165 |
+
send_message_button = gr.Button("Send Message")
|
166 |
+
regenerate_button = gr.Button("Regenerate Last Message")
|
167 |
+
clear_chat_button = gr.Button("Clear Chat")
|
168 |
+
save_chat_button = gr.Button("Save This Chat")
|
169 |
+
save_status = gr.Textbox(label="Save Status", interactive=False)
|
170 |
+
|
171 |
+
character_data = gr.State(None)
|
172 |
+
user_name = gr.State("")
|
173 |
+
|
174 |
+
def import_chat_history(file, current_history, char_data):
|
175 |
+
loaded_history, char_name = load_chat_history(file)
|
176 |
+
if loaded_history is None:
|
177 |
+
return current_history, char_data, "Failed to load chat history."
|
178 |
+
|
179 |
+
# Check if the loaded chat is for the current character
|
180 |
+
if char_data and char_data.get('name') != char_name:
|
181 |
+
return current_history, char_data, f"Warning: Loaded chat is for character '{char_name}', but current character is '{char_data.get('name')}'. Chat not imported."
|
182 |
+
|
183 |
+
# If no character is selected, try to load the character from the chat
|
184 |
+
if not char_data:
|
185 |
+
new_char_data = load_character(char_name)[0]
|
186 |
+
if new_char_data:
|
187 |
+
char_data = new_char_data
|
188 |
+
else:
|
189 |
+
return current_history, char_data, f"Warning: Character '{char_name}' not found. Please select the character manually."
|
190 |
+
|
191 |
+
return loaded_history, char_data, f"Chat history for '{char_name}' imported successfully."
|
192 |
+
|
193 |
+
def import_character(file):
|
194 |
+
card_data = import_character_card(file)
|
195 |
+
if card_data:
|
196 |
+
from App_Function_Libraries.Chat import save_character
|
197 |
+
save_character(card_data)
|
198 |
+
return card_data, gr.update(choices=get_character_names())
|
199 |
+
else:
|
200 |
+
return None, gr.update()
|
201 |
+
|
202 |
+
def load_character(name):
|
203 |
+
from App_Function_Libraries.Chat import load_characters
|
204 |
+
characters = load_characters()
|
205 |
+
char_data = characters.get(name)
|
206 |
+
if char_data:
|
207 |
+
first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
|
208 |
+
return char_data, [(None, first_message)] if first_message else [], None
|
209 |
+
return None, [], None
|
210 |
+
|
211 |
+
def load_character_image(name):
|
212 |
+
from App_Function_Libraries.Chat import load_characters
|
213 |
+
characters = load_characters()
|
214 |
+
char_data = characters.get(name)
|
215 |
+
if char_data and 'image_path' in char_data:
|
216 |
+
image_path = char_data['image_path']
|
217 |
+
if os.path.exists(image_path):
|
218 |
+
return image_path
|
219 |
+
else:
|
220 |
+
logging.warning(f"Image file not found: {image_path}")
|
221 |
+
return None
|
222 |
+
|
223 |
+
def load_character_and_image(name):
|
224 |
+
char_data, chat_history, _ = load_character(name)
|
225 |
+
image_path = load_character_image(name)
|
226 |
+
logging.debug(f"Character: {name}")
|
227 |
+
logging.debug(f"Character data: {char_data}")
|
228 |
+
logging.debug(f"Image path: {image_path}")
|
229 |
+
return char_data, chat_history, image_path
|
230 |
+
|
231 |
+
def character_chat_wrapper(message, history, char_data, api_endpoint, api_key, temperature, user_name):
|
232 |
+
logging.debug("Entered character_chat_wrapper")
|
233 |
+
if char_data is None:
|
234 |
+
return "Please select a character first.", history
|
235 |
+
|
236 |
+
if not user_name:
|
237 |
+
user_name = "User"
|
238 |
+
|
239 |
+
char_name = char_data.get('name', 'AI Assistant')
|
240 |
+
|
241 |
+
# Prepare the character's background information
|
242 |
+
char_background = f"""
|
243 |
+
Name: {char_name}
|
244 |
+
Description: {char_data.get('description', 'N/A')}
|
245 |
+
Personality: {char_data.get('personality', 'N/A')}
|
246 |
+
Scenario: {char_data.get('scenario', 'N/A')}
|
247 |
+
"""
|
248 |
+
|
249 |
+
# Prepare the system prompt for character impersonation
|
250 |
+
system_message = f"""You are roleplaying as {char_name}, the character described below. Respond to the user's messages in character, maintaining the personality and background provided. Do not break character or refer to yourself as an AI. Always refer to yourself as "{char_name}" and refer to the user as "{user_name}".
|
251 |
+
|
252 |
+
{char_background}
|
253 |
+
|
254 |
+
Additional instructions: {char_data.get('post_history_instructions', '')}
|
255 |
+
"""
|
256 |
+
|
257 |
+
# Prepare media_content and selected_parts
|
258 |
+
media_content = {
|
259 |
+
'id': char_name,
|
260 |
+
'title': char_name,
|
261 |
+
'content': char_background,
|
262 |
+
'description': char_data.get('description', ''),
|
263 |
+
'personality': char_data.get('personality', ''),
|
264 |
+
'scenario': char_data.get('scenario', '')
|
265 |
+
}
|
266 |
+
selected_parts = ['description', 'personality', 'scenario']
|
267 |
+
|
268 |
+
prompt = char_data.get('post_history_instructions', '')
|
269 |
+
|
270 |
+
# Prepare the input for the chat function
|
271 |
+
if not history:
|
272 |
+
full_message = f"{prompt}\n\n{user_name}: {message}" if prompt else f"{user_name}: {message}"
|
273 |
+
else:
|
274 |
+
full_message = f"{user_name}: {message}"
|
275 |
+
|
276 |
+
# Call the chat function
|
277 |
+
bot_message = chat(
|
278 |
+
full_message,
|
279 |
+
history,
|
280 |
+
media_content,
|
281 |
+
selected_parts,
|
282 |
+
api_endpoint,
|
283 |
+
api_key,
|
284 |
+
prompt,
|
285 |
+
temperature,
|
286 |
+
system_message
|
287 |
+
)
|
288 |
+
|
289 |
+
# Update history
|
290 |
+
history.append((message, bot_message))
|
291 |
+
return history
|
292 |
+
|
293 |
+
def save_chat_history(history, character_name):
|
294 |
+
# Create the Saved_Chats folder if it doesn't exist
|
295 |
+
save_directory = "Saved_Chats"
|
296 |
+
os.makedirs(save_directory, exist_ok=True)
|
297 |
+
|
298 |
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
299 |
+
filename = f"chat_history_{character_name}_{timestamp}.json"
|
300 |
+
filepath = os.path.join(save_directory, filename)
|
301 |
+
|
302 |
+
chat_data = {
|
303 |
+
"character": character_name,
|
304 |
+
"timestamp": timestamp,
|
305 |
+
"history": history
|
306 |
+
}
|
307 |
+
|
308 |
+
try:
|
309 |
+
with open(filepath, 'w', encoding='utf-8') as f:
|
310 |
+
json.dump(chat_data, f, ensure_ascii=False, indent=2)
|
311 |
+
return filepath
|
312 |
+
except Exception as e:
|
313 |
+
return f"Error saving chat: {str(e)}"
|
314 |
+
|
315 |
+
def save_current_chat(history, char_data):
|
316 |
+
if not char_data or not history:
|
317 |
+
return "No chat to save or character not selected."
|
318 |
+
|
319 |
+
character_name = char_data.get('name', 'Unknown')
|
320 |
+
result = save_chat_history(history, character_name)
|
321 |
+
if result.startswith("Error"):
|
322 |
+
return result
|
323 |
+
return f"Chat saved successfully as {result}"
|
324 |
+
|
325 |
+
def regenerate_last_message(history, char_data, api_name, api_key, temperature, user_name):
|
326 |
+
if not history:
|
327 |
+
return history
|
328 |
+
|
329 |
+
last_user_message = history[-1][0]
|
330 |
+
new_history = history[:-1]
|
331 |
+
|
332 |
+
return character_chat_wrapper(last_user_message, new_history, char_data, api_name, api_key, temperature,
|
333 |
+
user_name)
|
334 |
+
|
335 |
+
import_chat_button.click(
|
336 |
+
fn=lambda: gr.update(visible=True),
|
337 |
+
outputs=chat_file_upload
|
338 |
+
)
|
339 |
+
|
340 |
+
chat_file_upload.change(
|
341 |
+
fn=import_chat_history,
|
342 |
+
inputs=[chat_file_upload, chat_history, character_data],
|
343 |
+
outputs=[chat_history, character_data, save_status]
|
344 |
+
)
|
345 |
+
|
346 |
+
def update_character_info(name):
|
347 |
+
from App_Function_Libraries.Chat import load_characters
|
348 |
+
characters = load_characters()
|
349 |
+
char_data = characters.get(name)
|
350 |
+
|
351 |
+
image_path = char_data.get('image_path') if char_data else None
|
352 |
+
|
353 |
+
logging.debug(f"Character: {name}")
|
354 |
+
logging.debug(f"Character data: {char_data}")
|
355 |
+
logging.debug(f"Image path: {image_path}")
|
356 |
+
|
357 |
+
if image_path:
|
358 |
+
if os.path.exists(image_path):
|
359 |
+
logging.debug(f"Image file exists at {image_path}")
|
360 |
+
if os.access(image_path, os.R_OK):
|
361 |
+
logging.debug(f"Image file is readable")
|
362 |
+
else:
|
363 |
+
logging.warning(f"Image file is not readable: {image_path}")
|
364 |
+
image_path = None
|
365 |
+
else:
|
366 |
+
logging.warning(f"Image file does not exist: {image_path}")
|
367 |
+
image_path = None
|
368 |
+
else:
|
369 |
+
logging.warning("No image path provided for the character")
|
370 |
+
|
371 |
+
return char_data, None, image_path # Return None for chat_history
|
372 |
+
|
373 |
+
def on_character_select(name):
|
374 |
+
logging.debug(f"Character selected: {name}")
|
375 |
+
return update_character_info_with_error_handling(name)
|
376 |
+
|
377 |
+
def clear_chat_history():
|
378 |
+
return [], None # Return empty list for chat_history and None for character_data
|
379 |
+
|
380 |
+
def update_character_info_with_error_handling(name):
|
381 |
+
logging.debug(f"Entering update_character_info_with_error_handling for character: {name}")
|
382 |
+
try:
|
383 |
+
char_data, _, image_path = update_character_info(name)
|
384 |
+
logging.debug(f"Retrieved data: char_data={bool(char_data)}, image_path={image_path}")
|
385 |
+
|
386 |
+
if char_data:
|
387 |
+
first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
|
388 |
+
chat_history = [(None, first_message)] if first_message else []
|
389 |
+
else:
|
390 |
+
chat_history = []
|
391 |
+
|
392 |
+
logging.debug(f"Created chat_history with length: {len(chat_history)}")
|
393 |
+
|
394 |
+
if image_path and os.path.exists(image_path):
|
395 |
+
logging.debug(f"Image file exists at {image_path}")
|
396 |
+
return char_data, chat_history, image_path
|
397 |
+
else:
|
398 |
+
logging.warning(f"Image not found or invalid path: {image_path}")
|
399 |
+
return char_data, chat_history, None
|
400 |
+
except Exception as e:
|
401 |
+
logging.error(f"Error updating character info: {str(e)}", exc_info=True)
|
402 |
+
return None, [], None
|
403 |
+
finally:
|
404 |
+
logging.debug("Exiting update_character_info_with_error_handling")
|
405 |
+
|
406 |
+
import_card_button.click(
|
407 |
+
fn=import_character,
|
408 |
+
inputs=[character_card_upload],
|
409 |
+
outputs=[character_data, character_dropdown]
|
410 |
+
)
|
411 |
+
|
412 |
+
load_characters_button.click(
|
413 |
+
fn=lambda: gr.update(choices=get_character_names()),
|
414 |
+
outputs=character_dropdown
|
415 |
+
)
|
416 |
+
|
417 |
+
clear_chat_button.click(
|
418 |
+
fn=clear_chat_history,
|
419 |
+
inputs=[],
|
420 |
+
outputs=[chat_history, character_data]
|
421 |
+
)
|
422 |
+
|
423 |
+
character_dropdown.change(
|
424 |
+
fn=on_character_select,
|
425 |
+
inputs=[character_dropdown],
|
426 |
+
outputs=[character_data, chat_history, character_image]
|
427 |
+
)
|
428 |
+
|
429 |
+
send_message_button.click(
|
430 |
+
fn=character_chat_wrapper,
|
431 |
+
inputs=[user_input, chat_history, character_data, api_name_input, api_key_input, temperature_slider,
|
432 |
+
user_name_input],
|
433 |
+
outputs=[chat_history]
|
434 |
+
).then(lambda: "", outputs=user_input)
|
435 |
+
|
436 |
+
regenerate_button.click(
|
437 |
+
fn=regenerate_last_message,
|
438 |
+
inputs=[chat_history, character_data, api_name_input, api_key_input, temperature_slider, user_name_input],
|
439 |
+
outputs=[chat_history]
|
440 |
+
)
|
441 |
+
|
442 |
+
user_name_input.change(
|
443 |
+
fn=lambda name: name,
|
444 |
+
inputs=[user_name_input],
|
445 |
+
outputs=[user_name]
|
446 |
+
)
|
447 |
+
|
448 |
+
save_chat_button.click(
|
449 |
+
fn=save_current_chat,
|
450 |
+
inputs=[chat_history, character_data],
|
451 |
+
outputs=[save_status]
|
452 |
+
)
|
453 |
+
|
454 |
+
return character_data, chat_history, user_input, user_name, character_image
|
455 |
+
|
456 |
+
|
457 |
+
#
|
458 |
+
# End of Character chat tab
|
459 |
+
######################################################################################################################
|
460 |
+
#
|
461 |
+
# Multi-Character Chat Interface
|
462 |
+
|
463 |
+
def character_interaction_setup():
|
464 |
+
characters = load_characters()
|
465 |
+
return characters, [], None, None
|
466 |
+
|
467 |
+
|
468 |
+
def extract_character_response(response: Union[str, Tuple]) -> str:
|
469 |
+
if isinstance(response, tuple):
|
470 |
+
# If it's a tuple, try to extract the first string element
|
471 |
+
for item in response:
|
472 |
+
if isinstance(item, str):
|
473 |
+
return item.strip()
|
474 |
+
# If no string found, return a default message
|
475 |
+
return "I'm not sure how to respond."
|
476 |
+
elif isinstance(response, str):
|
477 |
+
# If it's already a string, just return it
|
478 |
+
return response.strip()
|
479 |
+
else:
|
480 |
+
# For any other type, return a default message
|
481 |
+
return "I'm having trouble forming a response."
|
482 |
+
|
483 |
+
# def process_character_response(response: str) -> str:
|
484 |
+
# # Remove any leading explanatory text before the first '---'
|
485 |
+
# parts = response.split('---')
|
486 |
+
# if len(parts) > 1:
|
487 |
+
# return '---' + '---'.join(parts[1:])
|
488 |
+
# return response.strip()
|
489 |
+
def process_character_response(response: Union[str, Tuple]) -> str:
|
490 |
+
if isinstance(response, tuple):
|
491 |
+
response = ' '.join(str(item) for item in response if isinstance(item, str))
|
492 |
+
|
493 |
+
if isinstance(response, str):
|
494 |
+
# Remove any leading explanatory text before the first '---'
|
495 |
+
parts = response.split('---')
|
496 |
+
if len(parts) > 1:
|
497 |
+
return '---' + '---'.join(parts[1:])
|
498 |
+
return response.strip()
|
499 |
+
else:
|
500 |
+
return "I'm having trouble forming a response."
|
501 |
+
|
502 |
+
def character_turn(characters: Dict, conversation: List[Tuple[str, str]],
|
503 |
+
current_character: str, other_characters: List[str],
|
504 |
+
api_endpoint: str, api_key: str, temperature: float,
|
505 |
+
scenario: str = "") -> Tuple[List[Tuple[str, str]], str]:
|
506 |
+
if not current_character or current_character not in characters:
|
507 |
+
return conversation, current_character
|
508 |
+
|
509 |
+
if not conversation and scenario:
|
510 |
+
conversation.append(("Scenario", scenario))
|
511 |
+
|
512 |
+
current_char = characters[current_character]
|
513 |
+
other_chars = [characters[char] for char in other_characters if char in characters and char != current_character]
|
514 |
+
|
515 |
+
prompt = f"{current_char['name']}'s personality: {current_char['personality']}\n"
|
516 |
+
for char in other_chars:
|
517 |
+
prompt += f"{char['name']}'s personality: {char['personality']}\n"
|
518 |
+
prompt += "Conversation so far:\n" + "\n".join([f"{sender}: {message}" for sender, message in conversation])
|
519 |
+
prompt += f"\n\nHow would {current_char['name']} respond?"
|
520 |
+
|
521 |
+
try:
|
522 |
+
response = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "", None, False, temperature, "")
|
523 |
+
processed_response = process_character_response(response)
|
524 |
+
conversation.append((current_char['name'], processed_response))
|
525 |
+
except Exception as e:
|
526 |
+
error_message = f"Error generating response: {str(e)}"
|
527 |
+
conversation.append((current_char['name'], error_message))
|
528 |
+
|
529 |
+
return conversation, current_character
|
530 |
+
|
531 |
+
|
532 |
+
def character_interaction(character1: str, character2: str, api_endpoint: str, api_key: str,
|
533 |
+
num_turns: int, scenario: str, temperature: float,
|
534 |
+
user_interjection: str = "") -> List[str]:
|
535 |
+
characters = load_characters()
|
536 |
+
char1 = characters[character1]
|
537 |
+
char2 = characters[character2]
|
538 |
+
conversation = []
|
539 |
+
current_speaker = char1
|
540 |
+
other_speaker = char2
|
541 |
+
|
542 |
+
# Add scenario to the conversation start
|
543 |
+
if scenario:
|
544 |
+
conversation.append(f"Scenario: {scenario}")
|
545 |
+
|
546 |
+
for turn in range(num_turns):
|
547 |
+
# Construct the prompt for the current speaker
|
548 |
+
prompt = f"{current_speaker['name']}'s personality: {current_speaker['personality']}\n"
|
549 |
+
prompt += f"{other_speaker['name']}'s personality: {other_speaker['personality']}\n"
|
550 |
+
prompt += f"Conversation so far:\n" + "\n".join(
|
551 |
+
[msg if isinstance(msg, str) else f"{msg[0]}: {msg[1]}" for msg in conversation])
|
552 |
+
|
553 |
+
# Add user interjection if provided
|
554 |
+
if user_interjection and turn == num_turns // 2:
|
555 |
+
prompt += f"\n\nUser interjection: {user_interjection}\n"
|
556 |
+
conversation.append(f"User: {user_interjection}")
|
557 |
+
|
558 |
+
prompt += f"\n\nHow would {current_speaker['name']} respond?"
|
559 |
+
|
560 |
+
# FIXME - figure out why the double print is happening
|
561 |
+
# Get response from the LLM
|
562 |
+
response = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "", None, False, temperature, "")
|
563 |
+
|
564 |
+
# Add the response to the conversation
|
565 |
+
conversation.append((current_speaker['name'], response))
|
566 |
+
|
567 |
+
# Switch speakers
|
568 |
+
current_speaker, other_speaker = other_speaker, current_speaker
|
569 |
+
|
570 |
+
# Convert the conversation to a list of strings for output
|
571 |
+
return [f"{msg[0]}: {msg[1]}" if isinstance(msg, tuple) else msg for msg in conversation]
|
572 |
+
|
573 |
+
|
574 |
+
def create_multiple_character_chat_tab():
|
575 |
+
with gr.TabItem("Multi-Character Chat"):
|
576 |
+
characters, conversation, current_character, other_character = character_interaction_setup()
|
577 |
+
|
578 |
+
with gr.Blocks() as character_interaction:
|
579 |
+
gr.Markdown("# Multi-Character Chat")
|
580 |
+
|
581 |
+
with gr.Row():
|
582 |
+
num_characters = gr.Dropdown(label="Number of Characters", choices=["2", "3", "4"], value="2")
|
583 |
+
character_selectors = [gr.Dropdown(label=f"Character {i + 1}", choices=list(characters.keys())) for i in
|
584 |
+
range(4)]
|
585 |
+
|
586 |
+
api_endpoint = gr.Dropdown(label="API Endpoint",
|
587 |
+
choices=["OpenAI", "Anthropic", "Local-LLM", "Cohere", "Groq", "DeepSeek",
|
588 |
+
"Mistral", "OpenRouter"])
|
589 |
+
api_key = gr.Textbox(label="API Key (if required)", type="password")
|
590 |
+
temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7)
|
591 |
+
scenario = gr.Textbox(label="Scenario (optional)", lines=3)
|
592 |
+
|
593 |
+
chat_display = gr.Chatbot(label="Character Interaction")
|
594 |
+
current_index = gr.State(0)
|
595 |
+
|
596 |
+
next_turn_btn = gr.Button("Next Turn")
|
597 |
+
narrator_input = gr.Textbox(label="Narrator Input", placeholder="Add a narration or description...")
|
598 |
+
add_narration_btn = gr.Button("Add Narration")
|
599 |
+
error_box = gr.Textbox(label="Error Messages", visible=False)
|
600 |
+
reset_btn = gr.Button("Reset Conversation")
|
601 |
+
|
602 |
+
def update_character_selectors(num):
|
603 |
+
return [gr.update(visible=True) if i < int(num) else gr.update(visible=False) for i in range(4)]
|
604 |
+
|
605 |
+
num_characters.change(
|
606 |
+
update_character_selectors,
|
607 |
+
inputs=[num_characters],
|
608 |
+
outputs=character_selectors
|
609 |
+
)
|
610 |
+
|
611 |
+
def reset_conversation():
|
612 |
+
return [], 0, gr.update(value=""), gr.update(value="")
|
613 |
+
|
614 |
+
def take_turn(conversation, current_index, char1, char2, char3, char4, api_endpoint, api_key, temperature,
|
615 |
+
scenario):
|
616 |
+
char_selectors = [char for char in [char1, char2, char3, char4] if char] # Remove None values
|
617 |
+
num_chars = len(char_selectors)
|
618 |
+
|
619 |
+
if num_chars == 0:
|
620 |
+
return conversation, current_index # No characters selected, return without changes
|
621 |
+
|
622 |
+
if not conversation:
|
623 |
+
conversation = []
|
624 |
+
if scenario:
|
625 |
+
conversation.append(("Scenario", scenario))
|
626 |
+
|
627 |
+
current_character = char_selectors[current_index % num_chars]
|
628 |
+
next_index = (current_index + 1) % num_chars
|
629 |
+
|
630 |
+
prompt = f"Character speaking: {current_character}\nOther characters: {', '.join(char for char in char_selectors if char != current_character)}\n"
|
631 |
+
prompt += "Generate the next part of the conversation, including character dialogues and actions. Characters should speak in first person."
|
632 |
+
|
633 |
+
response, new_conversation, _ = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "",
|
634 |
+
None, False, temperature, "")
|
635 |
+
|
636 |
+
# Format the response
|
637 |
+
formatted_lines = []
|
638 |
+
for line in response.split('\n'):
|
639 |
+
if ':' in line:
|
640 |
+
speaker, text = line.split(':', 1)
|
641 |
+
formatted_lines.append(f"**{speaker.strip()}**: {text.strip()}")
|
642 |
+
else:
|
643 |
+
formatted_lines.append(line)
|
644 |
+
|
645 |
+
formatted_response = '\n'.join(formatted_lines)
|
646 |
+
|
647 |
+
# Update the last message in the conversation with the formatted response
|
648 |
+
if new_conversation:
|
649 |
+
new_conversation[-1] = (new_conversation[-1][0], formatted_response)
|
650 |
+
else:
|
651 |
+
new_conversation.append((current_character, formatted_response))
|
652 |
+
|
653 |
+
return new_conversation, next_index
|
654 |
+
|
655 |
+
def add_narration(narration, conversation):
|
656 |
+
if narration:
|
657 |
+
conversation.append(("Narrator", narration))
|
658 |
+
return conversation, ""
|
659 |
+
|
660 |
+
def take_turn_with_error_handling(conversation, current_index, char1, char2, char3, char4, api_endpoint,
|
661 |
+
api_key, temperature, scenario):
|
662 |
+
try:
|
663 |
+
new_conversation, next_index = take_turn(conversation, current_index, char1, char2, char3, char4,
|
664 |
+
api_endpoint, api_key, temperature, scenario)
|
665 |
+
return new_conversation, next_index, gr.update(visible=False, value="")
|
666 |
+
except Exception as e:
|
667 |
+
error_message = f"An error occurred: {str(e)}"
|
668 |
+
return conversation, current_index, gr.update(visible=True, value=error_message)
|
669 |
+
|
670 |
+
next_turn_btn.click(
|
671 |
+
take_turn_with_error_handling,
|
672 |
+
inputs=[chat_display, current_index] + character_selectors + [api_endpoint, api_key, temperature,
|
673 |
+
scenario],
|
674 |
+
outputs=[chat_display, current_index, error_box]
|
675 |
+
)
|
676 |
+
|
677 |
+
add_narration_btn.click(
|
678 |
+
add_narration,
|
679 |
+
inputs=[narrator_input, chat_display],
|
680 |
+
outputs=[chat_display, narrator_input]
|
681 |
+
)
|
682 |
+
|
683 |
+
reset_btn.click(
|
684 |
+
reset_conversation,
|
685 |
+
outputs=[chat_display, current_index, scenario, narrator_input]
|
686 |
+
)
|
687 |
+
|
688 |
+
return character_interaction
|
689 |
+
|
690 |
+
#
|
691 |
+
# End of Multi-Character chat tab
|
692 |
+
########################################################################################################################
|
693 |
+
#
|
694 |
+
# Narrator-Controlled Conversation Tab
|
695 |
+
|
696 |
+
# From `Fuzzlewumper` on Reddit.
|
697 |
+
def create_narrator_controlled_conversation_tab():
|
698 |
+
with gr.TabItem("Narrator-Controlled Conversation"):
|
699 |
+
gr.Markdown("# Narrator-Controlled Conversation")
|
700 |
+
|
701 |
+
with gr.Row():
|
702 |
+
with gr.Column(scale=1):
|
703 |
+
api_endpoint = gr.Dropdown(
|
704 |
+
label="API Endpoint",
|
705 |
+
choices=["OpenAI", "Anthropic", "Local-LLM", "Cohere", "Groq", "DeepSeek", "Mistral", "OpenRouter"],
|
706 |
+
value="OpenAI"
|
707 |
+
)
|
708 |
+
api_key = gr.Textbox(label="API Key (if required)", type="password")
|
709 |
+
temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=1.0, step=0.1, value=0.7)
|
710 |
+
|
711 |
+
with gr.Column(scale=2):
|
712 |
+
narrator_input = gr.Textbox(
|
713 |
+
label="Narrator Input",
|
714 |
+
placeholder="Set the scene or provide context...",
|
715 |
+
lines=3
|
716 |
+
)
|
717 |
+
|
718 |
+
character_inputs = []
|
719 |
+
for i in range(4): # Allow up to 4 characters
|
720 |
+
with gr.Row():
|
721 |
+
name = gr.Textbox(label=f"Character {i + 1} Name")
|
722 |
+
description = gr.Textbox(label=f"Character {i + 1} Description", lines=3)
|
723 |
+
character_inputs.append((name, description))
|
724 |
+
|
725 |
+
conversation_display = gr.Chatbot(label="Conversation", height=400)
|
726 |
+
user_input = gr.Textbox(label="Your Input (optional)", placeholder="Add your own dialogue or action...")
|
727 |
+
|
728 |
+
with gr.Row():
|
729 |
+
generate_btn = gr.Button("Generate Next Interaction")
|
730 |
+
reset_btn = gr.Button("Reset Conversation")
|
731 |
+
|
732 |
+
error_box = gr.Textbox(label="Error Messages", visible=False)
|
733 |
+
|
734 |
+
def generate_interaction(conversation, narrator_text, user_text, api_endpoint, api_key, temperature,
|
735 |
+
*character_data):
|
736 |
+
try:
|
737 |
+
characters = [{"name": name.strip(), "description": desc.strip()}
|
738 |
+
for name, desc in zip(character_data[::2], character_data[1::2])
|
739 |
+
if name.strip() and desc.strip()]
|
740 |
+
|
741 |
+
if not characters:
|
742 |
+
raise ValueError("At least one character must be defined.")
|
743 |
+
|
744 |
+
prompt = f"Narrator: {narrator_text}\n\n"
|
745 |
+
for char in characters:
|
746 |
+
prompt += f"Character '{char['name']}': {char['description']}\n"
|
747 |
+
prompt += "\nGenerate the next part of the conversation, including character dialogues and actions. "
|
748 |
+
prompt += "Characters should speak in first person. "
|
749 |
+
if user_text:
|
750 |
+
prompt += f"\nIncorporate this user input: {user_text}"
|
751 |
+
prompt += "\nResponse:"
|
752 |
+
|
753 |
+
response, conversation, _ = chat_wrapper(prompt, conversation, {}, [], api_endpoint, api_key, "", None,
|
754 |
+
False, temperature, "")
|
755 |
+
|
756 |
+
# Format the response
|
757 |
+
formatted_lines = []
|
758 |
+
for line in response.split('\n'):
|
759 |
+
if ':' in line:
|
760 |
+
speaker, text = line.split(':', 1)
|
761 |
+
formatted_lines.append(f"**{speaker.strip()}**: {text.strip()}")
|
762 |
+
else:
|
763 |
+
formatted_lines.append(line)
|
764 |
+
|
765 |
+
formatted_response = '\n'.join(formatted_lines)
|
766 |
+
|
767 |
+
# Update the last message in the conversation with the formatted response
|
768 |
+
if conversation:
|
769 |
+
conversation[-1] = (conversation[-1][0], formatted_response)
|
770 |
+
else:
|
771 |
+
conversation.append((None, formatted_response))
|
772 |
+
|
773 |
+
return conversation, gr.update(value=""), gr.update(value=""), gr.update(visible=False, value="")
|
774 |
+
except Exception as e:
|
775 |
+
error_message = f"An error occurred: {str(e)}"
|
776 |
+
return conversation, gr.update(), gr.update(), gr.update(visible=True, value=error_message)
|
777 |
+
|
778 |
+
def reset_conversation():
|
779 |
+
return [], gr.update(value=""), gr.update(value=""), gr.update(visible=False, value="")
|
780 |
+
|
781 |
+
generate_btn.click(
|
782 |
+
generate_interaction,
|
783 |
+
inputs=[conversation_display, narrator_input, user_input, api_endpoint, api_key, temperature] +
|
784 |
+
[input for char_input in character_inputs for input in char_input],
|
785 |
+
outputs=[conversation_display, narrator_input, user_input, error_box]
|
786 |
+
)
|
787 |
+
|
788 |
+
reset_btn.click(
|
789 |
+
reset_conversation,
|
790 |
+
outputs=[conversation_display, narrator_input, user_input, error_box]
|
791 |
+
)
|
792 |
+
|
793 |
+
return api_endpoint, api_key, temperature, narrator_input, conversation_display, user_input, generate_btn, reset_btn, error_box
|
794 |
+
|
795 |
+
#
|
796 |
+
# End of Multi-Character chat tab
|
797 |
+
########################################################################################################################
|
App_Function_Libraries/Gradio_UI/Chat_ui.py
CHANGED
@@ -984,6 +984,7 @@ def process_with_llm(workflow, context, prompt, api_endpoint, api_key):
     api_key_snippet = api_key[:5] + "..." if api_key else "Not provided"
     return f"LLM output using {api_endpoint} (API Key: {api_key_snippet}) for {workflow} with context: {context[:30]}... and prompt: {prompt[:30]}..."
 
+
 #
 # End of Chat_ui.py
 #######################################################################################################################
App_Function_Libraries/Gradio_UI/Embeddings_tab.py
ADDED
@@ -0,0 +1,365 @@
1 |
+
# Embeddings_tab.py
|
2 |
+
# Description: This file contains the code for the Embeddings tabs in the Gradio UI
|
3 |
+
#
|
4 |
+
# Imports
|
5 |
+
import json
|
6 |
+
import logging
|
7 |
+
#
|
8 |
+
# External Imports
|
9 |
+
import gradio as gr
|
10 |
+
|
11 |
+
from App_Function_Libraries.Chunk_Lib import improved_chunking_process, determine_chunk_position
|
12 |
+
#
|
13 |
+
# Local Imports
|
14 |
+
from App_Function_Libraries.DB.DB_Manager import get_all_content_from_database
|
15 |
+
from App_Function_Libraries.RAG.ChromaDB_Library import chroma_client, \
|
16 |
+
store_in_chroma
|
17 |
+
from App_Function_Libraries.RAG.Embeddings_Create import create_embedding
|
18 |
+
#
|
19 |
+
########################################################################################################################
|
20 |
+
#
|
21 |
+
# Functions:
|
22 |
+
|
23 |
+
# FIXME - under construction
|
24 |
+
def create_embeddings_tab():
|
25 |
+
with gr.TabItem("Create Embeddings"):
|
26 |
+
gr.Markdown("# Create Embeddings for All Content")
|
27 |
+
|
28 |
+
with gr.Row():
|
29 |
+
with gr.Column():
|
30 |
+
embedding_provider = gr.Radio(
|
31 |
+
choices=["huggingface", "local", "openai"],
|
32 |
+
label="Select Embedding Provider",
|
33 |
+
value="huggingface"
|
34 |
+
)
|
35 |
+
gr.Markdown("Note: Local provider requires a running Llama.cpp/llamafile server.")
|
36 |
+
gr.Markdown("OpenAI provider requires a valid API key. ")
|
37 |
+
gr.Markdown("OpenAI Embeddings models: `text-embedding-3-small`, `text-embedding-3-large`")
|
38 |
+
gr.Markdown("HuggingFace provider requires a valid model name, i.e. `dunzhang/stella_en_400M_v5`")
|
39 |
+
embedding_model = gr.Textbox(
|
40 |
+
label="Embedding Model",
|
41 |
+
value="Enter your embedding model name here", lines=3
|
42 |
+
)
|
43 |
+
embedding_api_url = gr.Textbox(
|
44 |
+
label="API URL (for local provider)",
|
45 |
+
value="http://localhost:8080/embedding",
|
46 |
+
visible=False
|
47 |
+
)
|
48 |
+
|
49 |
+
# Add chunking options
|
50 |
+
chunking_method = gr.Dropdown(
|
51 |
+
choices=["words", "sentences", "paragraphs", "tokens", "semantic"],
|
52 |
+
label="Chunking Method",
|
53 |
+
value="words"
|
54 |
+
)
|
55 |
+
max_chunk_size = gr.Slider(
|
56 |
+
minimum=1, maximum=8000, step=1, value=500,
|
57 |
+
label="Max Chunk Size"
|
58 |
+
)
|
59 |
+
chunk_overlap = gr.Slider(
|
60 |
+
minimum=0, maximum=4000, step=1, value=200,
|
61 |
+
label="Chunk Overlap"
|
62 |
+
)
|
63 |
+
adaptive_chunking = gr.Checkbox(
|
64 |
+
label="Use Adaptive Chunking",
|
65 |
+
value=False
|
66 |
+
)
|
67 |
+
|
68 |
+
create_button = gr.Button("Create Embeddings")
|
69 |
+
|
70 |
+
with gr.Column():
|
71 |
+
status_output = gr.Textbox(label="Status", lines=10)
|
72 |
+
|
73 |
+
def update_provider_options(provider):
|
74 |
+
return gr.update(visible=provider == "local")
|
75 |
+
|
76 |
+
embedding_provider.change(
|
77 |
+
fn=update_provider_options,
|
78 |
+
inputs=[embedding_provider],
|
79 |
+
outputs=[embedding_api_url]
|
80 |
+
)
|
81 |
+
|
82 |
+
def create_all_embeddings(provider, model, api_url, method, max_size, overlap, adaptive):
|
83 |
+
try:
|
84 |
+
all_content = get_all_content_from_database()
|
85 |
+
if not all_content:
|
86 |
+
return "No content found in the database."
|
87 |
+
|
88 |
+
chunk_options = {
|
89 |
+
'method': method,
|
90 |
+
'max_size': max_size,
|
91 |
+
'overlap': overlap,
|
92 |
+
'adaptive': adaptive
|
93 |
+
}
|
94 |
+
|
95 |
+
collection_name = "all_content_embeddings"
|
96 |
+
collection = chroma_client.get_or_create_collection(name=collection_name)
|
97 |
+
|
98 |
+
for item in all_content:
|
99 |
+
media_id = item['id']
|
100 |
+
text = item['content']
|
101 |
+
|
102 |
+
chunks = improved_chunking_process(text, chunk_options)
|
103 |
+
for i, chunk in enumerate(chunks):
|
104 |
+
chunk_text = chunk['text']
|
105 |
+
chunk_id = f"doc_{media_id}_chunk_{i}"
|
106 |
+
|
107 |
+
existing = collection.get(ids=[chunk_id])
|
108 |
+
if existing['ids']:
|
109 |
+
continue
|
110 |
+
|
111 |
+
embedding = create_embedding(chunk_text, provider, model, api_url)
|
112 |
+
metadata = {
|
113 |
+
"media_id": str(media_id),
|
114 |
+
"chunk_index": i,
|
115 |
+
"total_chunks": len(chunks),
|
116 |
+
"chunking_method": method,
|
117 |
+
"max_chunk_size": max_size,
|
118 |
+
"chunk_overlap": overlap,
|
119 |
+
"adaptive_chunking": adaptive,
|
120 |
+
"embedding_model": model,
|
121 |
+
"embedding_provider": provider,
|
122 |
+
**chunk['metadata']
|
123 |
+
}
|
124 |
+
store_in_chroma(collection_name, [chunk_text], [embedding], [chunk_id], [metadata])
|
125 |
+
|
126 |
+
return "Embeddings created and stored successfully for all content."
|
127 |
+
except Exception as e:
|
128 |
+
logging.error(f"Error during embedding creation: {str(e)}")
|
129 |
+
return f"Error: {str(e)}"
|
130 |
+
|
131 |
+
create_button.click(
|
132 |
+
fn=create_all_embeddings,
|
133 |
+
inputs=[embedding_provider, embedding_model, embedding_api_url,
|
134 |
+
chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking],
|
135 |
+
outputs=status_output
|
136 |
+
)
|
137 |
+
|
138 |
+
|
139 |
+
def create_view_embeddings_tab():
|
140 |
+
with gr.TabItem("View/Update Embeddings"):
|
141 |
+
gr.Markdown("# View and Update Embeddings")
|
142 |
+
item_mapping = gr.State({})
|
143 |
+
with gr.Row():
|
144 |
+
with gr.Column():
|
145 |
+
item_dropdown = gr.Dropdown(label="Select Item", choices=[], interactive=True)
|
146 |
+
refresh_button = gr.Button("Refresh Item List")
|
147 |
+
embedding_status = gr.Textbox(label="Embedding Status", interactive=False)
|
148 |
+
embedding_preview = gr.Textbox(label="Embedding Preview", interactive=False, lines=5)
|
149 |
+
embedding_metadata = gr.Textbox(label="Embedding Metadata", interactive=False, lines=10)
|
150 |
+
|
151 |
+
with gr.Column():
|
152 |
+
create_new_embedding_button = gr.Button("Create New Embedding")
|
153 |
+
embedding_provider = gr.Radio(
|
154 |
+
choices=["huggingface", "local", "openai"],
|
155 |
+
label="Select Embedding Provider",
|
156 |
+
value="huggingface"
|
157 |
+
)
|
158 |
+
gr.Markdown("Note: Local provider requires a running Llama.cpp/llamafile server.")
|
159 |
+
gr.Markdown("OpenAI provider requires a valid API key. ")
|
160 |
+
gr.Markdown("OpenAI Embeddings models: `text-embedding-3-small`, `text-embedding-3-large`")
|
161 |
+
gr.Markdown("HuggingFace provider requires a valid model name, i.e. `dunzhang/stella_en_400M_v5`")
|
162 |
+
embedding_model = gr.Textbox(
|
163 |
+
label="Embedding Model",
|
164 |
+
value="Enter your embedding model name here", lines=3
|
165 |
+
)
|
166 |
+
embedding_api_url = gr.Textbox(
|
167 |
+
label="API URL (for local provider)",
|
168 |
+
value="http://localhost:8080/embedding",
|
169 |
+
visible=False
|
170 |
+
)
|
171 |
+
chunking_method = gr.Dropdown(
|
172 |
+
choices=["words", "sentences", "paragraphs", "tokens", "semantic"],
|
173 |
+
label="Chunking Method",
|
174 |
+
value="words"
|
175 |
+
)
|
176 |
+
max_chunk_size = gr.Slider(
|
177 |
+
minimum=1, maximum=8000, step=1, value=500,
|
178 |
+
label="Max Chunk Size"
|
179 |
+
)
|
180 |
+
chunk_overlap = gr.Slider(
|
181 |
+
minimum=0, maximum=5000, step=1, value=200,
|
182 |
+
label="Chunk Overlap"
|
183 |
+
)
|
184 |
+
adaptive_chunking = gr.Checkbox(
|
185 |
+
label="Use Adaptive Chunking",
|
186 |
+
value=False
|
187 |
+
)
|
188 |
+
|
189 |
+
def get_items_with_embedding_status():
|
190 |
+
try:
|
191 |
+
items = get_all_content_from_database()
|
192 |
+
collection = chroma_client.get_or_create_collection(name="all_content_embeddings")
|
193 |
+
choices = []
|
194 |
+
new_item_mapping = {}
|
195 |
+
for item in items:
|
196 |
+
try:
|
197 |
+
result = collection.get(ids=[f"doc_{item['id']}_chunk_0"])
|
198 |
+
embedding_exists = result is not None and result.get('ids') and len(result['ids']) > 0
|
199 |
+
status = "Embedding exists" if embedding_exists else "No embedding"
|
200 |
+
except Exception as e:
|
201 |
+
print(f"Error checking embedding for item {item['id']}: {str(e)}")
|
202 |
+
status = "Error checking"
|
203 |
+
choice = f"{item['title']} ({status})"
|
204 |
+
choices.append(choice)
|
205 |
+
new_item_mapping[choice] = item['id']
|
206 |
+
return gr.update(choices=choices), new_item_mapping
|
207 |
+
except Exception as e:
|
208 |
+
print(f"Error in get_items_with_embedding_status: {str(e)}")
|
209 |
+
return gr.update(choices=["Error: Unable to fetch items"]), {}
|
210 |
+
|
211 |
+
def update_provider_options(provider):
|
212 |
+
return gr.update(visible=provider == "local")
|
213 |
+
|
214 |
+
def check_embedding_status(selected_item, item_mapping):
|
215 |
+
if not selected_item:
|
216 |
+
return "Please select an item", "", ""
|
217 |
+
|
218 |
+
try:
|
219 |
+
item_id = item_mapping.get(selected_item)
|
220 |
+
if item_id is None:
|
221 |
+
return f"Invalid item selected: {selected_item}", "", ""
|
222 |
+
|
223 |
+
item_title = selected_item.rsplit(' (', 1)[0]
|
224 |
+
collection = chroma_client.get_or_create_collection(name="all_content_embeddings")
|
225 |
+
|
226 |
+
result = collection.get(ids=[f"doc_{item_id}_chunk_0"], include=["embeddings", "metadatas"])
|
227 |
+
logging.info(f"ChromaDB result for item '{item_title}' (ID: {item_id}): {result}")
|
228 |
+
|
229 |
+
if not result['ids']:
|
230 |
+
return f"No embedding found for item '{item_title}' (ID: {item_id})", "", ""
|
231 |
+
|
232 |
+
if not result['embeddings'] or not result['embeddings'][0]:
|
233 |
+
return f"Embedding data missing for item '{item_title}' (ID: {item_id})", "", ""
|
234 |
+
|
235 |
+
embedding = result['embeddings'][0]
|
236 |
+
metadata = result['metadatas'][0] if result['metadatas'] else {}
|
237 |
+
embedding_preview = str(embedding[:50])
|
238 |
+
status = f"Embedding exists for item '{item_title}' (ID: {item_id})"
|
239 |
+
return status, f"First 50 elements of embedding:\n{embedding_preview}", json.dumps(metadata, indent=2)
|
240 |
+
|
241 |
+
except Exception as e:
|
242 |
+
logging.error(f"Error in check_embedding_status: {str(e)}")
|
243 |
+
return f"Error processing item: {selected_item}. Details: {str(e)}", "", ""
|
244 |
+
|
245 |
+
def create_new_embedding_for_item(selected_item, provider, model, api_url, method, max_size, overlap, adaptive, item_mapping):
|
246 |
+
if not selected_item:
|
247 |
+
return "Please select an item", "", ""
|
248 |
+
|
249 |
+
try:
|
250 |
+
item_id = item_mapping.get(selected_item)
|
251 |
+
if item_id is None:
|
252 |
+
return f"Invalid item selected: {selected_item}", "", ""
|
253 |
+
|
254 |
+
items = get_all_content_from_database()
|
255 |
+
255 + item = next((item for item in items if item['id'] == item_id), None)
256 + if not item:
257 +     return f"Item not found: {item_id}", "", ""
258 +
259 + chunk_options = {
260 +     'method': method,
261 +     'max_size': max_size,
262 +     'overlap': overlap,
263 +     'adaptive': adaptive
264 + }
265 +
266 + chunks = improved_chunking_process(item['content'], chunk_options)
267 + collection_name = "all_content_embeddings"
268 + collection = chroma_client.get_or_create_collection(name=collection_name)
269 +
270 + # Delete existing embeddings for this item
271 + existing_ids = [f"doc_{item_id}_chunk_{i}" for i in range(len(chunks))]
272 + collection.delete(ids=existing_ids)
273 +
274 + for i, chunk in enumerate(chunks):
275 +     chunk_text = chunk['text']
276 +     chunk_metadata = chunk['metadata']
277 +     chunk_position = determine_chunk_position(chunk_metadata['relative_position'])
278 +
279 +     chunk_header = f"""
280 +     Original Document: {item['title']}
281 +     Chunk: {i + 1} of {len(chunks)}
282 +     Position: {chunk_position}
283 +     Header: {chunk_metadata.get('header_text', 'N/A')}
284 +
285 +     --- Chunk Content ---
286 +     """
287 +
288 +     full_chunk_text = chunk_header + chunk_text
289 +     chunk_id = f"doc_{item_id}_chunk_{i}"
290 +     embedding = create_embedding(full_chunk_text, provider, model, api_url)
291 +     metadata = {
292 +         "media_id": str(item_id),
293 +         "chunk_index": i,
294 +         "total_chunks": len(chunks),
295 +         "chunking_method": method,
296 +         "max_chunk_size": max_size,
297 +         "chunk_overlap": overlap,
298 +         "adaptive_chunking": adaptive,
299 +         "embedding_model": model,
300 +         "embedding_provider": provider,
301 +         **chunk_metadata
302 +     }
303 +     store_in_chroma(collection_name, [full_chunk_text], [embedding], [chunk_id], [metadata])
304 +
305 + embedding_preview = str(embedding[:50])
306 + status = f"New embeddings created and stored for item: {item['title']} (ID: {item_id})"
307 + return status, f"First 50 elements of new embedding:\n{embedding_preview}", json.dumps(metadata, indent=2)
308 + except Exception as e:
309 +     logging.error(f"Error in create_new_embedding_for_item: {str(e)}")
310 +     return f"Error creating embedding: {str(e)}", "", ""
311 +
312 + refresh_button.click(
313 +     get_items_with_embedding_status,
314 +     outputs=[item_dropdown, item_mapping]
315 + )
316 + item_dropdown.change(
317 +     check_embedding_status,
318 +     inputs=[item_dropdown, item_mapping],
319 +     outputs=[embedding_status, embedding_preview, embedding_metadata]
320 + )
321 + create_new_embedding_button.click(
322 +     create_new_embedding_for_item,
323 +     inputs=[item_dropdown, embedding_provider, embedding_model, embedding_api_url,
324 +             chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking, item_mapping],
325 +     outputs=[embedding_status, embedding_preview, embedding_metadata]
326 + )
327 + embedding_provider.change(
328 +     update_provider_options,
329 +     inputs=[embedding_provider],
330 +     outputs=[embedding_api_url]
331 + )
332 +
333 + return item_dropdown, refresh_button, embedding_status, embedding_preview, embedding_metadata, create_new_embedding_button, embedding_provider, embedding_model, embedding_api_url, chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking
334 +
335 +
336 + def create_purge_embeddings_tab():
337 +     with gr.TabItem("Purge Embeddings"):
338 +         gr.Markdown("# Purge Embeddings")
339 +
340 +         with gr.Row():
341 +             with gr.Column():
342 +                 purge_button = gr.Button("Purge All Embeddings")
343 +             with gr.Column():
344 +                 status_output = gr.Textbox(label="Status", lines=10)
345 +
346 +         def purge_all_embeddings():
347 +             try:
348 +                 collection_name = "all_content_embeddings"
349 +                 chroma_client.delete_collection(collection_name)
350 +                 chroma_client.create_collection(collection_name)
351 +                 return "All embeddings have been purged successfully."
352 +             except Exception as e:
353 +                 logging.error(f"Error during embedding purge: {str(e)}")
354 +                 return f"Error: {str(e)}"
355 +
356 +         purge_button.click(
357 +             fn=purge_all_embeddings,
358 +             outputs=status_output
359 +         )
360 +
361 +
362 +
363 + #
364 + # End of file
365 + ########################################################################################################################
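The per-chunk flow above (embed each chunk, attach chunk-level metadata, then write into the shared "all_content_embeddings" collection) can be exercised outside of Gradio. Below is a minimal sketch against the chromadb client directly; the collection name mirrors the tabs above, but the embed() helper is a stand-in for the app's create_embedding(), and the persist path, ids, and sample text are illustrative only.

# Minimal sketch of the chunk -> embedding -> Chroma write path, assuming a local
# persistent store. embed() is a placeholder; swap in an OpenAI/local/HF call.
import chromadb

client = chromadb.PersistentClient(path="./chroma_db")  # assumed local store
collection = client.get_or_create_collection(name="all_content_embeddings")

def embed(text: str) -> list[float]:
    # Stand-in embedding so the sketch runs without any provider configured.
    return [float(len(text) % 7)] * 8

def store_chunks(media_id: str, chunks: list[str]) -> None:
    ids = [f"doc_{media_id}_chunk_{i}" for i in range(len(chunks))]
    collection.delete(ids=ids)  # drop stale embeddings for this item, as the tab does
    collection.add(
        ids=ids,
        documents=chunks,
        embeddings=[embed(c) for c in chunks],
        metadatas=[{"media_id": media_id, "chunk_index": i, "total_chunks": len(chunks)}
                   for i in range(len(chunks))],
    )

store_chunks("42", ["first chunk of text", "second chunk of text"])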
App_Function_Libraries/Gradio_UI/Live_Recording.py
CHANGED
@@ -1,125 +1,123 @@
  # Live_Recording.py
  # Description: Gradio UI for live audio recording and transcription.
  #
  # Import necessary modules and functions
  import logging
  import os
  # External Imports
  import gradio as gr
  # Local Imports
-
-
+ from App_Function_Libraries.Audio.Audio_Transcription_Lib import (record_audio, speech_to_text, save_audio_temp,
+                                                                   stop_recording)
  from App_Function_Libraries.DB.DB_Manager import add_media_to_database
  #
  #######################################################################################################################
  #
  # Functions:

  whisper_models = ["small", "medium", "small.en", "medium.en", "medium", "large", "large-v1", "large-v2", "large-v3",
                    "distil-large-v2", "distil-medium.en", "distil-small.en"]

  def create_live_recording_tab():
      with gr.Tab("Live Recording and Transcription"):
          gr.Markdown("# Live Audio Recording and Transcription")
          with gr.Row():
              with gr.Column():
                  duration = gr.Slider(minimum=1, maximum=8000, value=15, label="Recording Duration (seconds)")
                  whisper_models_input = gr.Dropdown(choices=whisper_models, value="medium", label="Whisper Model")
                  vad_filter = gr.Checkbox(label="Use VAD Filter")
                  save_recording = gr.Checkbox(label="Save Recording")
                  save_to_db = gr.Checkbox(label="Save Transcription to Database(Must be checked to save - can be checked afer transcription)", value=False)
                  custom_title = gr.Textbox(label="Custom Title (for database)", visible=False)
                  record_button = gr.Button("Start Recording")
                  stop_button = gr.Button("Stop Recording")
              with gr.Column():
                  output = gr.Textbox(label="Transcription", lines=10)
                  audio_output = gr.Audio(label="Recorded Audio", visible=False)

          recording_state = gr.State(value=None)

          def start_recording(duration):
-
-             "
-
-             )
-
- # End of Functions
- ########################################################################################################################
+             p, stream, audio_queue, stop_event, audio_thread = record_audio(duration)
+             return (p, stream, audio_queue, stop_event, audio_thread)
+
+         def end_recording_and_transcribe(recording_state, whisper_model, vad_filter, save_recording, save_to_db, custom_title):
+             if recording_state is None:
+                 return "Recording hasn't started yet.", None
+
+             p, stream, audio_queue, stop_event, audio_thread = recording_state
+             audio_data = stop_recording(p, stream, audio_queue, stop_event, audio_thread)
+
+             temp_file = save_audio_temp(audio_data)
+             segments = speech_to_text(temp_file, whisper_model=whisper_model, vad_filter=vad_filter)
+             transcription = "\n".join([segment["Text"] for segment in segments])
+
+             if save_recording:
+                 return transcription, temp_file
+             else:
+                 os.remove(temp_file)
+                 return transcription, None
+
+         def save_transcription_to_db(transcription, custom_title):
+             if custom_title.strip() == "":
+                 custom_title = "Self-recorded Audio"
+
+             try:
+                 url = "self_recorded"
+                 info_dict = {
+                     "title": custom_title,
+                     "uploader": "self-recorded",
+                     "webpage_url": url
+                 }
+                 segments = [{"Text": transcription}]
+                 summary = ""
+                 keywords = ["self-recorded", "audio"]
+                 custom_prompt_input = ""
+                 whisper_model = "self-recorded"
+                 media_type = "audio"
+
+                 result = add_media_to_database(
+                     url=url,
+                     info_dict=info_dict,
+                     segments=segments,
+                     summary=summary,
+                     keywords=keywords,
+                     custom_prompt_input=custom_prompt_input,
+                     whisper_model=whisper_model,
+                     media_type=media_type
+                 )
+                 return f"Transcription saved to database successfully. {result}"
+             except Exception as e:
+                 logging.error(f"Error saving transcription to database: {str(e)}")
+                 return f"Error saving transcription to database: {str(e)}"
+
+         def update_custom_title_visibility(save_to_db):
+             return gr.update(visible=save_to_db)
+
+         record_button.click(
+             fn=start_recording,
+             inputs=[duration],
+             outputs=[recording_state]
+         )
+
+         stop_button.click(
+             fn=end_recording_and_transcribe,
+             inputs=[recording_state, whisper_models_input, vad_filter, save_recording, save_to_db, custom_title],
+             outputs=[output, audio_output]
+         )
+
+         save_to_db.change(
+             fn=update_custom_title_visibility,
+             inputs=[save_to_db],
+             outputs=[custom_title]
+         )
+
+         gr.Button("Save to Database").click(
+             fn=save_transcription_to_db,
+             inputs=[output, custom_title],
+             outputs=gr.Textbox(label="Database Save Status")
+         )
+
+ #
+ # End of Functions
+ ########################################################################################################################
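The new tab wires the recording lifecycle through four helpers imported from Audio_Transcription_Lib. A condensed sketch of that flow, outside of Gradio, is below; the function names and call shapes are taken from how Live_Recording.py uses them, while the default model choice and the cleanup behaviour are illustrative assumptions.

# Sketch of the record -> stop -> transcribe flow used by the Live Recording tab.
import os
from App_Function_Libraries.Audio.Audio_Transcription_Lib import (record_audio, speech_to_text,
                                                                  save_audio_temp, stop_recording)

def record_and_transcribe(duration_seconds=15, whisper_model="medium"):
    # Start capture; this tuple is what the tab keeps in gr.State between clicks.
    p, stream, audio_queue, stop_event, audio_thread = record_audio(duration_seconds)

    # Stop the capture thread and collect the raw audio buffer.
    audio_data = stop_recording(p, stream, audio_queue, stop_event, audio_thread)

    # Write a temp file for the transcriber, then join the returned segments.
    temp_file = save_audio_temp(audio_data)
    try:
        segments = speech_to_text(temp_file, whisper_model=whisper_model, vad_filter=True)
        return "\n".join(segment["Text"] for segment in segments)
    finally:
        os.remove(temp_file)  # mirrors the tab's cleanup when "Save Recording" is unchecked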
App_Function_Libraries/Gradio_UI/Llamafile_tab.py
CHANGED
@@ -9,7 +9,7 @@ import glob
  import gradio as gr
  #
  # Local Imports
- from App_Function_Libraries.Llamafile import start_llamafile
+ from App_Function_Libraries.Local_LLM.Llamafile import start_llamafile
  #
  #######################################################################################################################
  #
App_Function_Libraries/Gradio_UI/PDF_ingestion_tab.py
CHANGED
@@ -12,7 +12,7 @@ import gradio as gr
  # Local Imports
  from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
  from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
- from App_Function_Libraries.PDF_Ingestion_Lib import extract_metadata_from_pdf, extract_text_and_format_from_pdf, \
+ from App_Function_Libraries.PDF.PDF_Ingestion_Lib import extract_metadata_from_pdf, extract_text_and_format_from_pdf, \
      process_and_cleanup_pdf
  #
  #
App_Function_Libraries/Gradio_UI/Podcast_tab.py
CHANGED
@@ -8,7 +8,7 @@
  import gradio as gr
  #
  # Local Imports
- from App_Function_Libraries.Audio_Files import process_podcast
+ from App_Function_Libraries.Audio.Audio_Files import process_podcast
  from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
  from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models, update_user_prompt
App_Function_Libraries/Gradio_UI/RAG_Chat_tab.py
CHANGED
@@ -8,10 +8,7 @@ import logging
  import gradio as gr
  #
  # Local Imports
-
- from App_Function_Libraries.RAG.ChromaDB_Library import chroma_client, \
-     check_embedding_status, store_in_chroma
- from App_Function_Libraries.RAG.Embeddings_Create import create_embedding
  from App_Function_Libraries.RAG.RAG_Libary_2 import enhanced_rag_pipeline
  #
  ########################################################################################################################
@@ -71,176 +68,6 @@ def create_rag_tab():
|
|
71 |
search_button.click(perform_rag_search, inputs=[search_query, keywords_input, api_choice], outputs=[result_output, context_output])
|
72 |
|
73 |
|
74 |
-
# FIXME - under construction
|
75 |
-
def create_embeddings_tab():
|
76 |
-
with gr.TabItem("Create Embeddings"):
|
77 |
-
gr.Markdown("# Create Embeddings for All Content")
|
78 |
-
|
79 |
-
with gr.Row():
|
80 |
-
with gr.Column():
|
81 |
-
embedding_provider = gr.Radio(
|
82 |
-
choices=["openai", "local", "huggingface"],
|
83 |
-
label="Select Embedding Provider",
|
84 |
-
value="openai"
|
85 |
-
)
|
86 |
-
embedding_model = gr.Textbox(
|
87 |
-
label="Embedding Model",
|
88 |
-
value="text-embedding-3-small"
|
89 |
-
)
|
90 |
-
embedding_api_url = gr.Textbox(
|
91 |
-
label="API URL (for local provider)",
|
92 |
-
value="http://localhost:8080/embedding",
|
93 |
-
visible=False
|
94 |
-
)
|
95 |
-
create_button = gr.Button("Create Embeddings")
|
96 |
-
|
97 |
-
with gr.Column():
|
98 |
-
status_output = gr.Textbox(label="Status", lines=10)
|
99 |
-
|
100 |
-
def update_provider_options(provider):
|
101 |
-
return gr.update(visible=provider == "local")
|
102 |
-
|
103 |
-
embedding_provider.change(
|
104 |
-
fn=update_provider_options,
|
105 |
-
inputs=[embedding_provider],
|
106 |
-
outputs=[embedding_api_url]
|
107 |
-
)
|
108 |
-
|
109 |
-
def create_all_embeddings(provider, model, api_url):
|
110 |
-
try:
|
111 |
-
all_content = get_all_content_from_database()
|
112 |
-
if not all_content:
|
113 |
-
return "No content found in the database."
|
114 |
-
|
115 |
-
collection_name = "all_content_embeddings"
|
116 |
-
collection = chroma_client.get_or_create_collection(name=collection_name)
|
117 |
-
|
118 |
-
for item in all_content:
|
119 |
-
media_id = item['id']
|
120 |
-
text = item['content']
|
121 |
-
|
122 |
-
existing = collection.get(ids=[f"doc_{media_id}"])
|
123 |
-
if existing['ids']:
|
124 |
-
continue
|
125 |
-
|
126 |
-
embedding = create_embedding(text, provider, model, api_url)
|
127 |
-
store_in_chroma(collection_name, [text], [embedding], [f"doc_{media_id}"], [{"media_id": media_id}])
|
128 |
-
|
129 |
-
return "Embeddings created and stored successfully for all new content."
|
130 |
-
except Exception as e:
|
131 |
-
logging.error(f"Error during embedding creation: {str(e)}")
|
132 |
-
return f"Error: {str(e)}"
|
133 |
-
|
134 |
-
create_button.click(
|
135 |
-
fn=create_all_embeddings,
|
136 |
-
inputs=[embedding_provider, embedding_model, embedding_api_url],
|
137 |
-
outputs=status_output
|
138 |
-
)
|
139 |
-
|
140 |
-
|
141 |
-
def create_view_embeddings_tab():
|
142 |
-
with gr.TabItem("View/Update Embeddings"):
|
143 |
-
gr.Markdown("# View and Update Embeddings")
|
144 |
-
item_mapping = gr.State({})
|
145 |
-
with gr.Row():
|
146 |
-
with gr.Column():
|
147 |
-
item_dropdown = gr.Dropdown(label="Select Item", choices=[], interactive=True)
|
148 |
-
refresh_button = gr.Button("Refresh Item List")
|
149 |
-
embedding_status = gr.Textbox(label="Embedding Status", interactive=False)
|
150 |
-
embedding_preview = gr.Textbox(label="Embedding Preview", interactive=False, lines=5)
|
151 |
-
|
152 |
-
with gr.Column():
|
153 |
-
create_new_embedding_button = gr.Button("Create New Embedding")
|
154 |
-
embedding_provider = gr.Radio(
|
155 |
-
choices=["openai", "local", "huggingface"],
|
156 |
-
label="Embedding Provider",
|
157 |
-
value="openai"
|
158 |
-
)
|
159 |
-
embedding_model = gr.Textbox(
|
160 |
-
label="Embedding Model",
|
161 |
-
value="text-embedding-3-small",
|
162 |
-
visible=True
|
163 |
-
)
|
164 |
-
embedding_api_url = gr.Textbox(
|
165 |
-
label="API URL (for local provider)",
|
166 |
-
value="http://localhost:8080/embedding",
|
167 |
-
visible=False
|
168 |
-
)
|
169 |
-
|
170 |
-
def get_items_with_embedding_status():
|
171 |
-
try:
|
172 |
-
items = get_all_content_from_database()
|
173 |
-
collection = chroma_client.get_or_create_collection(name="all_content_embeddings")
|
174 |
-
choices = []
|
175 |
-
new_item_mapping = {}
|
176 |
-
for item in items:
|
177 |
-
try:
|
178 |
-
result = collection.get(ids=[f"doc_{item['id']}"])
|
179 |
-
embedding_exists = result is not None and result.get('ids') and len(result['ids']) > 0
|
180 |
-
status = "Embedding exists" if embedding_exists else "No embedding"
|
181 |
-
except Exception as e:
|
182 |
-
print(f"Error checking embedding for item {item['id']}: {str(e)}")
|
183 |
-
status = "Error checking"
|
184 |
-
choice = f"{item['title']} ({status})"
|
185 |
-
choices.append(choice)
|
186 |
-
new_item_mapping[choice] = item['id']
|
187 |
-
return gr.update(choices=choices), new_item_mapping
|
188 |
-
except Exception as e:
|
189 |
-
print(f"Error in get_items_with_embedding_status: {str(e)}")
|
190 |
-
return gr.update(choices=["Error: Unable to fetch items"]), {}
|
191 |
-
|
192 |
-
def update_provider_options(provider):
|
193 |
-
return gr.update(visible=provider == "local")
|
194 |
-
|
195 |
-
def create_new_embedding_for_item(selected_item, provider, model, api_url, item_mapping):
|
196 |
-
if not selected_item:
|
197 |
-
return "Please select an item", ""
|
198 |
-
|
199 |
-
try:
|
200 |
-
item_id = item_mapping.get(selected_item)
|
201 |
-
if item_id is None:
|
202 |
-
return f"Invalid item selected: {selected_item}", ""
|
203 |
-
|
204 |
-
items = get_all_content_from_database()
|
205 |
-
item = next((item for item in items if item['id'] == item_id), None)
|
206 |
-
if not item:
|
207 |
-
return f"Item not found: {item_id}", ""
|
208 |
-
|
209 |
-
embedding = create_embedding(item['content'], provider, model, api_url)
|
210 |
-
|
211 |
-
collection_name = "all_content_embeddings"
|
212 |
-
metadata = {"media_id": item_id, "title": item['title']}
|
213 |
-
store_in_chroma(collection_name, [item['content']], [embedding], [f"doc_{item_id}"],
|
214 |
-
[{"media_id": item_id, "title": item['title']}])
|
215 |
-
|
216 |
-
embedding_preview = str(embedding[:50])
|
217 |
-
status = f"New embedding created and stored for item: {item['title']} (ID: {item_id})"
|
218 |
-
return status, f"First 50 elements of new embedding:\n{embedding_preview}\n\nMetadata: {metadata}"
|
219 |
-
except Exception as e:
|
220 |
-
logging.error(f"Error in create_new_embedding_for_item: {str(e)}")
|
221 |
-
return f"Error creating embedding: {str(e)}", ""
|
222 |
-
|
223 |
-
refresh_button.click(
|
224 |
-
get_items_with_embedding_status,
|
225 |
-
outputs=[item_dropdown, item_mapping]
|
226 |
-
)
|
227 |
-
item_dropdown.change(
|
228 |
-
check_embedding_status,
|
229 |
-
inputs=[item_dropdown, item_mapping],
|
230 |
-
outputs=[embedding_status, embedding_preview]
|
231 |
-
)
|
232 |
-
create_new_embedding_button.click(
|
233 |
-
create_new_embedding_for_item,
|
234 |
-
inputs=[item_dropdown, embedding_provider, embedding_model, embedding_api_url, item_mapping],
|
235 |
-
outputs=[embedding_status, embedding_preview]
|
236 |
-
)
|
237 |
-
embedding_provider.change(
|
238 |
-
update_provider_options,
|
239 |
-
inputs=[embedding_provider],
|
240 |
-
outputs=[embedding_api_url]
|
241 |
-
)
|
242 |
-
|
243 |
-
return item_dropdown, refresh_button, embedding_status, embedding_preview, create_new_embedding_button, embedding_provider, embedding_model, embedding_api_url
|
244 |
|
245 |
#
|
246 |
# End of file
|
|
|
8 |
  8   import gradio as gr
  9   #
 10   # Local Imports
 11 +
 12   from App_Function_Libraries.RAG.RAG_Libary_2 import enhanced_rag_pipeline
 13   #
 14   ########################################################################################################################

 68   search_button.click(perform_rag_search, inputs=[search_query, keywords_input, api_choice], outputs=[result_output, context_output])
 69
 70
 71
 72   #
 73   # End of file
App_Function_Libraries/Gradio_UI/Utilities.py
CHANGED
@@ -40,7 +40,7 @@ def create_utilities_yt_audio_tab():
      output_file_audio = gr.File(label="Download Audio")
      output_message_audio = gr.Textbox(label="Status")

-     from App_Function_Libraries.Audio_Files import download_youtube_audio
+     from App_Function_Libraries.Audio.Audio_Files import download_youtube_audio
      download_button_audio.click(
          fn=download_youtube_audio,
          inputs=youtube_url_input_audio,
App_Function_Libraries/Gradio_UI/Video_transcription_tab.py
CHANGED
@@ -14,7 +14,7 @@ import yt_dlp
  from App_Function_Libraries.DB.DB_Manager import load_preset_prompts, add_media_to_database
  from App_Function_Libraries.Gradio_UI.Gradio_Shared import whisper_models, update_user_prompt
  from App_Function_Libraries.Gradio_UI.Gradio_Shared import error_handler
- from App_Function_Libraries.Summarization_General_Lib import perform_transcription, perform_summarization, \
+ from App_Function_Libraries.Summarization.Summarization_General_Lib import perform_transcription, perform_summarization, \
      save_transcription_and_summary
  from App_Function_Libraries.Utils.Utils import convert_to_seconds, safe_read_file, format_transcription, \
      create_download_directory, generate_unique_identifier, extract_text_from_segments
@@ -156,9 +156,9 @@ def create_video_transcription_tab():
  with gr.Column():
      chunk_method = gr.Dropdown(choices=['words', 'sentences', 'paragraphs', 'tokens'],
                                 label="Chunking Method")
-     max_chunk_size = gr.Slider(minimum=100, maximum=
+     max_chunk_size = gr.Slider(minimum=100, maximum=8000, value=400, step=1,
                                 label="Max Chunk Size")
-     chunk_overlap = gr.Slider(minimum=0, maximum=
+     chunk_overlap = gr.Slider(minimum=0, maximum=5000, value=100, step=1, label="Chunk Overlap")
      use_adaptive_chunking = gr.Checkbox(
          label="Use Adaptive Chunking (Adjust chunking based on text complexity)")
      use_multi_level_chunking = gr.Checkbox(label="Use Multi-level Chunking")
@@ -492,8 +492,8 @@ def create_video_transcription_tab():
  # FIXME - remove dead args for process_url_with_metadata
  @error_handler
  def process_url_with_metadata(input_item, num_speakers, whisper_model, custom_prompt, offset, api_name,
-                               api_key,
-
+                               api_key, vad_filter, download_video_flag, download_audio,
+                               rolling_summarization,
                                detail_level, question_box, keywords, local_file_path, diarize, end_time=None,
                                include_timestamps=True, metadata=None, use_chunking=False,
                                chunk_options=None, keep_original_video=False, current_whisper_model="Blank"):
@@ -562,10 +562,11 @@ def create_video_transcription_tab():
  # Download video/audio
  logging.info("Downloading video/audio...")
  video_file_path = download_video(input_item, download_path, full_info, download_video_flag,
-                                  current_whisper_model=
- if
- logging.
-
+                                  current_whisper_model=current_whisper_model)
+ if video_file_path is None:
+     logging.info(
+         f"Download skipped for {input_item}. Media might already exist or be processed.")
+ return input_item, None, None, None, None, info_dict

  logging.info(f"Processing file: {video_file_path}")
App_Function_Libraries/Gradio_UI/Website_scraping_tab.py
CHANGED
@@ -19,11 +19,11 @@ from playwright.sync_api import sync_playwright

  #
  # Local Imports
- from App_Function_Libraries.Article_Extractor_Lib import scrape_from_sitemap, scrape_by_url_level, scrape_article
- from App_Function_Libraries.Article_Summarization_Lib import scrape_and_summarize_multiple
+ from App_Function_Libraries.Web_Scraping.Article_Extractor_Lib import scrape_from_sitemap, scrape_by_url_level, scrape_article
+ from App_Function_Libraries.Web_Scraping.Article_Summarization_Lib import scrape_and_summarize_multiple
  from App_Function_Libraries.DB.DB_Manager import load_preset_prompts
  from App_Function_Libraries.Gradio_UI.Chat_ui import update_user_prompt
- from App_Function_Libraries.Summarization_General_Lib import summarize
+ from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize


  #
@@ -423,12 +423,12 @@ def create_website_scraping_tab():
  result = await scrape_and_summarize_multiple(url_input, custom_prompt, api_name, api_key, keywords,
                                               custom_titles, system_prompt)
  elif scrape_method == "Sitemap":
-     result = scrape_from_sitemap
+     result = await asyncio.to_thread(scrape_from_sitemap, url_input)
  elif scrape_method == "URL Level":
      if url_level is None:
          return convert_json_to_markdown(
              json.dumps({"error": "URL level is required for URL Level scraping."}))
-     result = scrape_by_url_level
+     result = await asyncio.to_thread(scrape_by_url_level, url_input, url_level)
  elif scrape_method == "Recursive Scraping":
      result = await recursive_scrape(url_input, max_pages, max_depth, progress.update, delay=1.0)
  else:
@@ -437,16 +437,30 @@ def create_website_scraping_tab():
  # Ensure result is always a list of dictionaries
  if isinstance(result, dict):
      result = [result]
- elif
-
+ elif isinstance(result, list):
+     if all(isinstance(item, str) for item in result):
+         # Convert list of strings to list of dictionaries
+         result = [{"content": item} for item in result]
+     elif not all(isinstance(item, dict) for item in result):
+         raise ValueError("Not all items in result are dictionaries or strings")
+ else:
+     raise ValueError(f"Unexpected result type: {type(result)}")
+
+ # Ensure all items in result are dictionaries
+ if not all(isinstance(item, dict) for item in result):
+     raise ValueError("Not all items in result are dictionaries")

  if summarize_checkbox:
      total_articles = len(result)
      for i, article in enumerate(result):
          progress.update(f"Summarizing article {i + 1}/{total_articles}")
-
-
-
+         content = article.get('content', '')
+         if content:
+             summary = await asyncio.to_thread(summarize, content, custom_prompt, api_name, api_key,
+                                               temperature, system_prompt)
+             article['summary'] = summary
+         else:
+             article['summary'] = "No content available to summarize."

  # Concatenate all content
  all_content = "\n\n".join(
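The rewritten handler keeps its async signature but pushes the blocking scrapers and the summarize() call onto worker threads with asyncio.to_thread, so the event loop driving the UI stays responsive. A self-contained sketch of that pattern follows; scrape_sitemap_blocking is a stand-in for the repo's scrapers, not a function from the codebase.

# Pattern sketch: calling blocking scrape code from an async handler.
import asyncio
import time

def scrape_sitemap_blocking(url):
    # Placeholder for a blocking, network-bound scraper.
    time.sleep(0.1)
    return [{"content": f"scraped {url}"}]

async def scrape_all(urls):
    # asyncio.to_thread runs each blocking call in a worker thread, so awaiting
    # it does not stall the event loop.
    batches = await asyncio.gather(*(asyncio.to_thread(scrape_sitemap_blocking, u) for u in urls))
    return [article for batch in batches for article in batch]

if __name__ == "__main__":
    print(asyncio.run(scrape_all(["https://example.com/sitemap.xml"])))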
App_Function_Libraries/Gradio_UI/Writing_tab.py
CHANGED
@@ -2,20 +2,13 @@
  # Description: This file contains the functions that are used for writing in the Gradio UI.
  #
  # Imports
- import base64
- from datetime import datetime as datetime
- import logging
- import json
- import os
  #
  # External Imports
  import gradio as gr
- from PIL import Image
  import textstat
  #
  # Local Imports
- from App_Function_Libraries.Summarization_General_Lib import perform_summarization
- from App_Function_Libraries.Chat import chat
  #
  ########################################################################################################################
  #
@@ -375,323 +368,6 @@ def create_creative_writing_tab():
|
|
375 |
gr.Markdown("# Utility to be added...")
|
376 |
|
377 |
|
378 |
-
def chat_with_character(user_message, history, char_data, api_name_input, api_key):
|
379 |
-
if char_data is None:
|
380 |
-
return history, "Please import a character card first."
|
381 |
-
|
382 |
-
bot_message = generate_writing_feedback(user_message, char_data['name'], "Overall", api_name_input,
|
383 |
-
api_key)
|
384 |
-
history.append((user_message, bot_message))
|
385 |
-
return history, ""
|
386 |
-
|
387 |
-
def import_character_card(file):
|
388 |
-
if file is None:
|
389 |
-
logging.warning("No file provided for character card import")
|
390 |
-
return None
|
391 |
-
try:
|
392 |
-
if file.name.lower().endswith(('.png', '.webp')):
|
393 |
-
logging.info(f"Attempting to import character card from image: {file.name}")
|
394 |
-
json_data = extract_json_from_image(file)
|
395 |
-
if json_data:
|
396 |
-
logging.info("JSON data extracted from image, attempting to parse")
|
397 |
-
return import_character_card_json(json_data)
|
398 |
-
else:
|
399 |
-
logging.warning("No JSON data found in the image")
|
400 |
-
else:
|
401 |
-
logging.info(f"Attempting to import character card from JSON file: {file.name}")
|
402 |
-
content = file.read().decode('utf-8')
|
403 |
-
return import_character_card_json(content)
|
404 |
-
except Exception as e:
|
405 |
-
logging.error(f"Error importing character card: {e}")
|
406 |
-
return None
|
407 |
-
|
408 |
-
|
409 |
-
def import_character_card_json(json_content):
|
410 |
-
try:
|
411 |
-
# Remove any leading/trailing whitespace
|
412 |
-
json_content = json_content.strip()
|
413 |
-
|
414 |
-
# Log the first 100 characters of the content
|
415 |
-
logging.debug(f"JSON content (first 100 chars): {json_content[:100]}...")
|
416 |
-
|
417 |
-
card_data = json.loads(json_content)
|
418 |
-
logging.debug(f"Parsed JSON data keys: {list(card_data.keys())}")
|
419 |
-
if 'spec' in card_data and card_data['spec'] == 'chara_card_v2':
|
420 |
-
logging.info("Detected V2 character card")
|
421 |
-
return card_data['data']
|
422 |
-
else:
|
423 |
-
logging.info("Assuming V1 character card")
|
424 |
-
return card_data
|
425 |
-
except json.JSONDecodeError as e:
|
426 |
-
logging.error(f"JSON decode error: {e}")
|
427 |
-
logging.error(f"Problematic JSON content: {json_content[:500]}...")
|
428 |
-
except Exception as e:
|
429 |
-
logging.error(f"Unexpected error parsing JSON: {e}")
|
430 |
-
return None
|
431 |
-
|
432 |
-
|
433 |
-
def extract_json_from_image(image_file):
|
434 |
-
logging.debug(f"Attempting to extract JSON from image: {image_file.name}")
|
435 |
-
try:
|
436 |
-
with Image.open(image_file) as img:
|
437 |
-
logging.debug("Image opened successfully")
|
438 |
-
metadata = img.info
|
439 |
-
if 'chara' in metadata:
|
440 |
-
logging.debug("Found 'chara' in image metadata")
|
441 |
-
chara_content = metadata['chara']
|
442 |
-
logging.debug(f"Content of 'chara' metadata (first 100 chars): {chara_content[:100]}...")
|
443 |
-
try:
|
444 |
-
decoded_content = base64.b64decode(chara_content).decode('utf-8')
|
445 |
-
logging.debug(f"Decoded content (first 100 chars): {decoded_content[:100]}...")
|
446 |
-
return decoded_content
|
447 |
-
except Exception as e:
|
448 |
-
logging.error(f"Error decoding base64 content: {e}")
|
449 |
-
|
450 |
-
logging.debug("'chara' not found in metadata, checking for base64 encoded data")
|
451 |
-
raw_data = img.tobytes()
|
452 |
-
possible_json = raw_data.split(b'{', 1)[-1].rsplit(b'}', 1)[0]
|
453 |
-
if possible_json:
|
454 |
-
try:
|
455 |
-
decoded = base64.b64decode(possible_json).decode('utf-8')
|
456 |
-
if decoded.startswith('{') and decoded.endswith('}'):
|
457 |
-
logging.debug("Found and decoded base64 JSON data")
|
458 |
-
return '{' + decoded + '}'
|
459 |
-
except Exception as e:
|
460 |
-
logging.error(f"Error decoding base64 data: {e}")
|
461 |
-
|
462 |
-
logging.warning("No JSON data found in the image")
|
463 |
-
except Exception as e:
|
464 |
-
logging.error(f"Error extracting JSON from image: {e}")
|
465 |
-
return None
|
466 |
-
|
467 |
-
def load_chat_history(file):
|
468 |
-
try:
|
469 |
-
content = file.read().decode('utf-8')
|
470 |
-
chat_data = json.loads(content)
|
471 |
-
return chat_data['history'], chat_data['character']
|
472 |
-
except Exception as e:
|
473 |
-
logging.error(f"Error loading chat history: {e}")
|
474 |
-
return None, None
|
475 |
-
|
476 |
-
|
477 |
-
# FIXME This should be in the chat tab....
|
478 |
-
def create_character_card_interaction_tab():
|
479 |
-
with gr.TabItem("Chat with a Character Card"):
|
480 |
-
gr.Markdown("# Chat with a Character Card")
|
481 |
-
with gr.Row():
|
482 |
-
with gr.Column(scale=1):
|
483 |
-
character_card_upload = gr.File(label="Upload Character Card")
|
484 |
-
import_card_button = gr.Button("Import Character Card")
|
485 |
-
load_characters_button = gr.Button("Load Existing Characters")
|
486 |
-
from App_Function_Libraries.Chat import get_character_names
|
487 |
-
character_dropdown = gr.Dropdown(label="Select Character", choices=get_character_names())
|
488 |
-
api_name_input = gr.Dropdown(
|
489 |
-
choices=[None, "Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
|
490 |
-
"OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace", "Custom-OpenAI-API"],
|
491 |
-
value=None,
|
492 |
-
# FIXME - make it so the user cant' click `Send Message` without first setting an API + Chatbot
|
493 |
-
label="API for Interaction(Mandatory)"
|
494 |
-
)
|
495 |
-
api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)",
|
496 |
-
placeholder="Enter your API key here", type="password")
|
497 |
-
temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
|
498 |
-
import_chat_button = gr.Button("Import Chat History")
|
499 |
-
chat_file_upload = gr.File(label="Upload Chat History JSON", visible=False)
|
500 |
-
|
501 |
-
|
502 |
-
with gr.Column(scale=2):
|
503 |
-
chat_history = gr.Chatbot(label="Conversation")
|
504 |
-
user_input = gr.Textbox(label="Your message")
|
505 |
-
send_message_button = gr.Button("Send Message")
|
506 |
-
regenerate_button = gr.Button("Regenerate Last Message")
|
507 |
-
save_chat_button = gr.Button("Save This Chat")
|
508 |
-
save_status = gr.Textbox(label="Save Status", interactive=False)
|
509 |
-
|
510 |
-
character_data = gr.State(None)
|
511 |
-
|
512 |
-
def import_chat_history(file, current_history, char_data):
|
513 |
-
loaded_history, char_name = load_chat_history(file)
|
514 |
-
if loaded_history is None:
|
515 |
-
return current_history, char_data, "Failed to load chat history."
|
516 |
-
|
517 |
-
# Check if the loaded chat is for the current character
|
518 |
-
if char_data and char_data.get('name') != char_name:
|
519 |
-
return current_history, char_data, f"Warning: Loaded chat is for character '{char_name}', but current character is '{char_data.get('name')}'. Chat not imported."
|
520 |
-
|
521 |
-
# If no character is selected, try to load the character from the chat
|
522 |
-
if not char_data:
|
523 |
-
new_char_data = load_character(char_name)[0]
|
524 |
-
if new_char_data:
|
525 |
-
char_data = new_char_data
|
526 |
-
else:
|
527 |
-
return current_history, char_data, f"Warning: Character '{char_name}' not found. Please select the character manually."
|
528 |
-
|
529 |
-
return loaded_history, char_data, f"Chat history for '{char_name}' imported successfully."
|
530 |
-
|
531 |
-
def import_character(file):
|
532 |
-
card_data = import_character_card(file)
|
533 |
-
if card_data:
|
534 |
-
from App_Function_Libraries.Chat import save_character
|
535 |
-
save_character(card_data)
|
536 |
-
return card_data, gr.update(choices=get_character_names())
|
537 |
-
else:
|
538 |
-
return None, gr.update()
|
539 |
-
|
540 |
-
def load_character(name):
|
541 |
-
from App_Function_Libraries.Chat import load_characters
|
542 |
-
characters = load_characters()
|
543 |
-
char_data = characters.get(name)
|
544 |
-
if char_data:
|
545 |
-
first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
|
546 |
-
return char_data, [(None, first_message)] if first_message else []
|
547 |
-
return None, []
|
548 |
-
|
549 |
-
def character_chat_wrapper(message, history, char_data, api_endpoint, api_key, temperature):
|
550 |
-
logging.debug("Entered character_chat_wrapper")
|
551 |
-
if char_data is None:
|
552 |
-
return "Please select a character first.", history
|
553 |
-
|
554 |
-
# Prepare the character's background information
|
555 |
-
char_background = f"""
|
556 |
-
Name: {char_data.get('name', 'Unknown')}
|
557 |
-
Description: {char_data.get('description', 'N/A')}
|
558 |
-
Personality: {char_data.get('personality', 'N/A')}
|
559 |
-
Scenario: {char_data.get('scenario', 'N/A')}
|
560 |
-
"""
|
561 |
-
|
562 |
-
# Prepare the system prompt for character impersonation
|
563 |
-
system_message = f"""You are roleplaying as the character described below. Respond to the user's messages in character, maintaining the personality and background provided. Do not break character or refer to yourself as an AI.
|
564 |
-
|
565 |
-
{char_background}
|
566 |
-
|
567 |
-
Additional instructions: {char_data.get('post_history_instructions', '')}
|
568 |
-
"""
|
569 |
-
|
570 |
-
# Prepare media_content and selected_parts
|
571 |
-
media_content = {
|
572 |
-
'id': char_data.get('name'),
|
573 |
-
'title': char_data.get('name', 'Unknown Character'),
|
574 |
-
'content': char_background,
|
575 |
-
'description': char_data.get('description', ''),
|
576 |
-
'personality': char_data.get('personality', ''),
|
577 |
-
'scenario': char_data.get('scenario', '')
|
578 |
-
}
|
579 |
-
selected_parts = ['description', 'personality', 'scenario']
|
580 |
-
|
581 |
-
prompt = char_data.get('post_history_instructions', '')
|
582 |
-
|
583 |
-
# Prepare the input for the chat function
|
584 |
-
if not history:
|
585 |
-
full_message = f"{prompt}\n\n{message}" if prompt else message
|
586 |
-
else:
|
587 |
-
full_message = message
|
588 |
-
|
589 |
-
# Call the chat function
|
590 |
-
bot_message = chat(
|
591 |
-
message,
|
592 |
-
history,
|
593 |
-
media_content,
|
594 |
-
selected_parts,
|
595 |
-
api_endpoint,
|
596 |
-
api_key,
|
597 |
-
prompt,
|
598 |
-
temperature,
|
599 |
-
system_message
|
600 |
-
)
|
601 |
-
|
602 |
-
# Update history
|
603 |
-
history.append((message, bot_message))
|
604 |
-
return history
|
605 |
-
|
606 |
-
def save_chat_history(history, character_name):
|
607 |
-
# Create the Saved_Chats folder if it doesn't exist
|
608 |
-
save_directory = "Saved_Chats"
|
609 |
-
os.makedirs(save_directory, exist_ok=True)
|
610 |
-
|
611 |
-
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
612 |
-
filename = f"chat_history_{character_name}_{timestamp}.json"
|
613 |
-
filepath = os.path.join(save_directory, filename)
|
614 |
-
|
615 |
-
chat_data = {
|
616 |
-
"character": character_name,
|
617 |
-
"timestamp": timestamp,
|
618 |
-
"history": history
|
619 |
-
}
|
620 |
-
|
621 |
-
try:
|
622 |
-
with open(filepath, 'w', encoding='utf-8') as f:
|
623 |
-
json.dump(chat_data, f, ensure_ascii=False, indent=2)
|
624 |
-
return filepath
|
625 |
-
except Exception as e:
|
626 |
-
return f"Error saving chat: {str(e)}"
|
627 |
-
|
628 |
-
def save_current_chat(history, char_data):
|
629 |
-
if not char_data or not history:
|
630 |
-
return "No chat to save or character not selected."
|
631 |
-
|
632 |
-
character_name = char_data.get('name', 'Unknown')
|
633 |
-
result = save_chat_history(history, character_name)
|
634 |
-
if result.startswith("Error"):
|
635 |
-
return result
|
636 |
-
return f"Chat saved successfully as {result}"
|
637 |
-
|
638 |
-
def regenerate_last_message(history, char_data, api_name, api_key, temperature):
|
639 |
-
if not history:
|
640 |
-
return history
|
641 |
-
|
642 |
-
last_user_message = history[-1][0]
|
643 |
-
new_history = history[:-1]
|
644 |
-
|
645 |
-
return character_chat_wrapper(last_user_message, new_history, char_data, api_name, api_key, temperature)
|
646 |
-
|
647 |
-
import_chat_button.click(
|
648 |
-
fn=lambda: gr.update(visible=True),
|
649 |
-
outputs=chat_file_upload
|
650 |
-
)
|
651 |
-
|
652 |
-
chat_file_upload.change(
|
653 |
-
fn=import_chat_history,
|
654 |
-
inputs=[chat_file_upload, chat_history, character_data],
|
655 |
-
outputs=[chat_history, character_data, save_status]
|
656 |
-
)
|
657 |
-
|
658 |
-
import_card_button.click(
|
659 |
-
fn=import_character,
|
660 |
-
inputs=[character_card_upload],
|
661 |
-
outputs=[character_data, character_dropdown]
|
662 |
-
)
|
663 |
-
|
664 |
-
load_characters_button.click(
|
665 |
-
fn=lambda: gr.update(choices=get_character_names()),
|
666 |
-
outputs=character_dropdown
|
667 |
-
)
|
668 |
-
|
669 |
-
character_dropdown.change(
|
670 |
-
fn=load_character,
|
671 |
-
inputs=[character_dropdown],
|
672 |
-
outputs=[character_data, chat_history]
|
673 |
-
)
|
674 |
-
|
675 |
-
send_message_button.click(
|
676 |
-
fn=character_chat_wrapper,
|
677 |
-
inputs=[user_input, chat_history, character_data, api_name_input, api_key_input, temperature_slider],
|
678 |
-
outputs=[chat_history]
|
679 |
-
).then(lambda: "", outputs=user_input)
|
680 |
-
|
681 |
-
regenerate_button.click(
|
682 |
-
fn=regenerate_last_message,
|
683 |
-
inputs=[chat_history, character_data, api_name_input, api_key_input, temperature_slider],
|
684 |
-
outputs=[chat_history]
|
685 |
-
)
|
686 |
-
|
687 |
-
save_chat_button.click(
|
688 |
-
fn=save_current_chat,
|
689 |
-
inputs=[chat_history, character_data],
|
690 |
-
outputs=[save_status]
|
691 |
-
)
|
692 |
-
|
693 |
-
return character_data, chat_history, user_input
|
694 |
-
|
695 |
|
696 |
def create_mikupad_tab():
|
697 |
with gr.TabItem("Mikupad"):
|
|
|
  2   # Description: This file contains the functions that are used for writing in the Gradio UI.
  3   #
  4   # Imports
  5   #
  6   # External Imports
  7   import gradio as gr
  8   import textstat
  9   #
 10   # Local Imports
 11 + from App_Function_Libraries.Summarization.Summarization_General_Lib import perform_summarization
 12   #
 13   ########################################################################################################################
 14   #

368   gr.Markdown("# Utility to be added...")
369
370
371
372   def create_mikupad_tab():
373       with gr.TabItem("Mikupad"):