oceansweep committed • commit c7f0d1a • parent 91b175e
Upload 2 files
App_Function_Libraries/LLM_API_Calls.py
CHANGED
@@ -25,15 +25,15 @@ import json
 import logging
 import os
 import time
+from typing import List
+
 import requests
 #
 # Import 3rd-Party Libraries
-from openai import OpenAI
 from requests import RequestException
 #
 # Import Local libraries
-from App_Function_Libraries.…
-from App_Function_Libraries.Utils import load_and_log_configs
+from App_Function_Libraries.Utils.Utils import load_and_log_configs
 #
 #######################################################################################################################
 # Function Definitions
@@ -62,21 +62,78 @@ def extract_text_from_segments(segments):
 
 
 
-def …
+def get_openai_embeddings(input_data: str, model: str) -> List[float]:
+    """
+    Get embeddings for the input text from OpenAI API.
+
+    Args:
+        input_data (str): The input text to get embeddings for.
+        model (str): The model to use for generating embeddings.
+
+    Returns:
+        List[float]: The embeddings generated by the API.
+    """
     loaded_config_data = load_and_log_configs()
+    api_key = loaded_config_data['api_keys']['openai']
+
+    if not api_key:
+        logging.error("OpenAI: API key not found or is empty")
+        raise ValueError("OpenAI: API Key Not Provided/Found in Config file or is empty")
+
+    logging.debug(f"OpenAI: Using API Key: {api_key[:5]}...{api_key[-5:]}")
+    logging.debug(f"OpenAI: Raw input data (first 500 chars): {str(input_data)[:500]}...")
+    logging.debug(f"OpenAI: Using model: {model}")
+
+    headers = {
+        'Authorization': f'Bearer {api_key}',
+        'Content-Type': 'application/json'
+    }
+
+    request_data = {
+        "input": input_data,
+        "model": model,
+    }
+
+    try:
+        logging.debug("OpenAI: Posting request to embeddings API")
+        response = requests.post('https://api.openai.com/v1/embeddings', headers=headers, json=request_data)
+        logging.debug(f"Full API response data: {response}")
+        if response.status_code == 200:
+            response_data = response.json()
+            if 'data' in response_data and len(response_data['data']) > 0:
+                embedding = response_data['data'][0]['embedding']
+                logging.debug("OpenAI: Embeddings retrieved successfully")
+                return embedding
+            else:
+                logging.warning("OpenAI: Embedding data not found in the response")
+                raise ValueError("OpenAI: Embedding data not available in the response")
+        else:
+            logging.error(f"OpenAI: Embeddings request failed with status code {response.status_code}")
+            logging.error(f"OpenAI: Error response: {response.text}")
+            raise ValueError(f"OpenAI: Failed to retrieve embeddings. Status code: {response.status_code}")
+    except requests.RequestException as e:
+        logging.error(f"OpenAI: Error making API request: {str(e)}", exc_info=True)
+        raise ValueError(f"OpenAI: Error making API request: {str(e)}")
+    except Exception as e:
+        logging.error(f"OpenAI: Unexpected error: {str(e)}", exc_info=True)
+        raise ValueError(f"OpenAI: Unexpected error occurred: {str(e)}")
 
+
+def chat_with_openai(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
+    loaded_config_data = load_and_log_configs()
+    openai_api_key = api_key
     try:
         # API key validation
-        if …
-        logging.info("OpenAI: …
+        if not openai_api_key:
+            logging.info("OpenAI: API key not provided as parameter")
             logging.info("OpenAI: Attempting to use API key from config file")
-        …
+            openai_api_key = loaded_config_data['api_keys']['openai']
 
-        if …
-        logging.error("OpenAI: …
+        if not openai_api_key:
+            logging.error("OpenAI: API key not found or is empty")
             return "OpenAI: API Key Not Provided/Found in Config file or is empty"
 
-        logging.debug(f"OpenAI: Using API Key: {…
+        logging.debug(f"OpenAI: Using API Key: {openai_api_key[:5]}...{openai_api_key[-5:]}")
 
         # Input data handling
         logging.debug(f"OpenAI: Raw input data type: {type(input_data)}")
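Reviewer note (not part of the commit): unlike the chat handlers below, which return error strings, get_openai_embeddings raises ValueError on every failure path, so callers need a try/except. A minimal usage sketch, assuming the loaded config contains a valid `openai` key; the model name is only an example:

    from App_Function_Libraries.LLM_API_Calls import get_openai_embeddings

    try:
        vector = get_openai_embeddings("What is vLLM?", model="text-embedding-3-small")
        print(f"Received {len(vector)} dimensions")  # typically 1536 for this model
    except ValueError as e:
        print(f"Embedding request failed: {e}")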
@@ -120,7 +177,6 @@ def chat_with_openai(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
         else:
             raise ValueError(f"OpenAI: Invalid input data format: {type(data)}")
 
-        openai_model = loaded_config_data['models']['openai'] or "gpt-4o"
         logging.debug(f"OpenAI: Extracted text (first 500 chars): {text[:500]}...")
         logging.debug(f"OpenAI: Custom prompt: {custom_prompt_arg}")
 
@@ -153,12 +209,14 @@ def chat_with_openai(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
 
         logging.debug("OpenAI: Posting request")
         response = requests.post('https://api.openai.com/v1/chat/completions', headers=headers, json=data)
-
+        logging.debug(f"Full API response data: {response}")
         if response.status_code == 200:
             response_data = response.json()
+            logging.debug(response_data)
             if 'choices' in response_data and len(response_data['choices']) > 0:
                 chat_response = response_data['choices'][0]['message']['content'].strip()
                 logging.debug("openai: Chat Sent successfully")
+                logging.debug(f"openai: Chat response: {chat_response}")
                 return chat_response
             else:
                 logging.warning("openai: Chat response not found in the response data")
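Note: the added `logging.debug(f"Full API response data: {response}")` interpolates the requests Response object, which renders only as `<Response [200]>` rather than the body; the same line is added to the Anthropic, HuggingFace, DeepSeek, and Mistral handlers below. If the body is wanted at that point, a sketch of the alternative:

    # Log status and body instead of the Response object's repr
    logging.debug(f"Full API response status: {response.status_code}")
    logging.debug(f"Full API response body: {response.text}")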
@@ -182,13 +240,14 @@ def chat_with_anthropic(api_key, input_data, model, custom_prompt_arg, max_retri…
     try:
         loaded_config_data = load_and_log_configs()
         global anthropic_api_key
+        anthropic_api_key = api_key
         # API key validation
-        if api_key…
+        if not api_key:
             logging.info("Anthropic: API key not provided as parameter")
             logging.info("Anthropic: Attempting to use API key from config file")
             anthropic_api_key = loaded_config_data['api_keys']['anthropic']
 
-        if api_key…
+        if not api_key or api_key.strip() == "":
             logging.error("Anthropic: API key not found or is empty")
             return "Anthropic: API Key Not Provided/Found in Config file or is empty"
 
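Note: both emptiness checks still test the api_key parameter rather than the resolved anthropic_api_key, so a key successfully loaded from the config file is still rejected whenever the parameter itself was empty. The Cohere and HuggingFace handlers below follow the same pattern. A sketch of a tighter check (a suggestion, not what the commit does):

    # Resolve the key once, then validate the resolved value
    anthropic_api_key = api_key or loaded_config_data['api_keys'].get('anthropic') or ""
    if not anthropic_api_key.strip():
        logging.error("Anthropic: API key not found or is empty")
        return "Anthropic: API Key Not Provided/Found in Config file or is empty"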
@@ -237,7 +296,7 @@ def chat_with_anthropic(api_key, input_data, model, custom_prompt_arg, max_retri…
         try:
             logging.debug("anthropic: Posting request to API")
             response = requests.post('https://api.anthropic.com/v1/messages', headers=headers, json=data)
-
+            logging.debug(f"Full API response data: {response}")
             # Check if the status code indicates success
             if response.status_code == 200:
                 logging.debug("anthropic: Post submittal successful")
@@ -275,15 +334,16 @@ def chat_with_anthropic(api_key, input_data, model, custom_prompt_arg, max_retri…
 # Summarize with Cohere
 def chat_with_cohere(api_key, input_data, model, custom_prompt_arg, system_prompt=None):
     global cohere_api_key
+    cohere_api_key = api_key
     loaded_config_data = load_and_log_configs()
     try:
         # API key validation
-        if api_key…
+        if not api_key:
             logging.info("cohere: API key not provided as parameter")
             logging.info("cohere: Attempting to use API key from config file")
             cohere_api_key = loaded_config_data['api_keys']['cohere']
 
-        if api_key…
+        if not api_key or api_key.strip() == "":
             logging.error("cohere: API key not found or is empty")
             return "cohere: API Key Not Provided/Found in Config file or is empty"
 
@@ -324,7 +384,7 @@ def chat_with_cohere(api_key, input_data, model, custom_prompt_arg, system_prompt=None):
         print("cohere: Submitting request to API endpoint")
         response = requests.post('https://api.cohere.ai/v1/chat', headers=headers, json=data)
         response_data = response.json()
-        logging.debug("API Response Data: %s", response_data)
+        logging.debug(f"Full API response data: {response_data}")
 
         if response.status_code == 200:
             if 'text' in response_data:
@@ -439,9 +499,10 @@ def chat_with_groq(api_key, input_data, custom_prompt_arg, temp=None, system_mes…
         response = requests.post('https://api.groq.com/openai/v1/chat/completions', headers=headers, json=data)
 
         response_data = response.json()
-        logging.debug("API Response Data: %s", response_data)
+        logging.debug(f"Full API response data: {response_data}")
 
         if response.status_code == 200:
+            logging.debug(response_data)
             if 'choices' in response_data and len(response_data['choices']) > 0:
                 summary = response_data['choices'][0]['message']['content'].strip()
                 logging.debug("groq: Chat request successful")
@@ -554,7 +615,7 @@ def chat_with_openrouter(api_key, input_data, custom_prompt_arg, temp=None, syst…
         )
 
         response_data = response.json()
-        logging.debug("API Response Data: %s", response_data)
+        logging.debug("Full API Response Data: %s", response_data)
 
         if response.status_code == 200:
             if 'choices' in response_data and len(response_data['choices']) > 0:
@@ -580,11 +641,11 @@ def chat_with_huggingface(api_key, input_data, custom_prompt_arg, system_prompt=…
     logging.debug(f"huggingface: Summarization process starting...")
     try:
         # API key validation
-        if api_key…
+        if not api_key:
             logging.info("HuggingFace: API key not provided as parameter")
             logging.info("HuggingFace: Attempting to use API key from config file")
             huggingface_api_key = loaded_config_data['api_keys']['openai']
-        if api_key…
+        if not api_key or api_key.strip() == "":
             logging.error("HuggingFace: API key not found or is empty")
             return "HuggingFace: API Key Not Provided/Found in Config file or is empty"
         logging.debug(f"HuggingFace: Using API Key: {api_key[:5]}...{api_key[-5:]}")
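Note: this handler loads `loaded_config_data['api_keys']['openai']` into huggingface_api_key, which looks like a copy-paste slip, and the subsequent debug line slices the possibly empty api_key parameter. A sketch of the likely intent, assuming the config exposes a `huggingface` entry (hypothetical key name):

    # Hypothetical: read the HuggingFace key, then validate the resolved value
    huggingface_api_key = api_key or loaded_config_data['api_keys'].get('huggingface') or ""
    if not huggingface_api_key.strip():
        logging.error("HuggingFace: API key not found or is empty")
        return "HuggingFace: API Key Not Provided/Found in Config file or is empty"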
@@ -611,9 +672,9 @@ def chat_with_huggingface(api_key, input_data, custom_prompt_arg, system_prompt=…
         logging.debug("huggingface: Submitting request...")
 
         response = requests.post(API_URL, headers=headers, json=data)
-
+        logging.debug(f"Full API response data: {response}")
         if response.status_code == 200:
-            summary = response.json()[0]['…
+            summary = response.json()[0]['generated_text'].strip()
             logging.debug("huggingface: Chat request successful")
             print("Chat request successful.")
             return summary
@@ -712,9 +773,10 @@ def chat_with_deepseek(api_key, input_data, custom_prompt_arg, temp=None, system…
 
         logging.debug("DeepSeek: Posting request")
         response = requests.post('https://api.deepseek.com/chat/completions', headers=headers, json=data)
-
+        logging.debug(f"Full API response data: {response}")
         if response.status_code == 200:
             response_data = response.json()
+            logging.debug(response_data)
             if 'choices' in response_data and len(response_data['choices']) > 0:
                 summary = response_data['choices'][0]['message']['content'].strip()
                 logging.debug("DeepSeek: Chat request successful")
@@ -802,9 +864,10 @@ def chat_with_mistral(api_key, input_data, custom_prompt_arg, temp=None, system_…
 
         logging.debug("Mistral: Posting request")
         response = requests.post('https://api.mistral.ai/v1/chat/completions', headers=headers, json=data)
-
+        logging.debug(f"Full API response data: {response}")
         if response.status_code == 200:
             response_data = response.json()
+            logging.debug(response_data)
             if 'choices' in response_data and len(response_data['choices']) > 0:
                 summary = response_data['choices'][0]['message']['content'].strip()
                 logging.debug("Mistral: request successful")
@@ -824,59 +887,59 @@ def chat_with_mistral(api_key, input_data, custom_prompt_arg, temp=None, system_…
 
 # Stashed in here since OpenAI usage.... #FIXME
 # FIXME - https://docs.vllm.ai/en/latest/getting_started/quickstart.html .... Great docs.
-def chat_with_vllm(input_data, custom_prompt_input, api_key=None, vllm_api_url="http://127.0.0.1:8000/v1/chat/completions", system_prompt=None):
-    loaded_config_data = load_and_log_configs()
-    llm_model = loaded_config_data['models']['vllm']
-    # API key validation
-    if api_key is None:
-        logging.info("vLLM: API key not provided as parameter")
-        logging.info("vLLM: Attempting to use API key from config file")
-        api_key = loaded_config_data['api_keys']['llama']
-
-    if api_key is None or api_key.strip() == "":
-        logging.info("vLLM: API key not found or is empty")
-    vllm_client = OpenAI(
-        base_url=vllm_api_url,
-        api_key=custom_prompt_input
-    )
-
-    if isinstance(input_data, str) and os.path.isfile(input_data):
-        logging.debug("vLLM: Loading json data for summarization")
-        with open(input_data, 'r') as file:
-            data = json.load(file)
-    else:
-        logging.debug("vLLM: Using provided string data for summarization")
-        data = input_data
-
-    logging.debug(f"vLLM: Loaded data: {data}")
-    logging.debug(f"vLLM: Type of data: {type(data)}")
-
-    if isinstance(data, dict) and 'summary' in data:
-        # If the loaded data is a dictionary and already contains a summary, return it
-        logging.debug("vLLM: Summary already exists in the loaded data")
-        return data['summary']
-
-    # If the loaded data is a list of segment dictionaries or a string, proceed with summarization
-    if isinstance(data, list):
-        segments = data
-        text = extract_text_from_segments(segments)
-    elif isinstance(data, str):
-        text = data
-    else:
-        raise ValueError("Invalid input data format")
-
-
-    custom_prompt = custom_prompt_input
-
-    completion = client.chat.completions.create(
-        model=llm_model,
-        messages=[
-            {"role": "system", "content": f"{system_prompt}"},
-            {"role": "user", "content": f"{text} \n\n\n\n{custom_prompt}"}
-        ]
-    )
-    vllm_summary = completion.choices[0].message.content
-    return vllm_summary
+# def chat_with_vllm(input_data, custom_prompt_input, api_key=None, vllm_api_url="http://127.0.0.1:8000/v1/chat/completions", system_prompt=None):
+#     loaded_config_data = load_and_log_configs()
+#     llm_model = loaded_config_data['models']['vllm']
+#     # API key validation
+#     if api_key is None:
+#         logging.info("vLLM: API key not provided as parameter")
+#         logging.info("vLLM: Attempting to use API key from config file")
+#         api_key = loaded_config_data['api_keys']['llama']
+#
+#     if api_key is None or api_key.strip() == "":
+#         logging.info("vLLM: API key not found or is empty")
+#     vllm_client = OpenAI(
+#         base_url=vllm_api_url,
+#         api_key=custom_prompt_input
+#     )
+#
+#     if isinstance(input_data, str) and os.path.isfile(input_data):
+#         logging.debug("vLLM: Loading json data for summarization")
+#         with open(input_data, 'r') as file:
+#             data = json.load(file)
+#     else:
+#         logging.debug("vLLM: Using provided string data for summarization")
+#         data = input_data
+#
+#     logging.debug(f"vLLM: Loaded data: {data}")
+#     logging.debug(f"vLLM: Type of data: {type(data)}")
+#
+#     if isinstance(data, dict) and 'summary' in data:
+#         # If the loaded data is a dictionary and already contains a summary, return it
+#         logging.debug("vLLM: Summary already exists in the loaded data")
+#         return data['summary']
+#
+#     # If the loaded data is a list of segment dictionaries or a string, proceed with summarization
+#     if isinstance(data, list):
+#         segments = data
+#         text = extract_text_from_segments(segments)
+#     elif isinstance(data, str):
+#         text = data
+#     else:
+#         raise ValueError("Invalid input data format")
+#
+#
+#     custom_prompt = custom_prompt_input
+#
+#     completion = client.chat.completions.create(
+#         model=llm_model,
+#         messages=[
+#             {"role": "system", "content": f"{system_prompt}"},
+#             {"role": "user", "content": f"{text} \n\n\n\n{custom_prompt}"}
+#         ]
+#     )
+#     vllm_summary = completion.choices[0].message.content
+#     return vllm_summary
 
 
 
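Note: the removed chat_with_vllm depended on the dropped `from openai import OpenAI` import and contained a name bug (it builds `vllm_client` but calls `client.chat.completions.create(...)`). The commit keeps it here only as commented-out reference; a rewritten chat_with_vllm built on plain requests.post is added to App_Function_Libraries/LLM_API_Calls_Local.py in this same commit (see below).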
App_Function_Libraries/LLM_API_Calls_Local.py
CHANGED
@@ -4,10 +4,11 @@
 # This library is used to perform summarization with a 'local' inference engine.
 #
 ####
+from typing import Union
 
 ####################
 # Function List
-# FIXME - UPDATE
+# FIXME - UPDATE
 # 1. chat_with_local_llm(text, custom_prompt_arg)
 # 2. chat_with_llama(api_url, text, token, custom_prompt)
 # 3. chat_with_kobold(api_url, text, kobold_api_token, custom_prompt)
@@ -20,7 +21,7 @@
 ####################
 # Import necessary libraries
 # Import Local
-from Utils import *
+from App_Function_Libraries.Utils.Utils import *
 #
 #######################################################################################################################
 # Function Definitions
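The star import keeps every name from Utils.Utils (including load_and_log_configs, used throughout this file) in the module namespace, preserving the old `from Utils import *` behavior under the new package layout. An explicit alternative (a sketch naming only the helper this diff visibly uses):

    from App_Function_Libraries.Utils.Utils import load_and_log_configs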
@@ -399,8 +400,324 @@ def chat_with_aphrodite(input_data, custom_prompt_input, api_key=None, api_IP="h…
     return "Error summarizing with Aphrodite."
 
 
-
-
+# FIXME
+def chat_with_ollama(input_data, custom_prompt, api_url="http://127.0.0.1:11434/api/generate", api_key=None, temp=None, system_message=None, model=None):
+    try:
+        logging.debug("ollama: Loading and validating configurations")
+        loaded_config_data = load_and_log_configs()
+        if loaded_config_data is None:
+            logging.error("Failed to load configuration data")
+            ollama_api_key = None
+        else:
+            # Prioritize the API key passed as a parameter
+            if api_key and api_key.strip():
+                ollama_api_key = api_key
+                logging.info("Ollama: Using API key provided as parameter")
+            else:
+                # If no parameter is provided, use the key from the config
+                ollama_api_key = loaded_config_data['api_keys'].get('ollama')
+                if ollama_api_key:
+                    logging.info("Ollama: Using API key from config file")
+                else:
+                    logging.warning("Ollama: No API key found in config file")
+
+        model = loaded_config_data['models']['ollama']
+
+        # Load transcript
+        logging.debug("Ollama: Loading JSON data")
+        if isinstance(input_data, str) and os.path.isfile(input_data):
+            logging.debug("Ollama: Loading json data for summarization")
+            with open(input_data, 'r') as file:
+                data = json.load(file)
+        else:
+            logging.debug("Ollama: Using provided string data for summarization")
+            data = input_data
+
+        logging.debug(f"Ollama: Loaded data: {data}")
+        logging.debug(f"Ollama: Type of data: {type(data)}")
+
+        if isinstance(data, dict) and 'summary' in data:
+            # If the loaded data is a dictionary and already contains a summary, return it
+            logging.debug("Ollama: Summary already exists in the loaded data")
+            return data['summary']
+
+        # If the loaded data is a list of segment dictionaries or a string, proceed with summarization
+        if isinstance(data, list):
+            segments = data
+            text = extract_text_from_segments(segments)
+        elif isinstance(data, str):
+            text = data
+        else:
+            raise ValueError("Ollama: Invalid input data format")
+
+        headers = {
+            'accept': 'application/json',
+            'content-type': 'application/json',
+        }
+        if len(ollama_api_key) > 5:
+            headers['Authorization'] = f'Bearer {ollama_api_key}'
+
+        ollama_prompt = f"{custom_prompt} \n\n\n\n{text}"
+        if system_message is None:
+            system_message = "You are a helpful AI assistant."
+        logging.debug(f"llama: Prompt being sent is {ollama_prompt}")
+        if system_message is None:
+            system_message = "You are a helpful AI assistant."
+
+        data = {
+            "model": model,
+            "messages": [
+                {"role": "system",
+                 "content": system_message
+                 },
+                {"role": "user",
+                 "content": ollama_prompt
+                 }
+            ],
+        }
+
+        logging.debug("Ollama: Submitting request to API endpoint")
+        print("Ollama: Submitting request to API endpoint")
+        response = requests.post(api_url, headers=headers, json=data)
+        response_data = response.json()
+        logging.debug("API Response Data: %s", response_data)
+
+        if response.status_code == 200:
+            # if 'X' in response_data:
+            logging.debug(response_data)
+            summary = response_data['content'].strip()
+            logging.debug("Ollama: Chat request successful")
+            print("\n\nChat request successful.")
+            return summary
+        else:
+            logging.error(f"\n\nOllama: API request failed with status code {response.status_code}: {response.text}")
+            return f"Ollama: API request failed: {response.text}"
+
+    except Exception as e:
+        logging.error("\n\nOllama: Error in processing: %s", str(e))
+        return f"Ollama: Error occurred while processing summary with ollama: {str(e)}"
+
+def chat_with_vllm(
+        input_data: Union[str, dict, list],
+        custom_prompt_input: str,
+        api_key: str = None,
+        vllm_api_url: str = "http://127.0.0.1:8000/v1/chat/completions",
+        model: str = None,
+        system_prompt: str = None,
+        temp: float = 0.7
+) -> str:
+    logging.debug("vLLM: Summarization process starting...")
+    try:
+        logging.debug("vLLM: Loading and validating configurations")
+        loaded_config_data = load_and_log_configs()
+        if loaded_config_data is None:
+            logging.error("Failed to load configuration data")
+            vllm_api_key = None
+        else:
+            # Prioritize the API key passed as a parameter
+            if api_key and api_key.strip():
+                vllm_api_key = api_key
+                logging.info("vLLM: Using API key provided as parameter")
+            else:
+                # If no parameter is provided, use the key from the config
+                vllm_api_key = loaded_config_data['api_keys'].get('vllm')
+                if vllm_api_key:
+                    logging.info("vLLM: Using API key from config file")
+                else:
+                    logging.warning("vLLM: No API key found in config file")
+
+        logging.debug(f"vLLM: Using API Key: {vllm_api_key[:5]}...{vllm_api_key[-5:]}")
+        # Process input data
+        if isinstance(input_data, str) and os.path.isfile(input_data):
+            logging.debug("vLLM: Loading json data for summarization")
+            with open(input_data, 'r') as file:
+                data = json.load(file)
+        else:
+            logging.debug("vLLM: Using provided data for summarization")
+            data = input_data
+
+        logging.debug(f"vLLM: Type of data: {type(data)}")
+
+        # Extract text for summarization
+        if isinstance(data, dict) and 'summary' in data:
+            logging.debug("vLLM: Summary already exists in the loaded data")
+            return data['summary']
+        elif isinstance(data, list):
+            text = extract_text_from_segments(data)
+        elif isinstance(data, str):
+            text = data
+        elif isinstance(data, dict):
+            text = json.dumps(data)
+        else:
+            raise ValueError("Invalid input data format")
+
+        logging.debug(f"vLLM: Extracted text (showing first 500 chars): {text[:500]}...")
+
+        if system_prompt is None:
+            system_prompt = "You are a helpful AI assistant."
+
+        model = model or loaded_config_data['models']['vllm']
+        if system_prompt is None:
+            system_prompt = "You are a helpful AI assistant."
+
+        # Prepare the API request
+        headers = {
+            "Content-Type": "application/json"
+        }
+
+        payload = {
+            "model": model,
+            "messages": [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": f"{custom_prompt_input}\n\n{text}"}
+            ]
+        }
+
+        # Make the API call
+        logging.debug(f"vLLM: Sending request to {vllm_api_url}")
+        response = requests.post(vllm_api_url, headers=headers, json=payload)
+
+        # Check for successful response
+        response.raise_for_status()
+
+        # Extract and return the summary
+        response_data = response.json()
+        if 'choices' in response_data and len(response_data['choices']) > 0:
+            summary = response_data['choices'][0]['message']['content']
+            logging.debug("vLLM: Summarization successful")
+            logging.debug(f"vLLM: Summary (first 500 chars): {summary[:500]}...")
+            return summary
+        else:
+            raise ValueError("Unexpected response format from vLLM API")
+
+    except requests.RequestException as e:
+        logging.error(f"vLLM: API request failed: {str(e)}")
+        return f"Error: vLLM API request failed - {str(e)}"
+    except json.JSONDecodeError as e:
+        logging.error(f"vLLM: Failed to parse API response: {str(e)}")
+        return f"Error: Failed to parse vLLM API response - {str(e)}"
+    except Exception as e:
+        logging.error(f"vLLM: Unexpected error during summarization: {str(e)}")
+        return f"Error: Unexpected error during vLLM summarization - {str(e)}"
+
+
+def chat_with_custom_openai(api_key, input_data, custom_prompt_arg, temp=None, system_message=None):
+    loaded_config_data = load_and_log_configs()
+    custom_openai_api_key = api_key
+    try:
+        # API key validation
+        if not custom_openai_api_key:
+            logging.info("Custom OpenAI API: API key not provided as parameter")
+            logging.info("Custom OpenAI API: Attempting to use API key from config file")
+            custom_openai_api_key = loaded_config_data['api_keys']['custom_openai_api_key']
+
+        if not custom_openai_api_key:
+            logging.error("Custom OpenAI API: API key not found or is empty")
+            return "Custom OpenAI API: API Key Not Provided/Found in Config file or is empty"
+
+        logging.debug(f"Custom OpenAI API: Using API Key: {custom_openai_api_key[:5]}...{custom_openai_api_key[-5:]}")
+
+        # Input data handling
+        logging.debug(f"Custom OpenAI API: Raw input data type: {type(input_data)}")
+        logging.debug(f"Custom OpenAI API: Raw input data (first 500 chars): {str(input_data)[:500]}...")
+
+        if isinstance(input_data, str):
+            if input_data.strip().startswith('{'):
+                # It's likely a JSON string
+                logging.debug("Custom OpenAI API: Parsing provided JSON string data for summarization")
+                try:
+                    data = json.loads(input_data)
+                except json.JSONDecodeError as e:
+                    logging.error(f"Custom OpenAI API: Error parsing JSON string: {str(e)}")
+                    return f"Custom OpenAI API: Error parsing JSON input: {str(e)}"
+            elif os.path.isfile(input_data):
+                logging.debug("Custom OpenAI API: Loading JSON data from file for summarization")
+                with open(input_data, 'r') as file:
+                    data = json.load(file)
+            else:
+                logging.debug("Custom OpenAI API: Using provided string data for summarization")
+                data = input_data
+        else:
+            data = input_data
+
+        logging.debug(f"Custom OpenAI API: Processed data type: {type(data)}")
+        logging.debug(f"Custom OpenAI API: Processed data (first 500 chars): {str(data)[:500]}...")
+
+        # Text extraction
+        if isinstance(data, dict):
+            if 'summary' in data:
+                logging.debug("Custom OpenAI API: Summary already exists in the loaded data")
+                return data['summary']
+            elif 'segments' in data:
+                text = extract_text_from_segments(data['segments'])
+            else:
+                text = json.dumps(data)  # Convert dict to string if no specific format
+        elif isinstance(data, list):
+            text = extract_text_from_segments(data)
+        elif isinstance(data, str):
+            text = data
+        else:
+            raise ValueError(f"Custom OpenAI API: Invalid input data format: {type(data)}")
+
+        logging.debug(f"Custom OpenAI API: Extracted text (first 500 chars): {text[:500]}...")
+        logging.debug(f"v: Custom prompt: {custom_prompt_arg}")
+
+        openai_model = loaded_config_data['models']['openai'] or "gpt-4o"
+        logging.debug(f"Custom OpenAI API: Using model: {openai_model}")
+
+        headers = {
+            'Authorization': f'Bearer {custom_openai_api_key}',
+            'Content-Type': 'application/json'
+        }
+
+        logging.debug(
+            f"OpenAI API Key: {custom_openai_api_key[:5]}...{custom_openai_api_key[-5:] if custom_openai_api_key else None}")
+        logging.debug("Custom OpenAI API: Preparing data + prompt for submittal")
+        openai_prompt = f"{text} \n\n\n\n{custom_prompt_arg}"
+        if temp is None:
+            temp = 0.7
+        if system_message is None:
+            system_message = "You are a helpful AI assistant who does whatever the user requests."
+        temp = float(temp)
+        data = {
+            "model": openai_model,
+            "messages": [
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": openai_prompt}
+            ],
+            "max_tokens": 4096,
+            "temperature": temp
+        }
+
+        custom_openai_url = loaded_config_data['Local_api_ip']['custom_openai_api_ip']
+
+        logging.debug("Custom OpenAI API: Posting request")
+        response = requests.post(custom_openai_url, headers=headers, json=data)
+        logging.debug(f"Custom OpenAI API full API response data: {response}")
+        if response.status_code == 200:
+            response_data = response.json()
+            logging.debug(response_data)
+            if 'choices' in response_data and len(response_data['choices']) > 0:
+                chat_response = response_data['choices'][0]['message']['content'].strip()
+                logging.debug("Custom OpenAI API: Chat Sent successfully")
+                logging.debug(f"Custom OpenAI API: Chat response: {chat_response}")
+                return chat_response
+            else:
+                logging.warning("Custom OpenAI API: Chat response not found in the response data")
+                return "Custom OpenAI API: Chat not available"
+        else:
+            logging.error(f"Custom OpenAI API: Chat request failed with status code {response.status_code}")
+            logging.error(f"Custom OpenAI API: Error response: {response.text}")
+            return f"OpenAI: Failed to process chat response. Status code: {response.status_code}"
+    except json.JSONDecodeError as e:
+        logging.error(f"Custom OpenAI API: Error decoding JSON: {str(e)}", exc_info=True)
+        return f"Custom OpenAI API: Error decoding JSON input: {str(e)}"
+    except requests.RequestException as e:
+        logging.error(f"Custom OpenAI API: Error making API request: {str(e)}", exc_info=True)
+        return f"Custom OpenAI API: Error making API request: {str(e)}"
+    except Exception as e:
+        logging.error(f"Custom OpenAI API: Unexpected error: {str(e)}", exc_info=True)
+        return f"Custom OpenAI API: Unexpected error occurred: {str(e)}"
 
 
 def save_summary_to_file(summary, file_path):
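Note on chat_with_ollama: when no key is configured, ollama_api_key is None and `len(ollama_api_key) > 5` raises a TypeError (swallowed by the broad except and returned as an error string); the model parameter is unconditionally overwritten from the config; the `system_message is None` default is applied twice; and the success path reads response_data['content'], which is worth verifying against the endpoint actually targeted (the default URL is /api/generate, while the payload uses the chat-style messages schema). A hardening sketch (a suggestion, not what the commit does):

    # Guard a missing key and honor an explicitly passed model
    if ollama_api_key and len(ollama_api_key) > 5:
        headers['Authorization'] = f'Bearer {ollama_api_key}'
    model = model or loaded_config_data['models']['ollama']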
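The rewritten chat_with_vllm posts an OpenAI-style chat payload to the server's /v1/chat/completions route. A hypothetical smoke test, assuming a vLLM OpenAI-compatible server is already running at the default URL; the model name is only an example:

    result = chat_with_vllm(
        input_data="vLLM exposes an OpenAI-compatible REST API.",
        custom_prompt_input="Summarize the text above in one sentence.",
        model="mistralai/Mistral-7B-Instruct-v0.2",
    )
    print(result)

Two small observations: the declared temp parameter is never added to the payload, and the debug line slicing vllm_api_key will raise a TypeError when no key is found (caught by the final except).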