import os
import json
import logging
import shutil

import gradio as gr
import requests
import pandas as pd
from duckduckgo_search import DDGS
from typing import List, Dict
from tempfile import NamedTemporaryFile
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from llama_parse import LlamaParse
from huggingface_hub import InferenceClient
from docx import Document as DocxDocument
import google.generativeai as genai
# Set up basic configuration for logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Environment variables and configurations
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
llama_cloud_api_key = os.environ.get("LLAMA_CLOUD_API_KEY")
ACCOUNT_ID = os.environ.get("CLOUDFLARE_ACCOUNT_ID")
API_TOKEN = os.environ.get("CLOUDFLARE_AUTH_TOKEN")
API_BASE_URL = f"https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/run/"
print(f"ACCOUNT_ID: {ACCOUNT_ID}")
print(f"CLOUDFLARE_AUTH_TOKEN: {API_TOKEN[:5]}..." if API_TOKEN else "CLOUDFLARE_AUTH_TOKEN: Not set")
MODELS = [
"mistralai/Mistral-7B-Instruct-v0.3",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"@cf/meta/llama-3.1-8b-instruct",
"mistralai/Mistral-Nemo-Instruct-2407",
"mistralai/Mathstral-7B-v0.1",
"meta-llama/Meta-Llama-3.1-8B-Instruct",
"meta-llama/Meta-Llama-3.1-70B-Instruct",
"mattshumer/Reflection-Llama-3.1-70B",
"gemini-1.5-flash",
"duckduckgo/gpt-4o-mini",
"duckduckgo/claude-3-haiku",
"duckduckgo/llama-3.1-70b",
"duckduckgo/mixtral-8x7b"
]
# Initialize LlamaParse
llama_parser = LlamaParse(
api_key=llama_cloud_api_key,
result_type="markdown",
num_workers=4,
verbose=True,
language="en",
)
def load_office_document(file: NamedTemporaryFile) -> List[Document]:
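    """Load an Excel or Word file and return its contents as a list of LangChain Documents."""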
file_extension = os.path.splitext(file.name)[1].lower()
documents = []
if file_extension in ['.xlsx', '.xls']:
df = pd.read_excel(file.name)
for _, row in df.iterrows():
content = ' '.join(str(cell) for cell in row if pd.notna(cell))
documents.append(Document(page_content=content, metadata={"source": file.name}))
elif file_extension == '.docx':
        doc = DocxDocument(file.name)  # use python-docx, not the LangChain Document class
for para in doc.paragraphs:
if para.text.strip():
documents.append(Document(page_content=para.text, metadata={"source": file.name}))
return documents
def load_document(file: NamedTemporaryFile, parser: str = "llamaparse") -> List[Document]:
"""Loads and splits the document into pages."""
if parser == "pypdf":
loader = PyPDFLoader(file.name)
return loader.load_and_split()
elif parser == "llamaparse":
try:
documents = llama_parser.load_data(file.name)
return [Document(page_content=doc.text, metadata={"source": file.name}) for doc in documents]
except Exception as e:
print(f"Error using Llama Parse: {str(e)}")
print("Falling back to PyPDF parser")
loader = PyPDFLoader(file.name)
return loader.load_and_split()
else:
raise ValueError("Invalid parser specified. Use 'pypdf' or 'llamaparse'.")
def get_embeddings():
return HuggingFaceEmbeddings(model_name="avsolatorio/GIST-Embedding-v0")
# Persist the list of uploaded documents across app restarts
DOCUMENTS_FILE = "uploaded_documents.json"
def load_documents():
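    """Load the persisted document list from disk, returning an empty list if none exists."""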
if os.path.exists(DOCUMENTS_FILE):
with open(DOCUMENTS_FILE, "r") as f:
return json.load(f)
return []
def save_documents(documents):
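    """Persist the document list to disk as JSON."""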
with open(DOCUMENTS_FILE, "w") as f:
json.dump(documents, f)
# Load any previously uploaded documents at startup
uploaded_documents = load_documents()
def update_vectors(files, parser):
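    """Parse the uploaded files and add their chunks to the PDF or Office FAISS vector store."""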
global uploaded_documents
logging.info(f"Entering update_vectors with {len(files)} files and parser: {parser}")
if not files:
logging.warning("No files provided for update_vectors")
return "Please upload at least one file.", display_documents()
embed = get_embeddings()
total_chunks = 0
all_data = []
for file in files:
logging.info(f"Processing file: {file.name}")
try:
file_extension = os.path.splitext(file.name)[1].lower()
if file_extension in ['.xlsx', '.xls', '.docx']:
if parser != "office":
logging.warning(f"Using office parser for {file.name} regardless of selected parser")
data = load_office_document(file)
elif file_extension == '.pdf':
if parser == "office":
logging.warning(f"Cannot use office parser for PDF file {file.name}. Using llamaparse.")
data = load_document(file, "llamaparse")
else:
data = load_document(file, parser)
else:
logging.warning(f"Unsupported file type: {file_extension}")
continue
if not data:
logging.warning(f"No chunks loaded from {file.name}")
continue
logging.info(f"Loaded {len(data)} chunks from {file.name}")
all_data.extend(data)
total_chunks += len(data)
if not any(doc["name"] == file.name for doc in uploaded_documents):
uploaded_documents.append({"name": file.name, "selected": True})
logging.info(f"Added new document to uploaded_documents: {file.name}")
else:
logging.info(f"Document already exists in uploaded_documents: {file.name}")
except Exception as e:
logging.error(f"Error processing file {file.name}: {str(e)}")
logging.info(f"Total chunks processed: {total_chunks}")
if not all_data:
logging.warning("No valid data extracted from uploaded files")
return "No valid data could be extracted from the uploaded files. Please check the file contents and try again.", display_documents()
try:
# Update the appropriate vector store based on file type
pdf_data = [doc for doc in all_data if doc.metadata["source"].lower().endswith('.pdf')]
office_data = [doc for doc in all_data if not doc.metadata["source"].lower().endswith('.pdf')]
if pdf_data:
if os.path.exists("faiss_database"):
pdf_database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
pdf_database.add_documents(pdf_data)
else:
pdf_database = FAISS.from_documents(pdf_data, embed)
pdf_database.save_local("faiss_database")
logging.info("PDF FAISS database updated and saved")
if office_data:
if os.path.exists("office_faiss_database"):
office_database = FAISS.load_local("office_faiss_database", embed, allow_dangerous_deserialization=True)
office_database.add_documents(office_data)
else:
office_database = FAISS.from_documents(office_data, embed)
office_database.save_local("office_faiss_database")
logging.info("Office FAISS database updated and saved")
except Exception as e:
logging.error(f"Error updating FAISS database: {str(e)}")
return f"Error updating vector store: {str(e)}", display_documents()
# Save the updated list of documents
save_documents(uploaded_documents)
# Return a tuple with the status message and the updated document list
return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files.", display_documents()
def delete_documents(selected_docs):
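    """Remove the selected documents from the FAISS store and the persisted document list."""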
global uploaded_documents
if not selected_docs:
return "No documents selected for deletion.", display_documents()
    embed = get_embeddings()
    if not os.path.exists("faiss_database"):
        return "No document database found.", display_documents()
    database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
deleted_docs = []
docs_to_keep = []
for doc in database.docstore._dict.values():
if doc.metadata.get("source") not in selected_docs:
docs_to_keep.append(doc)
else:
deleted_docs.append(doc.metadata.get("source", "Unknown"))
# Print debugging information
logging.info(f"Total documents before deletion: {len(database.docstore._dict)}")
logging.info(f"Documents to keep: {len(docs_to_keep)}")
logging.info(f"Documents to delete: {len(deleted_docs)}")
if not docs_to_keep:
# If all documents are deleted, remove the FAISS database directory
if os.path.exists("faiss_database"):
shutil.rmtree("faiss_database")
logging.info("All documents deleted. Removed FAISS database directory.")
else:
# Create new FAISS index with remaining documents
new_database = FAISS.from_documents(docs_to_keep, embed)
new_database.save_local("faiss_database")
logging.info(f"Created new FAISS index with {len(docs_to_keep)} documents.")
# Update uploaded_documents list
uploaded_documents = [doc for doc in uploaded_documents if doc["name"] not in deleted_docs]
save_documents(uploaded_documents)
return f"Deleted documents: {', '.join(deleted_docs)}", display_documents()
def chatbot_interface(message, history, model, temperature, num_calls, use_web_search, selected_docs):
    if not message.strip():
        yield history
        return
    history = history + [(message, "")]
    try:
        # respond() requires use_web_search and selected_docs; pass them through
        for response in respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
            history[-1] = (message, response)
            yield history
    except gr.CancelledError:
        yield history
    except Exception as e:
        logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
        history[-1] = (message, f"An unexpected error occurred: {str(e)}")
        yield history
def retry_last_response(history, model, temperature, num_calls, use_web_search, selected_docs):
    if not history:
        return history
    last_user_msg = history[-1][0]
    history = history[:-1]  # Remove the last response
    return chatbot_interface(last_user_msg, history, model, temperature, num_calls, use_web_search, selected_docs)
def truncate_context(context, max_chars=10000):
    """Truncate the context to a maximum number of characters to stay within model input limits."""
    if len(context) <= max_chars:
        return context
    return context[:max_chars] + "..."
def get_response_from_duckduckgo(query, model, context, num_calls=1, temperature=0.2):
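    """Query a DuckDuckGo-hosted chat model with the (truncated) document context prepended."""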
logging.info(f"Using DuckDuckGo chat with model: {model}")
ddg_model = model.split('/')[-1] # Extract the model name from the full string
# Truncate the context to avoid exceeding input limits
truncated_context = truncate_context(context)
full_response = ""
for _ in range(num_calls):
try:
# Include truncated context in the query
contextualized_query = f"Using the following context:\n{truncated_context}\n\nUser question: {query}"
results = DDGS().chat(contextualized_query, model=ddg_model)
full_response += results + "\n"
logging.info(f"DuckDuckGo API response received. Length: {len(results)}")
except Exception as e:
logging.error(f"Error in generating response from DuckDuckGo: {str(e)}")
yield f"An error occurred with the {model} model: {str(e)}. Please try again."
return
yield full_response.strip()
class ConversationManager:
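    """Tracks the query/response history and a short rolling context for follow-up questions."""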
def __init__(self):
self.history = []
self.current_context = None
def add_interaction(self, query, response):
self.history.append((query, response))
self.current_context = f"Previous query: {query}\nPrevious response summary: {response[:200]}..."
def get_context(self):
return self.current_context
conversation_manager = ConversationManager()
def get_web_search_results(query: str, max_results: int = 10) -> List[Dict[str, str]]:
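    """Run a DuckDuckGo text search and return up to max_results result dicts."""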
try:
results = list(DDGS().text(query, max_results=max_results))
if not results:
print(f"No results found for query: {query}")
return results
except Exception as e:
print(f"An error occurred during web search: {str(e)}")
return [{"error": f"An error occurred during web search: {str(e)}"}]
def rephrase_query(original_query: str, conversation_manager: ConversationManager) -> str:
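    """Rephrase the query for web search, folding in conversation context when the query is a follow-up."""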
context = conversation_manager.get_context()
if context:
prompt = f"""You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
4. Provide ONLY the rephrased query without any additional explanation or reasoning.
Context: {context}
New query: {original_query}
Rephrased query:"""
response = DDGS().chat(prompt, model="llama-3.1-70b")
rephrased_query = response.split('\n')[0].strip()
return rephrased_query
return original_query
def summarize_web_results(query: str, search_results: List[Dict[str, str]], conversation_manager: ConversationManager) -> str:
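    """Summarize web search results into an article-style answer using a DuckDuckGo chat model."""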
try:
context = conversation_manager.get_context()
search_context = "\n\n".join([f"Title: {result['title']}\nContent: {result['body']}" for result in search_results])
prompt = f"""You are a highly intelligent & expert analyst and your job is to skillfully articulate the web search results about '{query}' and considering the context: {context},
You have to create a comprehensive news summary FOCUSING on the context provided to you.
Include key facts, relevant statistics, and expert opinions if available.
Ensure the article is well-structured with an introduction, main body, and conclusion, IF NECESSARY.
Address the query in the context of the ongoing conversation IF APPLICABLE.
Cite sources directly within the generated text and not at the end of the generated text, integrating URLs where appropriate to support the information provided:
{search_context}
Article:"""
summary = DDGS().chat(prompt, model="llama-3.1-70b")
return summary
except Exception as e:
return f"An error occurred during summarization: {str(e)}"
def get_response_from_gemini(query, model, selected_docs, file_type, num_calls=1, temperature=0.2):
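    """Stream a response from Gemini, building an Excel (code-generation) or PDF (summarization) prompt."""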
# Configure the Gemini API
genai.configure(api_key=os.environ["GEMINI_API_KEY"])
# Define the model
gemini_model = genai.GenerativeModel(
model_name="gemini-1.5-flash",
generation_config={
"temperature": temperature,
"top_p": 1,
"top_k": 1,
"max_output_tokens": 20000,
},
)
if file_type == "excel":
# Excel functionality remains the same
system_instruction = """You are a highly specialized Python programmer with deep expertise in data analysis and visualization using Excel spreadsheets.
Your primary goal is to generate accurate and efficient Python code to perform calculations or create visualizations based on the user's requests.
Strictly use the data provided to write code that identifies key metrics, trends, and significant details relevant to the query.
Do not make assumptions or include any information that is not explicitly supported by the dataset.
If the user requests a calculation, provide the appropriate Python code to execute it, and if a visualization is needed, generate code using the matplotlib library to create the chart.
Based on the following data extracted from Excel spreadsheets:\n{context}\n\nPlease provide the Python code needed to execute the following task: '{query}'.
Ensure that the code is derived directly from the dataset.
If a chart is requested, use the matplotlib library to generate the appropriate visualization."""
full_prompt = f"{system_instruction}\n\nContext:\n{selected_docs}\n\nUser query: {query}"
elif file_type == "pdf":
# PDF functionality similar to get_response_from_pdf
embed = get_embeddings()
if os.path.exists("faiss_database"):
database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
else:
yield "No documents available. Please upload PDF documents to answer questions."
return
# Pre-filter the documents
filtered_docs = [doc for doc_id, doc in database.docstore._dict.items()
if isinstance(doc, Document) and doc.metadata.get("source") in selected_docs]
if not filtered_docs:
yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
return
# Create a new FAISS index with only the selected documents
filtered_db = FAISS.from_documents(filtered_docs, embed)
retriever = filtered_db.as_retriever(search_kwargs={"k": 10})
relevant_docs = retriever.get_relevant_documents(query)
context_str = "\n".join([doc.page_content for doc in relevant_docs])
system_instruction = """You are a highly specialized financial analyst assistant with expertise in analyzing and summarizing financial documents.
Your goal is to provide accurate, detailed, and precise summaries based on the context provided.
Avoid making assumptions or adding information that is not explicitly supported by the context from the PDF documents.
Using the following context from the PDF documents:\n{context_str}\n\nPlease generate a step-by-step reasoning before arriving at a comprehensive and accurate summary addressing the following question: '{query}'.
Ensure your response is strictly based on the provided context, highlighting key financial metrics, trends, and significant details relevant to the query.
Avoid any speculative or unverified information."""
full_prompt = f"{system_instruction}\n\nContext:\n{context_str}\n\nUser query: {query}\n\nPlease generate a step-by-step reasoning before arriving at a comprehensive and accurate summary addressing the question. Ensure your response is strictly based on the provided context, highlighting key metrics, trends, and significant details relevant to the query. Avoid any speculative or unverified information."
else:
raise ValueError("Invalid file type. Use 'excel' or 'pdf'.")
full_response = ""
for _ in range(num_calls):
try:
# Generate content with streaming enabled
response = gemini_model.generate_content(full_prompt, stream=True)
for chunk in response:
if chunk.text:
full_response += chunk.text
yield full_response # Yield the accumulated response so far
except Exception as e:
yield f"An error occurred with the Gemini model: {str(e)}. Please try again."
if not full_response:
yield "No response generated from the Gemini model."
def get_response_from_excel(query, model, context, num_calls=3, temperature=0.2):
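    """Stream Python code generated from Excel context, routing to DuckDuckGo, Cloudflare, or Hugging Face."""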
logging.info(f"Getting response from Excel using model: {model}")
messages = [
{"role": "system", "content": "You are a highly specialized Python programmer with deep expertise in data analysis and visualization using Excel spreadsheets. Your primary goal is to generate accurate and efficient Python code to perform calculations or create visualizations based on the user's requests. Strictly use the data provided to write code that identifies key metrics, trends, and significant details relevant to the query. Do not make assumptions or include any information that is not explicitly supported by the dataset. If the user requests a calculation, provide the appropriate Python code to execute it, and if a visualization is needed, generate code using the matplotlib library to create the chart."},
{"role": "user", "content": f"Based on the following data extracted from Excel spreadsheets:\n{context}\n\nPlease provide the Python code needed to execute the following task: '{query}'. Ensure that the code is derived directly from the dataset. If a chart is requested, use the matplotlib library to generate the appropriate visualization."}
]
if model.startswith("duckduckgo/"):
# Use DuckDuckGo chat with context
return get_response_from_duckduckgo(query, model, context, num_calls, temperature)
elif model == "@cf/meta/llama-3.1-8b-instruct":
# Use Cloudflare API
return get_response_from_cloudflare(prompt="", context=context, query=query, num_calls=num_calls, temperature=temperature, search_type="excel")
else:
# Use Hugging Face API
client = InferenceClient(model, token=huggingface_token)
response = ""
for i in range(num_calls):
logging.info(f"API call {i+1}/{num_calls}")
for message in client.chat_completion(
messages=messages,
max_tokens=20000,
temperature=temperature,
stream=True,
top_p=0.2,
):
if message.choices and message.choices[0].delta and message.choices[0].delta.content:
chunk = message.choices[0].delta.content
response += chunk
yield response # Yield partial response
logging.info("Finished generating response for Excel data")
def get_response_from_llama(query, model, selected_docs, file_type, num_calls=1, temperature=0.2):
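    """Stream a response from a Hugging Face-hosted Llama model using Excel or PDF document context."""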
logging.info(f"Getting response from Llama using model: {model}")
# Initialize the Hugging Face client
client = InferenceClient(model, token=huggingface_token)
if file_type == "excel":
# Excel functionality
system_instruction = """You are a highly specialized Python programmer with deep expertise in data analysis and visualization using Excel spreadsheets.
Your primary goal is to generate accurate and efficient Python code to perform calculations or create visualizations based on the user's requests.
Strictly use the data provided to write code that identifies key metrics, trends, and significant details relevant to the query.
Do not make assumptions or include any information that is not explicitly supported by the dataset.
If the user requests a calculation, provide the appropriate Python code to execute it, and if a visualization is needed, generate code using the matplotlib library to create the chart."""
# Get the context from selected Excel documents
        embed = get_embeddings()
        if not os.path.exists("office_faiss_database"):
            yield "No Office documents available. Please upload Excel documents to answer questions."
            return
        office_database = FAISS.load_local("office_faiss_database", embed, allow_dangerous_deserialization=True)
retriever = office_database.as_retriever(search_kwargs={"k": 20})
relevant_docs = retriever.get_relevant_documents(query)
context = "\n".join([doc.page_content for doc in relevant_docs if doc.metadata["source"] in selected_docs])
# Truncate context
context = truncate_context(context)
messages = [
{"role": "system", "content": system_instruction},
{"role": "user", "content": f"Based on the following data extracted from Excel spreadsheets:\n{context}\n\nPlease provide the Python code needed to execute the following task: '{query}'. Ensure that the code is derived directly from the dataset. If a chart is requested, use the matplotlib library to generate the appropriate visualization."}
]
elif file_type == "pdf":
# PDF functionality
        embed = get_embeddings()
        if not os.path.exists("faiss_database"):
            yield "No PDF documents available. Please upload PDF documents to answer questions."
            return
        pdf_database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
retriever = pdf_database.as_retriever(search_kwargs={"k": 10})
relevant_docs = retriever.get_relevant_documents(query)
context_str = "\n".join([doc.page_content for doc in relevant_docs if doc.metadata["source"] in selected_docs])
# Truncate context
context_str = truncate_context(context_str)
system_instruction = """You are an AI assistant designed to provide detailed, step-by-step responses. Your outputs should follow this structure:
1. Begin with a <thinking> section. Everything in this section is invisible to the user.
2. Inside the thinking section:
a. Briefly analyze the question and outline your approach.
b. Present a clear plan of steps to solve the problem.
c. Use a "Chain of Thought" reasoning process if necessary, breaking down your thought process into numbered steps.
3. Include a <reflection> section for each idea where you:
a. Review your reasoning.
b. Check for potential errors or oversights.
c. Confirm or adjust your conclusion if necessary.
4. Be sure to close all reflection sections.
5. Close the thinking section with </thinking>.
6. Provide your final answer in an <output> section.
Always use these tags in your responses. Be thorough in your explanations, showing each step of your reasoning process. Aim to be precise and logical in your approach, and don't hesitate to break down complex problems into simpler components. Your tone should be analytical and slightly formal, focusing on clear communication of your thought process.
Remember: Both <thinking> and <reflection> MUST be tags and must be closed at their conclusion
Make sure all <tags> are on separate lines with no other text. Do not include other text on a line containing a tag."""
messages = [
{"role": "system", "content": system_instruction},
{"role": "user", "content": f"Using the following context from the PDF documents:\n{context_str}\n\nPlease generate a step-by-step reasoning before arriving at a comprehensive and accurate summary addressing the following question: '{query}'. Ensure your response is strictly based on the provided context, highlighting key metrics, trends, and significant details relevant to the query. Avoid any speculative or unverified information."}
]
else:
raise ValueError("Invalid file type. Use 'excel' or 'pdf'.")
full_response = ""
for _ in range(num_calls):
try:
# Generate content with streaming enabled
for response in client.chat_completion(
messages=messages, # Pass messages in the required format
max_tokens=3000, # Reduced to ensure we stay within token limits
temperature=temperature,
stream=True,
top_p=0.9,
):
                # Stream chunks expose .choices[0].delta.content (same shape as the other chat_completion call sites);
                # the previous dict-based check never matched, so nothing was ever yielded
                if response.choices and response.choices[0].delta and response.choices[0].delta.content:
                    chunk = response.choices[0].delta.content
                    full_response += chunk
                    yield full_response  # Yield the accumulated response so far
except Exception as e:
logging.error(f"Error during API call: {str(e)}")
yield f"An error occurred with the Llama model: {str(e)}. Please try again."
if not full_response:
logging.warning("No response generated from the Llama model")
yield "No response generated from the Llama model."
# Modify the existing respond function to handle both PDF and web search
def respond(message, history, model, temperature, num_calls, use_web_search, selected_docs):
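    """Main chat handler: answer via web search or via the selected document vector stores."""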
logging.info(f"User Query: {message}")
logging.info(f"Model Used: {model}")
logging.info(f"Selected Documents: {selected_docs}")
logging.info(f"Use Web Search: {use_web_search}")
if use_web_search:
original_query = message
rephrased_query = rephrase_query(message, conversation_manager)
logging.info(f"Original query: {original_query}")
logging.info(f"Rephrased query: {rephrased_query}")
final_summary = ""
for _ in range(num_calls):
search_results = get_web_search_results(rephrased_query)
if not search_results:
final_summary += f"No search results found for the query: {rephrased_query}\n\n"
elif "error" in search_results[0]:
final_summary += search_results[0]["error"] + "\n\n"
else:
summary = summarize_web_results(rephrased_query, search_results, conversation_manager)
final_summary += summary + "\n\n"
if final_summary:
conversation_manager.add_interaction(original_query, final_summary)
yield final_summary
else:
yield "Unable to generate a response. Please try a different query."
else:
try:
embed = get_embeddings()
pdf_database = None
office_database = None
if os.path.exists("faiss_database"):
pdf_database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
if os.path.exists("office_faiss_database"):
office_database = FAISS.load_local("office_faiss_database", embed, allow_dangerous_deserialization=True)
if not pdf_database and not office_database:
yield "No documents available. Please upload documents to answer questions."
return
all_relevant_docs = []
if pdf_database:
pdf_retriever = pdf_database.as_retriever(search_kwargs={"k": 10})
all_relevant_docs.extend(pdf_retriever.get_relevant_documents(message))
if office_database:
office_retriever = office_database.as_retriever(search_kwargs={"k": 10})
all_relevant_docs.extend(office_retriever.get_relevant_documents(message))
relevant_docs = [doc for doc in all_relevant_docs if doc.metadata["source"] in selected_docs]
if not relevant_docs:
yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
return
# Separate Excel documents from others
excel_docs = [doc for doc in relevant_docs if doc.metadata["source"].lower().endswith(('.xlsx', '.xls'))]
other_docs = [doc for doc in relevant_docs if not doc.metadata["source"].lower().endswith(('.xlsx', '.xls'))]
excel_context = "\n".join([doc.page_content for doc in excel_docs])
other_context = "\n".join([doc.page_content for doc in other_docs])
logging.info(f"Excel context length: {len(excel_context)}")
logging.info(f"Other context length: {len(other_context)}")
# Process Excel documents
            if excel_docs:
                file_type = "excel"
                if model == "gemini-1.5-flash":
                    for chunk in get_response_from_gemini(message, model, selected_docs, file_type, num_calls, temperature):
                        yield chunk
                elif model == "@cf/meta/llama-3.1-8b-instruct" or model.startswith("duckduckgo/"):
                    # Cloudflare and DuckDuckGo models are routed inside get_response_from_excel;
                    # check them before the generic "llama" match below, which would misroute them to Hugging Face
                    for response in get_response_from_excel(message, model, excel_context, num_calls, temperature):
                        yield response
                elif "llama" in model.lower():
                    for chunk in get_response_from_llama(message, model, selected_docs, file_type, num_calls, temperature):
                        yield chunk
                else:
                    for response in get_response_from_excel(message, model, excel_context, num_calls, temperature):
                        yield response
# Process other documents (PDF, Word)
if other_docs:
file_type = "pdf"
if model == "gemini-1.5-flash":
for chunk in get_response_from_gemini(message, model, selected_docs, file_type, num_calls, temperature):
yield chunk
elif model == "@cf/meta/llama-3.1-8b-instruct":
for response in get_response_from_cloudflare(prompt="", context=other_context, query=message, num_calls=num_calls, temperature=temperature, search_type="document"):
yield response
elif "llama" in model.lower():
for chunk in get_response_from_llama(message, model, selected_docs, file_type, num_calls, temperature):
yield chunk
else:
for response in get_response_from_pdf(message, model, selected_docs, num_calls, temperature):
yield response
except Exception as e:
logging.error(f"Error with {model}: {str(e)}")
if "microsoft/Phi-3-mini-4k-instruct" in model:
logging.info("Falling back to Mistral model due to Phi-3 error")
fallback_model = "mistralai/Mistral-7B-Instruct-v0.3"
yield from respond(message, history, fallback_model, temperature, num_calls, use_web_search, selected_docs)
else:
yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."
def get_response_from_cloudflare(prompt, context, query, num_calls=3, temperature=0.2, search_type="pdf"):
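    """Stream a response from the Cloudflare Workers AI llama-3.1-8b-instruct endpoint."""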
headers = {
"Authorization": f"Bearer {API_TOKEN}",
"Content-Type": "application/json"
}
model = "@cf/meta/llama-3.1-8b-instruct"
if search_type == "pdf":
instruction = f"""Using the following context from the PDF documents:
{context}
Write a detailed and complete response that answers the following user question: '{query}'"""
else: # web search
instruction = f"""Using the following context:
{context}
Write a detailed and complete research document that fulfills the following user request: '{query}'
After writing the document, please provide a list of sources used in your response."""
inputs = [
{"role": "system", "content": instruction},
{"role": "user", "content": query}
]
payload = {
"messages": inputs,
"stream": True,
"temperature": temperature,
"max_tokens": 32000
}
full_response = ""
for i in range(num_calls):
try:
with requests.post(f"{API_BASE_URL}{model}", headers=headers, json=payload, stream=True) as response:
if response.status_code == 200:
                    for line in response.iter_lines():
                        if not line:
                            continue
                        decoded = line.decode('utf-8')
                        # Cloudflare streams server-sent events; skip non-data lines and the [DONE] sentinel
                        if not decoded.startswith('data: ') or decoded.strip() == 'data: [DONE]':
                            continue
                        try:
                            json_response = json.loads(decoded[len('data: '):])
                            if 'response' in json_response:
                                chunk = json_response['response']
                                full_response += chunk
                                yield full_response
                        except json.JSONDecodeError as e:
                            logging.error(f"Error parsing streaming response: {str(e)}")
                            continue
else:
logging.error(f"HTTP Error: {response.status_code}, Response: {response.text}")
yield f"I apologize, but I encountered an HTTP error: {response.status_code}. Please try again later."
except Exception as e:
logging.error(f"Error in generating response from Cloudflare: {str(e)}")
yield f"I apologize, but an error occurred: {str(e)}. Please try again later."
if not full_response:
yield "I apologize, but I couldn't generate a response at this time. Please try again later."
def create_web_search_vectors(search_results):
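    """Build an in-memory FAISS index from web search results (title, body, and source URL)."""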
embed = get_embeddings()
documents = []
for result in search_results:
if 'body' in result:
content = f"{result['title']}\n{result['body']}\nSource: {result['href']}"
documents.append(Document(page_content=content, metadata={"source": result['href']}))
return FAISS.from_documents(documents, embed)
def get_response_from_pdf(query, model, selected_docs, num_calls=3, temperature=0.2):
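    """Answer a query from the selected PDF documents, streaming via Cloudflare or Hugging Face."""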
logging.info(f"Entering get_response_from_pdf with query: {query}, model: {model}, selected_docs: {selected_docs}")
embed = get_embeddings()
if os.path.exists("faiss_database"):
logging.info("Loading FAISS database")
database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
else:
logging.warning("No FAISS database found")
yield "No documents available. Please upload PDF documents to answer questions."
return
# Pre-filter the documents
filtered_docs = []
for doc_id, doc in database.docstore._dict.items():
if isinstance(doc, Document) and doc.metadata.get("source") in selected_docs:
filtered_docs.append(doc)
logging.info(f"Number of documents after pre-filtering: {len(filtered_docs)}")
if not filtered_docs:
logging.warning(f"No documents found for the selected sources: {selected_docs}")
yield "No relevant information found in the selected documents. Please try selecting different documents or rephrasing your query."
return
# Create a new FAISS index with only the selected documents
filtered_db = FAISS.from_documents(filtered_docs, embed)
retriever = filtered_db.as_retriever(search_kwargs={"k": 10})
logging.info(f"Retrieving relevant documents for query: {query}")
relevant_docs = retriever.get_relevant_documents(query)
logging.info(f"Number of relevant documents retrieved: {len(relevant_docs)}")
for doc in relevant_docs:
logging.info(f"Document source: {doc.metadata['source']}")
logging.info(f"Document content preview: {doc.page_content[:100]}...") # Log first 100 characters of each document
context_str = "\n".join([doc.page_content for doc in relevant_docs])
logging.info(f"Total context length: {len(context_str)}")
if model == "@cf/meta/llama-3.1-8b-instruct":
logging.info("Using Cloudflare API")
# Use Cloudflare API with the retrieved context
for response in get_response_from_cloudflare(prompt="", context=context_str, query=query, num_calls=num_calls, temperature=temperature, search_type="pdf"):
yield response
else:
logging.info("Using Hugging Face API")
# Use Hugging Face API
messages = [
{"role": "system", "content": """You are an AI assistant designed to provide detailed, step-by-step responses. Your outputs should follow this structure:
1. Begin with a <thinking> section. Everything in this section is invisible to the user.
2. Inside the thinking section:
a. Briefly analyze the question and outline your approach.
b. Present a clear plan of steps to solve the problem.
c. Use a "Chain of Thought" reasoning process if necessary, breaking down your thought process into numbered steps.
3. Include a <reflection> section for each idea where you:
a. Review your reasoning.
b. Check for potential errors or oversights.
c. Confirm or adjust your conclusion if necessary.
4. Be sure to close all reflection sections.
5. Close the thinking section with </thinking>.
6. Provide your final answer in an <output> section.
Always use these tags in your responses. Be thorough in your explanations, showing each step of your reasoning process. Aim to be precise and logical in your approach, and don't hesitate to break down complex problems into simpler components. Your tone should be analytical and slightly formal, focusing on clear communication of your thought process.
Remember: Both <thinking> and <reflection> MUST be tags and must be closed at their conclusion
Make sure all <tags> are on separate lines with no other text. Do not include other text on a line containing a tag."""},
{"role": "user", "content": f"Using the following context from the PDF documents:\n{context_str}\n\nPlease generate a step-by-step reasoning before arriving at a comprehensive and accurate summary addressing the following question: '{query}'. Ensure your response is strictly based on the provided context, highlighting key financial metrics, trends, and significant details relevant to the query. Avoid any speculative or unverified information."}
]
client = InferenceClient(model, token=huggingface_token)
response = ""
for i in range(num_calls):
logging.info(f"API call {i+1}/{num_calls}")
for message in client.chat_completion(
messages=messages,
max_tokens=20000,
temperature=temperature,
stream=True,
top_p=0.8,
):
if message.choices and message.choices[0].delta and message.choices[0].delta.content:
chunk = message.choices[0].delta.content
response += chunk
yield response # Yield partial response
logging.info("Finished generating response")
def vote(data: gr.LikeData):
if data.liked:
print(f"You upvoted this response: {data.value}")
else:
print(f"You downvoted this response: {data.value}")
css = """
/* Fine-tune chatbox size */
.chatbot-container {
height: 600px !important;
width: 100% !important;
}
.chatbot-container > div {
height: 100%;
width: 100%;
}
"""
def display_documents():
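    """Return a CheckboxGroup listing the uploaded documents, with previously selected ones checked."""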
return gr.CheckboxGroup(
choices=[doc["name"] for doc in uploaded_documents],
value=[doc["name"] for doc in uploaded_documents if doc["selected"]],
label="Select documents to query or delete"
)
def initial_conversation():
return [
(None, "Welcome! I'm your AI assistant for web search and PDF analysis. Here's how you can use me:\n\n"
"1. Set the toggle for Web Search and PDF Search from the checkbox in Additional Inputs drop down window\n"
"2. Use web search to find information\n"
"3. Upload the documents and ask questions about uploaded PDF documents by selecting your respective document\n"
"4. For any queries feel free to reach out @[email protected] or discord - shreyas094\n\n"
"To get started, upload some PDFs or ask me a question!")
]
# Reload the persisted document list from disk and refresh the selector
def refresh_documents():
global uploaded_documents
uploaded_documents = load_documents()
return display_documents()
custom_placeholder = "Ask a question (Note: You can toggle between Web Search and PDF Chat in Additional Inputs below)"
# Build the Gradio chat interface
demo = gr.ChatInterface(
respond,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=True, render=False),
additional_inputs=[
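        # MODELS[3] == "mistralai/Mistral-Nemo-Instruct-2407" is the default model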
gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
gr.Checkbox(label="Use Web Search", value=True),
gr.CheckboxGroup(label="Select documents to query")
],
title="AI-powered PDF Chat and Web Search Assistant",
description="Chat with your PDFs or use web search to answer questions.",
theme=gr.Theme.from_hub("allenai/gradio-theme"),
css=css,
examples=[
["Tell me about the contents of the uploaded PDFs."],
["What are the main topics discussed in the documents?"],
["Can you summarize the key points from the PDFs?"],
["What's the latest news about artificial intelligence?"]
],
cache_examples=False,
analytics_enabled=False,
    textbox=gr.Textbox(placeholder=custom_placeholder, container=False, scale=7),
    chatbot=gr.Chatbot(
show_copy_button=True,
likeable=True,
layout="bubble",
height=400,
value=initial_conversation()
)
)
# Add file upload functionality
with demo:
gr.Markdown("## Upload and Manage PDF Documents")
with gr.Row():
file_input = gr.Files(label="Upload your documents", file_types=[".pdf", ".docx", ".xlsx", ".xls"])
        parser_dropdown = gr.Dropdown(choices=["pypdf", "llamaparse", "office"], label="Select Parser", value="llamaparse")
update_button = gr.Button("Upload Document")
refresh_button = gr.Button("Refresh Document List")
update_output = gr.Textbox(label="Update Status")
delete_button = gr.Button("Delete Selected Documents")
# Update both the output text and the document selector
update_button.click(
update_vectors,
inputs=[file_input, parser_dropdown],
outputs=[update_output, demo.additional_inputs[-1]] # Use the CheckboxGroup from additional_inputs
)
# Add the refresh button functionality
refresh_button.click(
refresh_documents,
inputs=[],
outputs=[demo.additional_inputs[-1]] # Use the CheckboxGroup from additional_inputs
)
# Add the delete button functionality
delete_button.click(
delete_documents,
inputs=[demo.additional_inputs[-1]], # Use the CheckboxGroup from additional_inputs
outputs=[update_output, demo.additional_inputs[-1]]
)
gr.Markdown(
"""
## How to use
    1. Upload PDF, Word, or Excel documents using the file input at the top.
    2. Select the parser (pypdf, llamaparse, or office) and click "Upload Document" to update the vector store.
3. Select the documents you want to query using the checkboxes.
4. Ask questions in the chat interface.
5. Toggle "Use Web Search" to switch between PDF chat and web search.
6. Adjust Temperature and Number of API Calls to fine-tune the response generation.
7. Use the provided examples or ask your own questions.
"""
)
if __name__ == "__main__":
demo.launch(share=True) |