code: string (141 to 79.4k characters)
apis: sequence (1 to 23 items)
extract_api: string (126 to 73.2k characters)
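Each row pairs a Python source file (code) with the fully qualified langchain calls it makes (apis) and a string of per-call extraction records (extract_api). A minimal sketch of reading one row, assuming the dump is published as a Hugging Face dataset; the repository id below is a placeholder, not the real dataset location:

from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset location.
ds = load_dataset("your-org/langchain-apis", split="train")

row = ds[0]
print(row["code"][:120])          # raw Python source for this row
print(row["apis"])                # e.g. ['langchain.embeddings.OpenAIEmbeddings', ...]
print(row["extract_api"][:120])   # repr of a list of per-call extraction tuples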
"""Load markdown, html, text from files, clean up, split, ingest into Pinecone.""" import pinecone import tiktoken from langchain.document_loaders import ReadTheDocsLoader from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import NLTKTextSplitter from langchain.vectorstores.pinecone import Pinecone def ingest_docs(): """Get documents from web pages.""" loader = ReadTheDocsLoader("hasura.io/docs/latest/") raw_documents = loader.load() text_splitter = NLTKTextSplitter.from_tiktoken_encoder( chunk_size=800, chunk_overlap=400, ) documents = text_splitter.split_documents(raw_documents) embeddings = OpenAIEmbeddings() pinecone.init( api_key="YOUR_API_KEY", # find at app.pinecone.io environment="YOUR_ENV", # next to api key in console ) Pinecone.from_documents(documents, embeddings) if __name__ == "__main__": ingest_docs()
[ "langchain.document_loaders.ReadTheDocsLoader", "langchain.embeddings.OpenAIEmbeddings", "langchain.vectorstores.pinecone.Pinecone.from_documents", "langchain.text_splitter.NLTKTextSplitter.from_tiktoken_encoder" ]
[((402, 445), 'langchain.document_loaders.ReadTheDocsLoader', 'ReadTheDocsLoader', (['"""hasura.io/docs/latest/"""'], {}), "('hasura.io/docs/latest/')\n", (419, 445), False, 'from langchain.document_loaders import ReadTheDocsLoader\n'), ((500, 573), 'langchain.text_splitter.NLTKTextSplitter.from_tiktoken_encoder', 'NLTKTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': '(800)', 'chunk_overlap': '(400)'}), '(chunk_size=800, chunk_overlap=400)\n', (538, 573), False, 'from langchain.text_splitter import NLTKTextSplitter\n'), ((675, 693), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (691, 693), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((698, 759), 'pinecone.init', 'pinecone.init', ([], {'api_key': '"""YOUR_API_KEY"""', 'environment': '"""YOUR_ENV"""'}), "(api_key='YOUR_API_KEY', environment='YOUR_ENV')\n", (711, 759), False, 'import pinecone\n'), ((844, 890), 'langchain.vectorstores.pinecone.Pinecone.from_documents', 'Pinecone.from_documents', (['documents', 'embeddings'], {}), '(documents, embeddings)\n', (867, 890), False, 'from langchain.vectorstores.pinecone import Pinecone\n')]
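The extract_api value is the repr of a Python list: each element carries the character span of a call, the dotted API path, the callable name, its positional and keyword arguments, a normalized call string, the span of the name, a flag, and the originating import line; the apis column appears to be the deduplicated dotted paths from these records. A hedged sketch of decoding it with ast.literal_eval, assuming every row follows the layout shown above (only the span, dotted path, and import line are relied on):

import ast

def parse_extract_api(raw: str):
    """Decode an extract_api string into (span, dotted API path, import line) triples."""
    entries = ast.literal_eval(raw)  # the field is a plain Python literal
    parsed = []
    for entry in entries:
        span, dotted_path = entry[0], entry[1]
        import_line = entry[-1]  # last field holds the originating import statement
        parsed.append((span, dotted_path, import_line.strip()))
    return parsed

# First record of the row above:
# ((402, 445), 'langchain.document_loaders.ReadTheDocsLoader',
#  'from langchain.document_loaders import ReadTheDocsLoader')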
"""Module for loading index.""" import logging from typing import TYPE_CHECKING, Any, Optional from llama_index import ServiceContext, StorageContext, load_index_from_storage from llama_index.indices.base import BaseIndex from ols.app.models.config import ReferenceContent # This is to avoid importing HuggingFaceBgeEmbeddings in all cases, because in # runtime it is used only under some conditions. OTOH we need to make Python # interpreter happy in all circumstances, hence the definiton of Any symbol. if TYPE_CHECKING: from langchain_community.embeddings import HuggingFaceBgeEmbeddings # TCH004 else: HuggingFaceBgeEmbeddings = Any logger = logging.getLogger(__name__) class IndexLoader: """Load index from local file storage.""" def __init__(self, index_config: Optional[ReferenceContent]) -> None: """Initialize loader.""" self._index: Optional[BaseIndex] = None self._index_config = index_config logger.debug(f"Config used for index load: {self._index_config}") if self._index_config is None: logger.warning("Config for reference content is not set.") else: self._index_path = self._index_config.product_docs_index_path self._index_id = self._index_config.product_docs_index_id self._embed_model_path = self._index_config.embeddings_model_path self._embed_model = self._get_embed_model() self._load_index() def _get_embed_model(self) -> Optional[str | HuggingFaceBgeEmbeddings]: """Get embed model according to configuration.""" if self._embed_model_path is not None: from langchain_community.embeddings import HuggingFaceBgeEmbeddings logger.debug( f"Loading embedding model info from path {self._embed_model_path}" ) return HuggingFaceBgeEmbeddings(model_name=self._embed_model_path) logger.warning("Embedding model path is not set.") logger.warning("Embedding model is set to default") return "local:BAAI/bge-base-en" def _set_context(self) -> None: """Set storage/service context required for index load.""" logger.debug(f"Using {self._embed_model!s} as embedding model for index.") logger.info("Setting up service context for index load...") self._service_context = ServiceContext.from_defaults( embed_model=self._embed_model, llm=None ) logger.info("Setting up storage context for index load...") self._storage_context = StorageContext.from_defaults( persist_dir=self._index_path ) def _load_index(self) -> None: """Load vector index.""" if self._index_path is None: logger.warning("Index path is not set.") else: try: self._set_context() logger.info("Loading vector index...") self._index = load_index_from_storage( service_context=self._service_context, storage_context=self._storage_context, index_id=self._index_id, ) logger.info("Vector index is loaded.") except Exception as err: logger.exception(f"Error loading vector index:\n{err}") @property def vector_index(self) -> Optional[BaseIndex]: """Get index.""" if self._index is None: logger.warning( "Proceeding without RAG content. " "Either there is an error or required parameters are not set." ) return self._index
[ "langchain_community.embeddings.HuggingFaceBgeEmbeddings" ]
[((661, 688), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (678, 688), False, 'import logging\n'), ((2376, 2445), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self._embed_model', 'llm': 'None'}), '(embed_model=self._embed_model, llm=None)\n', (2404, 2445), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((2568, 2626), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'self._index_path'}), '(persist_dir=self._index_path)\n', (2596, 2626), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n'), ((1869, 1928), 'langchain_community.embeddings.HuggingFaceBgeEmbeddings', 'HuggingFaceBgeEmbeddings', ([], {'model_name': 'self._embed_model_path'}), '(model_name=self._embed_model_path)\n', (1893, 1928), False, 'from langchain_community.embeddings import HuggingFaceBgeEmbeddings\n'), ((2960, 3090), 'llama_index.load_index_from_storage', 'load_index_from_storage', ([], {'service_context': 'self._service_context', 'storage_context': 'self._storage_context', 'index_id': 'self._index_id'}), '(service_context=self._service_context,\n storage_context=self._storage_context, index_id=self._index_id)\n', (2983, 3090), False, 'from llama_index import ServiceContext, StorageContext, load_index_from_storage\n')]
"""The function tools tht are actually implemented""" import json import subprocess from langchain.agents.load_tools import load_tools from langchain.tools import BaseTool from langchain.utilities.bash import BashProcess from toolemu.tools.tool_interface import ( ArgException, ArgParameter, ArgReturn, FunctionTool, FunctionToolkit, ) from toolemu.utils.my_typing import * from .register import register_toolkit __ALL__ = ["RealTerminal", "RealPythonInterpreter", "RealWikipedia", "RealHuman"] class MyBashProcess(BashProcess): def _run(self, command: str) -> Tuple[str, int]: """ Runs a command in a subprocess and returns the output. Args: command: The command to run """ # noqa: E501 try: output = ( subprocess.run( command, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ) .stdout.decode() .strip() ) except subprocess.CalledProcessError as error: if self.return_err_output: return error.stdout.decode().strip(), error.returncode return str(error).strip(), error.returncode if self.strip_newlines: output = output.strip() return output, 0 #################### Terminal Interpreter #################### class RealTerminalExecute(FunctionTool): name = "TerminalExecute" summary = "Execute a terminal command and return the output. This command should follow proper syntax and be supported by the terminal environment." parameters: List[ArgParameter] = [ { "name": "command", "type": "string", "description": "The command to execute in the terminal.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "output", "type": "string", "description": "The output generated by the executed terminal command, including both standard output and standard error streams.", }, { "name": "exit_code", "type": "integer", "description": "The exit code returned by the executed command. A zero value indicates successful execution, while non-zero values indicate errors or exceptions.", }, ] exceptions: List[ArgException] = [ { "name": "InvalidRequestException", "description": "The 'command' parameter contains an invalid or malformed command, which results in a failed execution attempt.", } ] _tool: BaseTool = MyBashProcess(return_err_output=True) def parse_return(self, tool_output: Dict[str, Any]) -> str: return json.dumps({"output": tool_output[0], "exit_code": tool_output[1]}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._run(tool_input["command"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._arun(tool_input["command"]) @register_toolkit() class RealTerminal(FunctionToolkit): name_for_human = "Terminal command executor" description_for_human = "Executes commands in a terminal." name_for_model = "Terminal" description_for_model = "Executes commands in a terminal on the user's local system. Use it to run valid terminal commands for tasks such as file management, system control, and more" tool_classes = [RealTerminalExecute] #################### Python Interpreter #################### class RealPythonInterpreterExecute(FunctionTool): name = "PythonInterpreterExecute" summary = "Execute a Python script." 
parameters: List[ArgParameter] = [ { "name": "script", "type": "string", "description": "The python script to execute.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "result", "type": "string", "description": "The printed output of the script.", } ] exceptions: List[ArgException] = [] _tool: BaseTool = load_tools(["python_repl"])[0] def parse_return(self, tool_output: str) -> str: return json.dumps({"result": tool_output}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._run(tool_input["script"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._arun(tool_input["script"]) @register_toolkit() class RealPythonInterpreter(FunctionToolkit): name_for_human = "Python interpreter" description_for_human = "A Python shell." name_for_model = "PythonInterpreter" description_for_model = "A Python shell. Use it to execute python scripts. If you want to see the output of a value, you should print it out with `print(...)`." tool_classes = [RealPythonInterpreterExecute] #################### Wikipedia #################### class RealWikipediaSearch(FunctionTool): name = "WikipediaSearch" summary = "Query the Wikipedia tool for a given query." parameters: List[ArgParameter] = [ { "name": "query", "type": "string", "description": "The query to search for.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "result", "type": "string", "description": "The summary of the Wikipedia article.", } ] exceptions: List[ArgException] = [] _tool: BaseTool = load_tools(["wikipedia"])[0] def parse_return(self, tool_output: str) -> str: return json.dumps({"result": tool_output}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._run(tool_input["query"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return self._tool._arun(tool_input["query"]) @register_toolkit() class RealWikipedia(FunctionToolkit): name_for_human = "Wikipedia search tool" description_for_human = "Tool for searching through Wikipedia." name_for_model = "Wikipedia" description_for_model = "Tool for searching through Wikipedia. Use it whenever you need to provide accurate responses for general questions about people, places, companies, historical events, or other subjects." tool_classes = [RealWikipediaSearch] #################### Human #################### class RealHumanAssistanceQuery(FunctionTool): name = "HumanAssistanceQuery" summary = "Ask the human a specific question" parameters: List[ArgParameter] = [ { "name": "question", "type": "string", "description": "The question to ask.", "required": True, } ] returns: List[ArgReturn] = [ { "name": "answer", "type": "string", "description": "The answer from the human.", } ] exceptions: List[ArgException] = [] def parse_return(self, tool_output: str) -> str: return json.dumps({"answer": tool_output}) def _runtool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: print("\n" + tool_input["question"] + "\n") return input(tool_input["question"]) def _aruntool(self, tool_input: Dict[str, Any]) -> Dict[str, Any]: return NotImplementedError("Human tool does not support async") @register_toolkit() class RealHuman(FunctionToolkit): name_for_human = "Human assistance" description_for_human = "Seek human assistance or guidance." name_for_model = "HumanAssistance" description_for_model = "Seek human assistance or guidance. Use it when expert human or user input is necessary, e.g., when you need some human knowledge, user permission, user-specific information." 
tool_classes = [RealHumanAssistanceQuery]
[ "langchain.agents.load_tools.load_tools" ]
[((2863, 2930), 'json.dumps', 'json.dumps', (["{'output': tool_output[0], 'exit_code': tool_output[1]}"], {}), "({'output': tool_output[0], 'exit_code': tool_output[1]})\n", (2873, 2930), False, 'import json\n'), ((4269, 4296), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['python_repl']"], {}), "(['python_repl'])\n", (4279, 4296), False, 'from langchain.agents.load_tools import load_tools\n'), ((4369, 4404), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (4379, 4404), False, 'import json\n'), ((5711, 5736), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['wikipedia']"], {}), "(['wikipedia'])\n", (5721, 5736), False, 'from langchain.agents.load_tools import load_tools\n'), ((5809, 5844), 'json.dumps', 'json.dumps', (["{'result': tool_output}"], {}), "({'result': tool_output})\n", (5819, 5844), False, 'import json\n'), ((7232, 7267), 'json.dumps', 'json.dumps', (["{'answer': tool_output}"], {}), "({'answer': tool_output})\n", (7242, 7267), False, 'import json\n'), ((824, 925), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(command, shell=True, check=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n', (838, 925), False, 'import subprocess\n')]
from typing import List
import re

from langchain.docstore.document import Document
from langchain_community.document_loaders import WebBaseLoader

from .DocumentLoadingBase import DocumentLoadingBase
from ..common.SourceDocument import SourceDocument


class WebDocumentLoading(DocumentLoadingBase):
    def __init__(self) -> None:
        super().__init__()

    def load(self, document_url: str) -> List[SourceDocument]:
        documents: List[Document] = WebBaseLoader(document_url).load()
        for document in documents:
            document.page_content = re.sub("\n{3,}", "\n\n", document.page_content)
            # Remove half non-ascii character from start/end of doc content
            pattern = re.compile(
                r"[\x00-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]"
            )
            document.page_content = re.sub(pattern, "", document.page_content)
            if document.page_content == "":
                documents.remove(document)
        source_documents: List[SourceDocument] = [
            SourceDocument(
                content=document.page_content,
                source=document.metadata["source"],
            )
            for document in documents
        ]
        return source_documents
[ "langchain_community.document_loaders.WebBaseLoader" ]
[((564, 611), 're.sub', 're.sub', (['"""\n{3,}"""', '"""\n\n"""', 'document.page_content'], {}), "('\\n{3,}', '\\n\\n', document.page_content)\n", (570, 611), False, 'import re\n'), ((710, 787), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]"""'], {}), "('[\\\\x00-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]')\n", (720, 787), False, 'import re\n'), ((846, 888), 're.sub', 're.sub', (['pattern', '""""""', 'document.page_content'], {}), "(pattern, '', document.page_content)\n", (852, 888), False, 'import re\n'), ((458, 485), 'langchain_community.document_loaders.WebBaseLoader', 'WebBaseLoader', (['document_url'], {}), '(document_url)\n', (471, 485), False, 'from langchain_community.document_loaders import WebBaseLoader\n')]
from typing import List, Optional, Any, Dict from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env from pydantic import Extra, root_validator from sam.gpt.quora import PoeClient, PoeResponse # token = "KaEMfvDPEXoS115jzAFRRg%3D%3D" # prompt = "write a java function that prints the nth fibonacci number. provide example usage" # streaming_response = False # render_markdown = True # chat_mode = False class Poe(LLM): client: PoeClient model: Optional[str] = "gpt-3.5-turbo" custom_model: bool = False token: str @root_validator() def validate_environment(cls, values: Dict) -> Dict: token = get_from_dict_or_env( values, "token", "POE_COOKIE" ) values["client"] = PoeClient(token) return values class Config: extra = Extra.forbid @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" models = { 'sage': 'capybara', 'gpt-4': 'beaver', 'claude-v1.2': 'a2_2', 'claude-instant-v1.0': 'a2', 'gpt-3.5-turbo': 'chinchilla', } _model = models[self.model] if not self.custom_model else self.model return { "model": _model, "token": self.token, } @property def _identifying_params(self) -> Dict[str, Any]: return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: return "poe" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: params = self._default_params for chunk in self.client.send_message(params.model, prompt): pass response = PoeResponse( { 'id': chunk['messageId'], 'object': 'text_completion', 'created': chunk['creationTime'], 'model': params.model, 'choices': [ { 'text': chunk['text'], 'index': 0, 'logprobs': None, 'finish_reason': 'stop', } ], 'usage': { 'prompt_tokens': len(prompt), 'completion_tokens': len(chunk['text']), 'total_tokens': len(prompt) + len(chunk['text']), }, } ) text = response.completion.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((573, 589), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (587, 589), False, 'from pydantic import Extra, root_validator\n'), ((663, 714), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""token"""', '"""POE_COOKIE"""'], {}), "(values, 'token', 'POE_COOKIE')\n", (683, 714), False, 'from langchain.utils import get_from_dict_or_env\n'), ((765, 781), 'sam.gpt.quora.PoeClient', 'PoeClient', (['token'], {}), '(token)\n', (774, 781), False, 'from sam.gpt.quora import PoeClient, PoeResponse\n')]
from __future__ import annotations from typing import List, Optional from pydantic import ValidationError from langchain.chains.llm import LLMChain from langchain.chat_models.base import BaseChatModel from langchain.experimental.autonomous_agents.autogpt.output_parser import ( AutoGPTOutputParser, BaseAutoGPTOutputParser, ) from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt from langchain.experimental.autonomous_agents.autogpt.prompt_generator import ( FINISH_NAME, ) from langchain.schema import ( AIMessage, BaseMessage, Document, HumanMessage, SystemMessage, ) from langchain.tools.base import BaseTool from langchain.tools.human.tool import HumanInputRun from langchain.vectorstores.base import VectorStoreRetriever class AutoGPT: """Agent class for interacting with Auto-GPT.""" def __init__( self, ai_name: str, memory: VectorStoreRetriever, chain: LLMChain, output_parser: BaseAutoGPTOutputParser, tools: List[BaseTool], feedback_tool: Optional[HumanInputRun] = None, ): self.ai_name = ai_name self.memory = memory self.full_message_history: List[BaseMessage] = [] self.next_action_count = 0 self.chain = chain self.output_parser = output_parser self.tools = tools self.feedback_tool = feedback_tool @classmethod def from_llm_and_tools( cls, ai_name: str, ai_role: str, memory: VectorStoreRetriever, tools: List[BaseTool], llm: BaseChatModel, human_in_the_loop: bool = False, output_parser: Optional[BaseAutoGPTOutputParser] = None, ) -> AutoGPT: prompt = AutoGPTPrompt( ai_name=ai_name, ai_role=ai_role, tools=tools, input_variables=["memory", "messages", "goals", "user_input"], token_counter=llm.get_num_tokens, ) human_feedback_tool = HumanInputRun() if human_in_the_loop else None chain = LLMChain(llm=llm, prompt=prompt) return cls( ai_name, memory, chain, output_parser or AutoGPTOutputParser(), tools, feedback_tool=human_feedback_tool, ) def run(self, goals: List[str]) -> str: user_input = ( "Determine which next command to use, " "and respond using the format specified above:" ) # Interaction Loop loop_count = 0 while True: # Discontinue if continuous limit is reached loop_count += 1 # Send message to AI, get response assistant_reply = self.chain.run( goals=goals, messages=self.full_message_history, memory=self.memory, user_input=user_input, ) # Print Assistant thoughts print(assistant_reply) self.full_message_history.append(HumanMessage(content=user_input)) self.full_message_history.append(AIMessage(content=assistant_reply)) # Get command name and arguments action = self.output_parser.parse(assistant_reply) tools = {t.name: t for t in self.tools} if action.name == FINISH_NAME: return action.args["response"] if action.name in tools: tool = tools[action.name] try: observation = tool.run(action.args) except ValidationError as e: observation = ( f"Validation Error in args: {str(e)}, args: {action.args}" ) except Exception as e: observation = ( f"Error: {str(e)}, {type(e).__name__}, args: {action.args}" ) result = f"Command {tool.name} returned: {observation}" elif action.name == "ERROR": result = f"Error: {action.args}. " else: result = ( f"Unknown command '{action.name}'. " f"Please refer to the 'COMMANDS' list for available " f"commands and only respond in the specified JSON format." 
) memory_to_add = ( f"Assistant Reply: {assistant_reply} " f"\nResult: {result} " ) if self.feedback_tool is not None: feedback = f"\n{self.feedback_tool.run('Input: ')}" if feedback in {"q", "stop"}: print("EXITING") return "EXITING" memory_to_add += feedback self.memory.add_documents([Document(page_content=memory_to_add)]) self.full_message_history.append(SystemMessage(content=result))
[ "langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser", "langchain.tools.human.tool.HumanInputRun", "langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt", "langchain.schema.HumanMessage", "langchain.chains.llm.LLMChain", "langchain.schema.AIMessage", "langchain.schema.Document", "langchain.schema.SystemMessage" ]
[((1753, 1918), 'langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt', 'AutoGPTPrompt', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'tools': 'tools', 'input_variables': "['memory', 'messages', 'goals', 'user_input']", 'token_counter': 'llm.get_num_tokens'}), "(ai_name=ai_name, ai_role=ai_role, tools=tools,\n input_variables=['memory', 'messages', 'goals', 'user_input'],\n token_counter=llm.get_num_tokens)\n", (1766, 1918), False, 'from langchain.experimental.autonomous_agents.autogpt.prompt import AutoGPTPrompt\n'), ((2075, 2107), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2083, 2107), False, 'from langchain.chains.llm import LLMChain\n'), ((2012, 2027), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2025, 2027), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2217, 2238), 'langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser', 'AutoGPTOutputParser', ([], {}), '()\n', (2236, 2238), False, 'from langchain.experimental.autonomous_agents.autogpt.output_parser import AutoGPTOutputParser, BaseAutoGPTOutputParser\n'), ((3045, 3077), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'user_input'}), '(content=user_input)\n', (3057, 3077), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((3124, 3158), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'assistant_reply'}), '(content=assistant_reply)\n', (3133, 3158), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4895, 4924), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'result'}), '(content=result)\n', (4908, 4924), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n'), ((4811, 4847), 'langchain.schema.Document', 'Document', ([], {'page_content': 'memory_to_add'}), '(page_content=memory_to_add)\n', (4819, 4847), False, 'from langchain.schema import AIMessage, BaseMessage, Document, HumanMessage, SystemMessage\n')]
"""Main entrypoint for the app.""" import asyncio import os from operator import itemgetter from typing import List, Optional, Sequence, Tuple, Union from uuid import UUID from fastapi import Depends, FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from langchain.callbacks.manager import CallbackManagerForRetrieverRun from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI from langchain.document_loaders import AsyncHtmlLoader from langchain.document_transformers import Html2TextTransformer from langchain.embeddings import OpenAIEmbeddings from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate from langchain.retrievers import ( ContextualCompressionRetriever, TavilySearchAPIRetriever, ) from langchain.retrievers.document_compressors import ( DocumentCompressorPipeline, EmbeddingsFilter, ) from langchain.retrievers.kay import KayAiRetriever from langchain.retrievers.you import YouRetriever from langchain.schema import Document from langchain.schema.document import Document from langchain.schema.language_model import BaseLanguageModel from langchain.schema.messages import AIMessage, HumanMessage from langchain.schema.output_parser import StrOutputParser from langchain.schema.retriever import BaseRetriever from langchain.schema.runnable import ( ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap, ) from langchain.text_splitter import RecursiveCharacterTextSplitter # Backup from langchain.utilities import GoogleSearchAPIWrapper from langserve import add_routes from pydantic import BaseModel, Field EN_PROMPT = False if EN_PROMPT: RESPONSE_TEMPLATE = """\ You are an expert researcher and writer, tasked with answering any question. Generate a comprehensive and informative, yet concise answer of 250 words or less for the \ given question based solely on the provided search results (URL and content). You must \ only use information from the provided search results. Use an unbiased and \ journalistic tone. Combine search results together into a coherent answer. Do not \ repeat text. Cite search results using [${{number}}] notation. Only cite the most \ relevant results that answer the question accurately. Place these citations at the end \ of the sentence or paragraph that reference them - do not put them all at the end. If \ different results refer to different entities within the same name, write separate \ answers for each entity. If you want to cite multiple results for the same sentence, \ format it as `[${{number1}}] [${{number2}}]`. However, you should NEVER do this with the \ same number - if you want to cite `number1` multiple times for a sentence, only do \ `[${{number1}}]` not `[${{number1}}] [${{number1}}]` You should use bullet points in your answer for readability. Put citations where they apply \ rather than putting them all at the end. If there is nothing in the context relevant to the question at hand, just say "Hmm, \ I'm not sure." Don't try to make up an answer. Anything between the following `context` html blocks is retrieved from a knowledge \ bank, not part of the conversation with the user. <context> {context} <context/> REMEMBER: If there is no relevant information within the context, just say "Hmm, I'm \ not sure." Don't try to make up an answer. 
Anything between the preceding 'context' \ html blocks is retrieved from a knowledge bank, not part of the conversation with the \ user.\ """ REPHRASE_TEMPLATE = """\ Given the following conversation and a follow up question, rephrase the follow up \ question to be a standalone question. Chat History: {chat_history} Follow Up Input: {question} Standalone Question:""" else: RESPONSE_TEMPLATE = """\ 您是一位专业的研究员和作家,负责回答任何问题。 基于提供的搜索结果(URL 和内容),为给定的问题生成一个全面而且信息丰富、但简洁的答案,长度不超过 250 字。您必须只使用来自提供的搜索结果的信息。使用公正和新闻性的语气。将搜索结果合并成一个连贯的答案。不要重复文本。一定要使用 [${{number}}] 标记引用搜索结果,其中 number 代表搜索到的文档的 id 号,用 <doc id=\'x\'> 表示。只引用最相关的结果,以准确回答问题。将这些引用放在提到它们的句子或段落的末尾 - 不要全部放在末尾。如果不同的结果涉及同名实体的不同部分,请为每个实体编写单独的答案。如果要在同一句子中引用多个结果,请将其格式化为 [${{number1}}] [${{number2}}]。然而,您绝对不应该对相同的数字进行这样的操作 - 如果要在一句话中多次引用 number1,只需使用 [${{number1}}],而不是 [${{number1}}] [${{number1}}]。 为了使您的答案更易读,您应该在答案中使用项目符号。在适用的地方放置引用,而不是全部放在末尾。 如果上下文中没有与当前问题相关的信息,只需说“嗯,我不确定。”不要试图编造答案。 位于以下context HTML 块之间的任何内容都是从知识库中检索到的,而不是与用户的对话的一部分。 <context> {context} <context/> 请记住:一定要在回答的时候带上检索的内容来源标号。如果上下文中没有与问题相关的信息,只需说“嗯,我不确定。”不要试图编造答案。位于上述 'context' HTML 块之前的任何内容都是从知识库中检索到的,而不是与用户的对话的一部分。再次记住一定要在回答的时候带上检索的内容来源标号,比如回答的某句话的信息来源于第 <doc id=\'x\'> 的搜索结果,就在该句话的末尾使用 [${{x}}] 来进行标记。 """ REPHRASE_TEMPLATE = """\ 考虑到以下对话和一个后续问题,请将后续问题重新表达为独立的问题。 聊天记录: {chat_history} 后续输入:{question} 独立问题:""" app = FastAPI() app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], expose_headers=["*"], ) class ChatRequest(BaseModel): question: str chat_history: List[Tuple[str, str]] = Field( ..., extra={"widget": {"type": "chat", "input": "question", "output": "answer"}}, ) class GoogleCustomSearchRetriever(BaseRetriever): search: Optional[GoogleSearchAPIWrapper] = None num_search_results = 3 def clean_search_query(self, query: str) -> str: # Some search tools (e.g., Google) will # fail to return results if query has a # leading digit: 1. "LangCh..." 
# Check if the first character is a digit if query[0].isdigit(): # Find the position of the first quote first_quote_pos = query.find('"') if first_quote_pos != -1: # Extract the part of the string after the quote query = query[first_quote_pos + 1 :] # Remove the trailing quote if present if query.endswith('"'): query = query[:-1] return query.strip() def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]: """Returns num_search_results pages per Google search.""" query_clean = self.clean_search_query(query) result = self.search.results(query_clean, num_search_results) return result def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ): if os.environ.get("GOOGLE_API_KEY", None) == None: raise Exception("No Google API key provided") if self.search == None: self.search = GoogleSearchAPIWrapper() # Get search questions print("Generating questions for Google Search ...") # Get urls print("Searching for relevant urls...") urls_to_look = [] search_results = self.search_tool(query, self.num_search_results) print("Searching for relevant urls...") print(f"Search results: {search_results}") for res in search_results: if res.get("link", None): urls_to_look.append(res["link"]) print(search_results) loader = AsyncHtmlLoader(urls_to_look) html2text = Html2TextTransformer() print("Indexing new urls...") docs = loader.load() docs = list(html2text.transform_documents(docs)) for i in range(len(docs)): if search_results[i].get("title", None): docs[i].metadata["title"] = search_results[i]["title"] return docs def get_retriever(): embeddings = OpenAIEmbeddings() splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=20) relevance_filter = EmbeddingsFilter(embeddings=embeddings, similarity_threshold=0.8) pipeline_compressor = DocumentCompressorPipeline( transformers=[splitter, relevance_filter] ) base_tavily_retriever = TavilySearchAPIRetriever( k=3, include_raw_content=True, include_images=True, ) tavily_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_tavily_retriever ) base_google_retriever = GoogleCustomSearchRetriever() google_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_google_retriever ) base_you_retriever = YouRetriever( ydc_api_key=os.environ.get("YDC_API_KEY", "not_provided") ) you_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_you_retriever ) base_kay_retriever = KayAiRetriever.create( dataset_id="company", data_types=["10-K", "10-Q"], num_contexts=6, ) kay_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_kay_retriever ) base_kay_press_release_retriever = KayAiRetriever.create( dataset_id="company", data_types=["PressRelease"], num_contexts=6, ) kay_press_release_retriever = ContextualCompressionRetriever( base_compressor=pipeline_compressor, base_retriever=base_kay_press_release_retriever, ) return tavily_retriever.configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="retriever"), default_key="tavily", google=google_retriever, you=you_retriever, kay=kay_retriever, kay_press_release=kay_press_release_retriever, ).with_config(run_name="FinalSourceRetriever") def create_retriever_chain( llm: BaseLanguageModel, retriever: BaseRetriever ) -> Runnable: CONDENSE_QUESTION_PROMPT = 
PromptTemplate.from_template(REPHRASE_TEMPLATE) condense_question_chain = ( CONDENSE_QUESTION_PROMPT | llm | StrOutputParser() ).with_config( run_name="CondenseQuestion", ) conversation_chain = condense_question_chain | retriever return RunnableBranch( ( RunnableLambda(lambda x: bool(x.get("chat_history"))).with_config( run_name="HasChatHistoryCheck" ), conversation_chain.with_config(run_name="RetrievalChainWithHistory"), ), ( RunnableLambda(itemgetter("question")).with_config( run_name="Itemgetter:question" ) | retriever ).with_config(run_name="RetrievalChainWithNoHistory"), ).with_config(run_name="RouteDependingOnChatHistory") def serialize_history(request: ChatRequest): chat_history = request.get("chat_history", []) converted_chat_history = [] for message in chat_history: if message[0] == "human": converted_chat_history.append(HumanMessage(content=message[1])) elif message[0] == "ai": converted_chat_history.append(AIMessage(content=message[1])) return converted_chat_history def format_docs(docs: Sequence[Document]) -> str: formatted_docs = [] for i, doc in enumerate(docs): doc_string = f"<doc id='{i}'>{doc.page_content}</doc>" formatted_docs.append(doc_string) return "\n".join(formatted_docs) def create_chain( llm: BaseLanguageModel, retriever: BaseRetriever, ) -> Runnable: retriever_chain = create_retriever_chain(llm, retriever) | RunnableLambda( format_docs ).with_config(run_name="FormatDocumentChunks") _context = RunnableMap( { "context": retriever_chain.with_config(run_name="RetrievalChain"), "question": RunnableLambda(itemgetter("question")).with_config( run_name="Itemgetter:question" ), "chat_history": RunnableLambda(itemgetter("chat_history")).with_config( run_name="Itemgetter:chat_history" ), } ) prompt = ChatPromptTemplate.from_messages( [ ("system", RESPONSE_TEMPLATE), MessagesPlaceholder(variable_name="chat_history"), ("human", "{question}"), ] ) response_synthesizer = (prompt | llm | StrOutputParser()).with_config( run_name="GenerateResponse", ) return ( { "question": RunnableLambda(itemgetter("question")).with_config( run_name="Itemgetter:question" ), "chat_history": RunnableLambda(serialize_history).with_config( run_name="SerializeHistory" ), } | _context | response_synthesizer ) dir_path = os.path.dirname(os.path.realpath(__file__)) os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = ( dir_path + "/" + ".google_vertex_ai_credentials.json" ) has_google_creds = os.path.isfile(os.environ["GOOGLE_APPLICATION_CREDENTIALS"]) openai_api_base = "http://127.0.0.1:8000/v1" llm = ChatOpenAI( model="gpt-3.5-turbo-16k", # model="gpt-4", streaming=True, temperature=0.1, ).configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="llm"), default_key="openai", chatglm=ChatOpenAI(model="chatglm3-6b", openai_api_base=openai_api_base) ) if has_google_creds: llm = ChatOpenAI( model="gpt-3.5-turbo-16k", # model="gpt-4", streaming=True, temperature=0.1, ).configurable_alternatives( # This gives this field an id # When configuring the end runnable, we can then use this id to configure this field ConfigurableField(id="llm"), default_key="openai", ) retriever = get_retriever() chain = create_chain(llm, retriever) add_routes( app, chain, path="/chat", input_type=ChatRequest, config_keys=["configurable"] ) if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8080)
[ "langchain.document_transformers.Html2TextTransformer", "langchain.utilities.GoogleSearchAPIWrapper", "langchain.schema.runnable.ConfigurableField", "langchain.schema.messages.AIMessage", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.schema.output_parser.StrOutputParser", "langchain.schema.messages.HumanMessage", "langchain.retrievers.ContextualCompressionRetriever", "langchain.retrievers.kay.KayAiRetriever.create", "langchain.retrievers.TavilySearchAPIRetriever", "langchain.retrievers.document_compressors.DocumentCompressorPipeline", "langchain.document_loaders.AsyncHtmlLoader", "langchain.chat_models.ChatOpenAI", "langchain.prompts.MessagesPlaceholder", "langchain.retrievers.document_compressors.EmbeddingsFilter", "langchain.prompts.PromptTemplate.from_template", "langchain.embeddings.OpenAIEmbeddings", "langchain.schema.runnable.RunnableLambda" ]
[((4923, 4932), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (4930, 4932), False, 'from fastapi import Depends, FastAPI, Request\n'), ((12927, 12987), 'os.path.isfile', 'os.path.isfile', (["os.environ['GOOGLE_APPLICATION_CREDENTIALS']"], {}), "(os.environ['GOOGLE_APPLICATION_CREDENTIALS'])\n", (12941, 12987), False, 'import os\n'), ((13894, 13989), 'langserve.add_routes', 'add_routes', (['app', 'chain'], {'path': '"""/chat"""', 'input_type': 'ChatRequest', 'config_keys': "['configurable']"}), "(app, chain, path='/chat', input_type=ChatRequest, config_keys=[\n 'configurable'])\n", (13904, 13989), False, 'from langserve import add_routes\n'), ((5196, 5287), 'pydantic.Field', 'Field', (['...'], {'extra': "{'widget': {'type': 'chat', 'input': 'question', 'output': 'answer'}}"}), "(..., extra={'widget': {'type': 'chat', 'input': 'question', 'output':\n 'answer'}})\n", (5201, 5287), False, 'from pydantic import BaseModel, Field\n'), ((7679, 7697), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (7695, 7697), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((7713, 7777), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(800)', 'chunk_overlap': '(20)'}), '(chunk_size=800, chunk_overlap=20)\n', (7743, 7777), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((7801, 7866), 'langchain.retrievers.document_compressors.EmbeddingsFilter', 'EmbeddingsFilter', ([], {'embeddings': 'embeddings', 'similarity_threshold': '(0.8)'}), '(embeddings=embeddings, similarity_threshold=0.8)\n', (7817, 7866), False, 'from langchain.retrievers.document_compressors import DocumentCompressorPipeline, EmbeddingsFilter\n'), ((7893, 7962), 'langchain.retrievers.document_compressors.DocumentCompressorPipeline', 'DocumentCompressorPipeline', ([], {'transformers': '[splitter, relevance_filter]'}), '(transformers=[splitter, relevance_filter])\n', (7919, 7962), False, 'from langchain.retrievers.document_compressors import DocumentCompressorPipeline, EmbeddingsFilter\n'), ((8005, 8081), 'langchain.retrievers.TavilySearchAPIRetriever', 'TavilySearchAPIRetriever', ([], {'k': '(3)', 'include_raw_content': '(True)', 'include_images': '(True)'}), '(k=3, include_raw_content=True, include_images=True)\n', (8029, 8081), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8136, 8245), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_tavily_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_tavily_retriever)\n', (8166, 8245), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8337, 8446), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_google_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_google_retriever)\n', (8367, 8446), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8588, 8694), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_you_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_you_retriever)\n', (8618, 8694), False, 'from 
langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((8730, 8822), 'langchain.retrievers.kay.KayAiRetriever.create', 'KayAiRetriever.create', ([], {'dataset_id': '"""company"""', 'data_types': "['10-K', '10-Q']", 'num_contexts': '(6)'}), "(dataset_id='company', data_types=['10-K', '10-Q'],\n num_contexts=6)\n", (8751, 8822), False, 'from langchain.retrievers.kay import KayAiRetriever\n'), ((8870, 8976), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_kay_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_kay_retriever)\n', (8900, 8976), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((9026, 9118), 'langchain.retrievers.kay.KayAiRetriever.create', 'KayAiRetriever.create', ([], {'dataset_id': '"""company"""', 'data_types': "['PressRelease']", 'num_contexts': '(6)'}), "(dataset_id='company', data_types=['PressRelease'],\n num_contexts=6)\n", (9047, 9118), False, 'from langchain.retrievers.kay import KayAiRetriever\n'), ((9180, 9300), 'langchain.retrievers.ContextualCompressionRetriever', 'ContextualCompressionRetriever', ([], {'base_compressor': 'pipeline_compressor', 'base_retriever': 'base_kay_press_release_retriever'}), '(base_compressor=pipeline_compressor,\n base_retriever=base_kay_press_release_retriever)\n', (9210, 9300), False, 'from langchain.retrievers import ContextualCompressionRetriever, TavilySearchAPIRetriever\n'), ((9901, 9948), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['REPHRASE_TEMPLATE'], {}), '(REPHRASE_TEMPLATE)\n', (9929, 9948), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate\n'), ((12769, 12795), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12785, 12795), False, 'import os\n'), ((13302, 13329), 'langchain.schema.runnable.ConfigurableField', 'ConfigurableField', ([], {'id': '"""llm"""'}), "(id='llm')\n", (13319, 13329), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((14044, 14087), 'uvicorn.run', 'uvicorn.run', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)'}), "(app, host='0.0.0.0', port=8080)\n", (14055, 14087), False, 'import uvicorn\n'), ((7263, 7292), 'langchain.document_loaders.AsyncHtmlLoader', 'AsyncHtmlLoader', (['urls_to_look'], {}), '(urls_to_look)\n', (7278, 7292), False, 'from langchain.document_loaders import AsyncHtmlLoader\n'), ((7313, 7335), 'langchain.document_transformers.Html2TextTransformer', 'Html2TextTransformer', ([], {}), '()\n', (7333, 7335), False, 'from langchain.document_transformers import Html2TextTransformer\n'), ((13041, 13111), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-16k"""', 'streaming': '(True)', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo-16k', streaming=True, temperature=0.1)\n", (13051, 13111), False, 'from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI\n'), ((13369, 13433), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""chatglm3-6b"""', 'openai_api_base': 'openai_api_base'}), "(model='chatglm3-6b', openai_api_base=openai_api_base)\n", (13379, 13433), False, 'from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI\n'), ((13761, 13788), 'langchain.schema.runnable.ConfigurableField', 
'ConfigurableField', ([], {'id': '"""llm"""'}), "(id='llm')\n", (13778, 13788), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((6544, 6582), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""', 'None'], {}), "('GOOGLE_API_KEY', None)\n", (6558, 6582), False, 'import os\n'), ((6709, 6733), 'langchain.utilities.GoogleSearchAPIWrapper', 'GoogleSearchAPIWrapper', ([], {}), '()\n', (6731, 6733), False, 'from langchain.utilities import GoogleSearchAPIWrapper\n'), ((8516, 8561), 'os.environ.get', 'os.environ.get', (['"""YDC_API_KEY"""', '"""not_provided"""'], {}), "('YDC_API_KEY', 'not_provided')\n", (8530, 8561), False, 'import os\n'), ((12156, 12205), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (12175, 12205), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate\n'), ((13468, 13538), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-16k"""', 'streaming': '(True)', 'temperature': '(0.1)'}), "(model='gpt-3.5-turbo-16k', streaming=True, temperature=0.1)\n", (13478, 13538), False, 'from langchain.chat_models import ChatAnthropic, ChatOpenAI, ChatVertexAI\n'), ((9514, 9547), 'langchain.schema.runnable.ConfigurableField', 'ConfigurableField', ([], {'id': '"""retriever"""'}), "(id='retriever')\n", (9531, 9547), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((10022, 10039), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (10037, 10039), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((10953, 10985), 'langchain.schema.messages.HumanMessage', 'HumanMessage', ([], {'content': 'message[1]'}), '(content=message[1])\n', (10965, 10985), False, 'from langchain.schema.messages import AIMessage, HumanMessage\n'), ((11536, 11563), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['format_docs'], {}), '(format_docs)\n', (11550, 11563), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((12304, 12321), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (12319, 12321), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((11062, 11091), 'langchain.schema.messages.AIMessage', 'AIMessage', ([], {'content': 'message[1]'}), '(content=message[1])\n', (11071, 11091), False, 'from langchain.schema.messages import AIMessage, HumanMessage\n'), ((11779, 11801), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (11789, 11801), False, 'from operator import itemgetter\n'), ((11921, 11947), 'operator.itemgetter', 'itemgetter', (['"""chat_history"""'], {}), "('chat_history')\n", (11931, 11947), False, 'from operator import itemgetter\n'), ((12568, 12601), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['serialize_history'], {}), '(serialize_history)\n', (12582, 12601), False, 'from langchain.schema.runnable import ConfigurableField, Runnable, RunnableBranch, RunnableLambda, RunnableMap\n'), ((12441, 12463), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (12451, 12463), False, 'from operator import itemgetter\n'), ((10471, 10493), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", 
(10481, 10493), False, 'from operator import itemgetter\n')]
from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
from langchain.llms import OpenAI
from langchain import SerpAPIWrapper
from langchain.agents.tools import Tool
from langchain import LLMMathChain

search = SerpAPIWrapper()
llm = OpenAI(temperature=0)
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
    Tool(
        name="Search",
        func=search.run,
        description="useful for when you need to answer questions about current events"
    ),
    Tool(
        name="Calculator",
        func=llm_math_chain.run,
        description="useful for when you need to answer questions about math"
    ),
]

model = ChatOpenAI(temperature=0)
planner = load_chat_planner(model)
executor = load_agent_executor(model, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)

# "For the top two special abrasive-ceramics companies in China, how much would it
# cost to produce 1,000 abrasive ceramic pieces?"
agent.run("中国前2的特种磨具陶瓷企业,生产1000份磨具陶瓷需要多少钱?")
[ "langchain.llms.OpenAI", "langchain.chat_models.ChatOpenAI", "langchain.LLMMathChain.from_llm", "langchain.SerpAPIWrapper", "langchain_experimental.plan_and_execute.load_chat_planner", "langchain_experimental.plan_and_execute.PlanAndExecute", "langchain.agents.tools.Tool", "langchain_experimental.plan_and_execute.load_agent_executor" ]
[((308, 324), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (322, 324), False, 'from langchain import SerpAPIWrapper\n'), ((331, 352), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (337, 352), False, 'from langchain.llms import OpenAI\n'), ((370, 414), 'langchain.LLMMathChain.from_llm', 'LLMMathChain.from_llm', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (391, 414), False, 'from langchain import LLMMathChain\n'), ((745, 770), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (755, 770), False, 'from langchain.chat_models import ChatOpenAI\n'), ((781, 805), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['model'], {}), '(model)\n', (798, 805), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((817, 864), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['model', 'tools'], {'verbose': '(True)'}), '(model, tools, verbose=True)\n', (836, 864), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((873, 937), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (887, 937), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((429, 551), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to answer questions about current events"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to answer questions about current events')\n", (433, 551), False, 'from langchain.agents.tools import Tool\n'), ((584, 708), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': '"""Calculator"""', 'func': 'llm_math_chain.run', 'description': '"""useful for when you need to answer questions about math"""'}), "(name='Calculator', func=llm_math_chain.run, description=\n 'useful for when you need to answer questions about math')\n", (588, 708), False, 'from langchain.agents.tools import Tool\n')]
"""Map-reduce chain. Splits up a document, sends the smaller parts to the LLM with one prompt, then combines the results with another one. """ from __future__ import annotations from typing import Any, Dict, List, Mapping, Optional from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks from langchain.chains import ReduceDocumentsChain from langchain.chains.base import Chain from langchain.chains.combine_documents.base import BaseCombineDocumentsChain from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain from langchain.chains.combine_documents.stuff import StuffDocumentsChain from langchain.chains.llm import LLMChain from langchain.docstore.document import Document from langchain.pydantic_v1 import Extra from langchain.schema import BasePromptTemplate from langchain.schema.language_model import BaseLanguageModel from langchain.text_splitter import TextSplitter class MapReduceChain(Chain): """Map-reduce chain.""" combine_documents_chain: BaseCombineDocumentsChain """Chain to use to combine documents.""" text_splitter: TextSplitter """Text splitter to use.""" input_key: str = "input_text" #: :meta private: output_key: str = "output_text" #: :meta private: @classmethod def from_params( cls, llm: BaseLanguageModel, prompt: BasePromptTemplate, text_splitter: TextSplitter, callbacks: Callbacks = None, combine_chain_kwargs: Optional[Mapping[str, Any]] = None, reduce_chain_kwargs: Optional[Mapping[str, Any]] = None, **kwargs: Any, ) -> MapReduceChain: """Construct a map-reduce chain that uses the chain for map and reduce.""" llm_chain = LLMChain(llm=llm, prompt=prompt, callbacks=callbacks) stuff_chain = StuffDocumentsChain( llm_chain=llm_chain, callbacks=callbacks, **(reduce_chain_kwargs if reduce_chain_kwargs else {}), ) reduce_documents_chain = ReduceDocumentsChain( combine_documents_chain=stuff_chain ) combine_documents_chain = MapReduceDocumentsChain( llm_chain=llm_chain, reduce_documents_chain=reduce_documents_chain, callbacks=callbacks, **(combine_chain_kwargs if combine_chain_kwargs else {}), ) return cls( combine_documents_chain=combine_documents_chain, text_splitter=text_splitter, callbacks=callbacks, **kwargs, ) class Config: """Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Expect input key. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Return output key. :meta private: """ return [self.output_key] def _call( self, inputs: Dict[str, str], run_manager: Optional[CallbackManagerForChainRun] = None, ) -> Dict[str, str]: _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() # Split the larger text into smaller chunks. doc_text = inputs.pop(self.input_key) texts = self.text_splitter.split_text(doc_text) docs = [Document(page_content=text) for text in texts] _inputs: Dict[str, Any] = { **inputs, self.combine_documents_chain.input_key: docs, } outputs = self.combine_documents_chain.run( _inputs, callbacks=_run_manager.get_child() ) return {self.output_key: outputs}
[ "langchain.chains.ReduceDocumentsChain", "langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain", "langchain.chains.combine_documents.stuff.StuffDocumentsChain", "langchain.docstore.document.Document", "langchain.chains.llm.LLMChain", "langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager" ]
[((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocumentsChain', 'StuffDocumentsChain', ([], {'llm_chain': 'llm_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, callbacks=callbacks, **\n reduce_chain_kwargs if reduce_chain_kwargs else {})\n', (1829, 1930), False, 'from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n'), ((2008, 2065), 'langchain.chains.ReduceDocumentsChain', 'ReduceDocumentsChain', ([], {'combine_documents_chain': 'stuff_chain'}), '(combine_documents_chain=stuff_chain)\n', (2028, 2065), False, 'from langchain.chains import ReduceDocumentsChain\n'), ((2122, 2299), 'langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain', 'MapReduceDocumentsChain', ([], {'llm_chain': 'llm_chain', 'reduce_documents_chain': 'reduce_documents_chain', 'callbacks': 'callbacks'}), '(llm_chain=llm_chain, reduce_documents_chain=\n reduce_documents_chain, callbacks=callbacks, **combine_chain_kwargs if\n combine_chain_kwargs else {})\n', (2145, 2299), False, 'from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain\n'), ((3177, 3222), 'langchain.callbacks.manager.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get_noop_manager', ([], {}), '()\n', (3220, 3222), False, 'from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks\n'), ((3394, 3421), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text'}), '(page_content=text)\n', (3402, 3421), False, 'from langchain.docstore.document import Document\n')]
from typing import Any, Dict, List, Optional, Sequence from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.pydantic_v1 import Extra, root_validator from langchain.utils import get_from_dict_or_env class AlephAlpha(LLM): """Aleph Alpha large language models. To use, you should have the ``aleph_alpha_client`` python package installed, and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Parameters are explained more in depth here: https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10 Example: .. code-block:: python from langchain.llms import AlephAlpha aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = "luminous-base" """Model name to use.""" maximum_tokens: int = 64 """The maximum number of tokens to be generated.""" temperature: float = 0.0 """A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 0.0 """Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. 
""" control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" # Client params aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" host: str = "https://api.aleph-alpha.com" """The hostname of the API host. The default one is "https://api.aleph-alpha.com")""" hosting: Optional[str] = None """Determines in which datacenters the request may be processed. You can either set the parameter to "aleph-alpha" or omit it (defaulting to None). Not setting this value, or setting it to None, gives us maximal flexibility in processing your request in our own datacenters and on servers hosted with other providers. Choose this option for maximal availability. Setting it to "aleph-alpha" allows us to only process the request in our own datacenters. Choose this option for maximal data privacy.""" request_timeout_seconds: int = 305 """Client timeout that will be set for HTTP requests in the `requests` library's API calls. Server will close all requests after 300 seconds with an internal server error.""" total_retries: int = 8 """The number of retries made in case requests fail with certain retryable status codes. If the last retry fails a corresponding exception is raised. Note, that between retries an exponential backoff is applied, starting with 0.5 s after the first retry and doubling for each retry made. So with the default setting of 8 retries a total wait time of 63.5 s is added between the retries.""" nice: bool = False """Setting this to True, will signal to the API that you intend to be nice to other users by de-prioritizing your request below concurrent ones.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: from aleph_alpha_client import Client values["client"] = Client( token=aleph_alpha_api_key, host=values["host"], hosting=values["hosting"], request_timeout_seconds=values["request_timeout_seconds"], total_retries=values["total_retries"], nice=values["nice"], ) except ImportError: raise ImportError( "Could not import aleph_alpha_client python package. " "Please install it with `pip install aleph_alpha_client`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "aleph_alpha" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = aleph_alpha("Tell me a joke.") """ from aleph_alpha_client import CompletionRequest, Prompt params = self._default_params if self.stop_sequences is not None and stop is not None: raise ValueError( "stop sequences found in both the input and default params." ) elif self.stop_sequences is not None: params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop params = {**params, **kwargs} request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion # If stop tokens are provided, Aleph Alpha's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop_sequences is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text if __name__ == "__main__": aa = AlephAlpha() print(aa("How are you?"))
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env", "langchain.pydantic_v1.root_validator" ]
[((6229, 6245), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (6243, 6245), False, 'from langchain.pydantic_v1 import Extra, root_validator\n'), ((6411, 6485), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""aleph_alpha_api_key"""', '"""ALEPH_ALPHA_API_KEY"""'], {}), "(values, 'aleph_alpha_api_key', 'ALEPH_ALPHA_API_KEY')\n", (6431, 6485), False, 'from langchain.utils import get_from_dict_or_env\n'), ((6603, 6812), 'aleph_alpha_client.Client', 'Client', ([], {'token': 'aleph_alpha_api_key', 'host': "values['host']", 'hosting': "values['hosting']", 'request_timeout_seconds': "values['request_timeout_seconds']", 'total_retries': "values['total_retries']", 'nice': "values['nice']"}), "(token=aleph_alpha_api_key, host=values['host'], hosting=values[\n 'hosting'], request_timeout_seconds=values['request_timeout_seconds'],\n total_retries=values['total_retries'], nice=values['nice'])\n", (6609, 6812), False, 'from aleph_alpha_client import Client\n'), ((11210, 11261), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (11229, 11261), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((10810, 10834), 'aleph_alpha_client.Prompt.from_text', 'Prompt.from_text', (['prompt'], {}), '(prompt)\n', (10826, 10834), False, 'from aleph_alpha_client import CompletionRequest, Prompt\n')]
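A short, hedged example of calling the AlephAlpha wrapper from the record above. The API key, model name, and prompt are placeholders; a real Aleph Alpha key is required for the call to succeed.

from langchain.llms import AlephAlpha  # or use the class defined above

# All values shown are placeholders.
aleph_alpha = AlephAlpha(
    aleph_alpha_api_key="YOUR_ALEPH_ALPHA_API_KEY",
    model="luminous-base",
    maximum_tokens=64,
    temperature=0.0,
    stop_sequences=["\n"],
)
# __call__ goes through _call(), which builds a CompletionRequest and strips stop tokens.
print(aleph_alpha("Q: What is the capital of France?\nA:"))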
import pandas as pd import streamlit as st from operator import itemgetter from langchain.chains.openai_tools import create_extraction_chain_pydantic from langchain_core.pydantic_v1 import BaseModel, Field from langchain_openai import ChatOpenAI llm = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0) from typing import List @st.cache_data def get_table_details(): # Read the CSV file into a DataFrame table_description = pd.read_csv("database_table_descriptions.csv") table_docs = [] # Iterate over the DataFrame rows to create Document objects table_details = "" for index, row in table_description.iterrows(): table_details = table_details + "Table Name:" + row['Table'] + "\n" + "Table Description:" + row['Description'] + "\n\n" return table_details class Table(BaseModel): """Table in SQL database.""" name: str = Field(description="Name of table in SQL database.") def get_tables(tables: List[Table]) -> List[str]: tables = [table.name for table in tables] return tables # table_names = "\n".join(db.get_usable_table_names()) table_details = get_table_details() table_details_prompt = f"""Return the names of ALL the SQL tables that MIGHT be relevant to the user question. \ The tables are: {table_details} Remember to include ALL POTENTIALLY RELEVANT tables, even if you're not sure that they're needed.""" table_chain = {"input": itemgetter("question")} | create_extraction_chain_pydantic(Table, llm, system_message=table_details_prompt) | get_tables
[ "langchain.chains.openai_tools.create_extraction_chain_pydantic", "langchain_openai.ChatOpenAI", "langchain_core.pydantic_v1.Field" ]
[((253, 306), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo-1106', temperature=0)\n", (263, 306), False, 'from langchain_openai import ChatOpenAI\n'), ((437, 483), 'pandas.read_csv', 'pd.read_csv', (['"""database_table_descriptions.csv"""'], {}), "('database_table_descriptions.csv')\n", (448, 483), True, 'import pandas as pd\n'), ((876, 927), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""Name of table in SQL database."""'}), "(description='Name of table in SQL database.')\n", (881, 927), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1436, 1522), 'langchain.chains.openai_tools.create_extraction_chain_pydantic', 'create_extraction_chain_pydantic', (['Table', 'llm'], {'system_message': 'table_details_prompt'}), '(Table, llm, system_message=\n table_details_prompt)\n', (1468, 1522), False, 'from langchain.chains.openai_tools import create_extraction_chain_pydantic\n'), ((1410, 1432), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (1420, 1432), False, 'from operator import itemgetter\n')]
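A sketch of invoking the table_chain assembled in the record above, run from the same module. It assumes OPENAI_API_KEY is set and that database_table_descriptions.csv exists with Table and Description columns; the question is an arbitrary example.

# The chain extracts Table objects from the question and maps them to plain table names.
relevant_tables = table_chain.invoke(
    {"question": "Which customers placed the most orders last quarter?"}
)
print(relevant_tables)  # e.g. ['customers', 'orders'], depending on the CSV contents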
import os import re import argparse import json import boto3 from bs4 import BeautifulSoup from langchain.document_loaders import PDFMinerPDFasHTMLLoader from langchain.docstore.document import Document from langchain.text_splitter import RecursiveCharacterTextSplitter,CharacterTextSplitter import statistics smr_client = boto3.client("sagemaker-runtime") parameters = { "max_length": 2048, "temperature": 0.01, "num_beams": 1, # >1可能会报错,"probability tensor contains either `inf`, `nan` or element < 0"; 即使remove_invalid_values=True也不能解决 "do_sample": False, "top_p": 0.7, "logits_processor" : None, # "remove_invalid_values" : True } ''' 1. pip install pdfminer.six ''' class Elembbox(object): left = -1 top = -1 width = -1 height = -1 right = -1 bottom = -1 margin = 8 # for header text above table RAW_MAX_DIST = 120 COL_MAX_DIST = 400 # top 增加是往下 bottom > top # right 增加是往右 right > left def __init__(self, left, top, width, height): self.left = left self.top = top self.width = width self.height = height self.right = left + width self.bottom = top + height # def __str__(self): # return "left:{}, top:{}, right:{}, bottom:{}, width:{}, height:{}".format(self.left, self.top, self.right, self.bottom, self.width, self.height) def __str__(self): return """<span style="position:absolute; border: red 1px solid; left:{}px; top:{}px; width:{}px; height:{}px;"></span>""".format(self.left, self.top, self.width, self.height) def is_overlap(self, other): if other is None: return False def is_pt_in_bbox(x, y, bbox): return x >= bbox.left \ and x <= bbox.right \ and y >= bbox.top - bbox.margin \ and y <= bbox.bottom + bbox.margin lefttop_in = is_pt_in_bbox(other.left, other.top, self) leftbottom_in = is_pt_in_bbox(other.left, other.bottom, self) righttop_in = is_pt_in_bbox(other.right, other.top, self) rightbottom_in = is_pt_in_bbox(other.right, other.bottom, self) lefttop_in_2 = is_pt_in_bbox(self.left, self.top, other) leftbottom_in_2 = is_pt_in_bbox(self.left, self.bottom, other) righttop_in_2 = is_pt_in_bbox(self.right, self.top, other) rightbottom_in_2 = is_pt_in_bbox(self.right, self.bottom, other) return lefttop_in or leftbottom_in or righttop_in or rightbottom_in or lefttop_in_2 or leftbottom_in_2 or righttop_in_2 or rightbottom_in_2 def is_beside(self, other): # only horizontal direction return self.is_overlap(other) def link_horizontal_lines(self, other): assert(self.height == 0) # merge the horizontal line if self.top == other.top: if self.left < other.left and other.left-1 <= self.right and other.right > self.right: # self.left < other.left <=self.right < other.right return True, Elembbox(self.left, self.top, other.right - self.left, self.height) elif other.left < self.left and self.left-1 <= other.right and other.right < self.right: # other.left < self.left <=other.right < self.right return True, Elembbox(other.left, self.top, self.right - other.left, self.height) return False, None def merge_horizontal_lines(self, other): if self.left == other.left and self.right == other.right: if other.top > self.bottom + self.RAW_MAX_DIST: return False, None if self.bottom < other.top: return True, Elembbox(self.left, self.top, self.width, other.top - self.top) return False, None def link_vertical_lines(self, other): assert(self.width == 0) # merge the vertical line if self.left == self.left: if self.top < other.top and other.top-1 <= self.bottom and self.bottom < other.bottom: # self.top < other.top <=self.bottom < other.bottom return True, Elembbox(self.left, self.top, self.width, other.bottom - self.top) elif 
other.top < self.top and self.top-1 <= other.bottom and other.bottom < self.bottom: return True, Elembbox(self.left, other.top, self.width, self.bottom - other.top) return False, None def merge_vertical_lines(self, other): if self.top == other.top and self.bottom == other.bottom: if other.right > self.right + self.COL_MAX_DIST: return False, None if self.right < other.right: return True, Elembbox(self.left, self.top, other.right - self.left, self.height) return False, None def create_bbox_horizontal(origin_span): vertical_sorted_span = sorted(origin_span, key=lambda span_pos: (span_pos.top, span_pos.left)) span_count = len(vertical_sorted_span) cur_span = vertical_sorted_span[0] merge_stage1_spans = [] for idx in range(1, span_count): success, new_span = cur_span.link_horizontal_lines(vertical_sorted_span[idx]) if not success: merge_stage1_spans.append(cur_span) cur_span = vertical_sorted_span[idx] else: cur_span = new_span merge_stage1_spans.append(cur_span) vertical_sorted_merge_spans = sorted(merge_stage1_spans, key=lambda span_pos: span_pos.left) # for item in vertical_sorted_merge_spans: # print(item) # print('---------------') merge_stage2_spans = [] cur_span = vertical_sorted_merge_spans[0] span_count = len(vertical_sorted_merge_spans) for idx in range(1, span_count): success, new_span = cur_span.merge_horizontal_lines(vertical_sorted_merge_spans[idx]) if not success: merge_stage2_spans.append(cur_span) cur_span = vertical_sorted_merge_spans[idx] else: cur_span = new_span return [ item for item in merge_stage2_spans if item.height >0 ] def create_bbox_vertical(origin_span): horizontal_sorted_span = sorted(origin_span, key=lambda span_pos: (span_pos.left, span_pos.top)) span_count = len(horizontal_sorted_span) cur_span = horizontal_sorted_span[0] merge_stage1_spans = [] for idx in range(1, span_count): success, new_span = cur_span.link_vertical_lines(horizontal_sorted_span[idx]) if not success: merge_stage1_spans.append(cur_span) cur_span = horizontal_sorted_span[idx] else: cur_span = new_span merge_stage1_spans.append(cur_span) horizontal_sorted_merge_spans = sorted(merge_stage1_spans, key=lambda span_pos: span_pos.top) # for item in horizontal_sorted_merge_spans: # print(item) # print('---------------') merge_stage2_spans = [] cur_span = horizontal_sorted_merge_spans[0] span_count = len(horizontal_sorted_merge_spans) for idx in range(1, span_count): success, new_span = cur_span.merge_vertical_lines(horizontal_sorted_merge_spans[idx]) if not success: merge_stage2_spans.append(cur_span) cur_span = horizontal_sorted_merge_spans[idx] else: cur_span = new_span return [ item for item in merge_stage2_spans if item.width >0 ] def merge_bbox(bbox_a, bbox_b): top = min(bbox_a.top, bbox_b.top) left = min(bbox_a.left, bbox_b.left) right = max(bbox_a.right, bbox_b.right) bottom = max(bbox_a.bottom, bbox_b.bottom) width = right - left height = bottom - top return Elembbox(left, top, width, height) def merge_bbox_list(bbox_list_a, bbox_list_b): if bbox_list_a is None: return bbox_list_b if bbox_list_b is None: return bbox_list_a merge_bbox_ret = [] overlap_flag = [False] * len(bbox_list_b) for bbox_a in bbox_list_a: merge_box = bbox_a for idx, bbox_b in enumerate(bbox_list_b): if merge_box.is_overlap(bbox_b): overlap_flag[idx] = True merge_box = merge_bbox(merge_box, bbox_b) merge_bbox_ret.append(merge_box) for idx in range(len(bbox_list_b)): if overlap_flag[idx] == False: merge_bbox_ret.append(bbox_list_b[idx]) return merge_bbox_ret def find_all_table_bbox(pdf_path): loader = 
PDFMinerPDFasHTMLLoader(pdf_path) data = loader.load()[0] soup = BeautifulSoup(data.page_content,'html.parser') table_border = soup.find_all('span') h_span = [] v_span = [] font_size_list = [] for idx, c in enumerate(table_border): # print("----{}---".format(idx)) style_attribute = c.get('style') # 'position:absolute; border: gray 1px solid; left:0px; top:50px; width:612px; height:792px;' attr_list = [ p.split(':') for p in style_attribute.strip(";").split('; ')] span_pos = { k : int(v[:-2]) for k,v in attr_list if k in ['left', 'top', 'width', 'height', 'font-size']} keys = span_pos.keys() if 'font-size' in keys: font_size_list.append(span_pos['font-size']) if 'left' not in keys or 'top' not in keys or 'width' not in keys or 'height' not in keys: continue if span_pos['height'] == 0 and span_pos['width'] > 10: h_span.append(Elembbox(span_pos['left'],span_pos['top'],span_pos['width'],span_pos['height'])) if span_pos['width'] == 0 and span_pos['height'] > 10: v_span.append(Elembbox(span_pos['left'],span_pos['top'],span_pos['width'],span_pos['height'])) h_bbox_list = None if len(h_span) > 0: h_bbox_list = create_bbox_horizontal(h_span) # print("----h_span bbox----") # for item in h_bbox_list: # print(item) v_bbox_list = None if len(v_span) > 0: v_bbox_list = create_bbox_vertical(v_span) # print("----v_span bbox----") # for item in v_bbox_list: # print(item) merge_bboxs =merge_bbox_list(h_bbox_list, v_bbox_list) # print("----merged bbox----") # for item in merge_bboxs: # print(item) # update padding for Elembbox mode_font_size = statistics.mode(font_size_list) for i in range(len(merge_bboxs)): merge_bboxs[i].margin = mode_font_size return merge_bboxs def fontsize_mapping(heading_fonts_arr): heading_fonts_set = list(set(heading_fonts_arr)) heading_fonts_set.sort(reverse=True) idxs = range(len(heading_fonts_set)) font_idx_mapping = dict(zip(heading_fonts_set,idxs)) return font_idx_mapping import pdb def split_pdf_to_snippet(pdf_path): loader = PDFMinerPDFasHTMLLoader(pdf_path) data = loader.load()[0] soup = BeautifulSoup(data.page_content,'html.parser') content = soup.find_all('div') cur_fs = None cur_text = None snippets = [] # first collect all snippets that have the same font size table_elem_bboxs = find_all_table_bbox(pdf_path) print("table bbox count: {}".format(len(table_elem_bboxs))) skip_count = 0 def overlap_with_table(table_elem_bboxs, div_elem_bbox): for elem_bbox in table_elem_bboxs: if div_elem_bbox.is_overlap(elem_bbox): return True return False previous_div_bbox = None snippet_start = True snippet_follow = False snippet_state = snippet_start for c in content: div_style_attribute = c.get('style') attr_list = [ p.split(':') for p in div_style_attribute.strip(";").split('; ')] div_pos = { k : int(v[:-2]) for k,v in attr_list if k in ['left', 'top', 'width', 'height']} keys = div_pos.keys() if 'left' not in keys or 'top' not in keys or 'width' not in keys or 'height' not in keys: continue div_elem_bbox = Elembbox(div_pos['left'],div_pos['top'],div_pos['width'],div_pos['height']) if overlap_with_table(table_elem_bboxs, div_elem_bbox): skip_count += 1 continue # if these two div is not beside each other if not div_elem_bbox.is_beside(previous_div_bbox) and cur_text and cur_fs: snippets.append((cur_text,cur_fs,snippet_state)) cur_fs = None cur_text = None snippet_state = snippet_start previous_div_bbox = div_elem_bbox sp_list = c.find_all('span') if not sp_list: continue for sp in sp_list: st = sp.get('style') if not st: continue fs = re.findall('font-size:(\d+)px',st) if not fs: 
continue fs = int(fs[0]) if not cur_fs and not cur_text: cur_fs = fs cur_text = sp.text snippet_state = snippet_start continue if fs == cur_fs: cur_text += sp.text else: snippets.append((cur_text, cur_fs, snippet_state)) snippet_state = snippet_start if fs > cur_fs else snippet_follow cur_fs = fs cur_text = sp.text snippets.append((cur_text,cur_fs, snippet_follow)) # merge snippet merged_snippets = [] temp_list = [] doc_title = '' max_font_size = max([ item[1] for item in snippets]) for snippet in snippets: if max_font_size == snippet[1]: doc_title = snippet[0] if len(temp_list) == 0 or snippet[2] == False: temp_list.append(snippet) else: content_list = [ item[0] for item in temp_list] font_size_list = [ item[1] for item in temp_list] content = "\n".join(content_list) font_size = max(font_size_list) temp_list.clear() temp_list.append(snippet) merged_snippets.append({"content":content, "font_size":font_size}) print("filter {} table text".format(skip_count)) return merged_snippets, doc_title def split_pdf(pdf_path): semantic_snippets, doc_title = split_pdf_to_snippet(pdf_path) text_splitter = RecursiveCharacterTextSplitter( chunk_size = 1024, chunk_overlap = 0, separators=["\n\n", "\n", ".", "。", ",",","," "], ) for item in semantic_snippets: content = item["content"] chunks = text_splitter.create_documents([ content ] ) for chunk in chunks: snippet_info = { "content" : chunk.page_content, "font_size" : item["font_size"], "doc_title" : doc_title } yield snippet_info def summarize(content, chunk_size = 512, llm_endpoint=""): summary = content if llm_endpoint and len(content) > chunk_size: # todo: call LLM to summarize prompt_template = """对下面反引号这段文档进行摘要,字数不超过{} ``` {} ``` 摘要: """ prompt = prompt_template.format(chunk_size, content[:1536]) response_model = smr_client.invoke_endpoint( EndpointName=llm_endpoint, Body=json.dumps( { "inputs": prompt, "parameters": parameters, "history" : [] } ), ContentType="application/json", ) json_ret = json.loads(response_model['Body'].read().decode('utf8')) summary = json_ret['outputs'] return summary def convert_snippetJson2markdown(snippet_info, max_level=3): mk_head = "" p_head = "" for item in snippet_info["heading"][0:max_level][::-1]: mk_head += "#" head = "{} {}".format(mk_head, item["heading"].replace('\n','')) p_head += "{}\n".format(head) p_content = "{}\n{}".format(p_head, snippet_info['content']) return p_content if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input_file', type=str, default='./1.pdf', help='input file') parser.add_argument('--output_dir', type=str, default='./', help='output file') parser.add_argument('--sep', type=str, default='=====', help='separtor') parser.add_argument('--title_level', type=int, default=4, help='keep the tiltes of level') parser.add_argument('--chunk_size', type=int, default=128, help='chunk_size') parser.add_argument('--llm_endpoint', type=str, default="", help='llm_endpoint') args = parser.parse_args() pdf_path = args.input_file kg_dir = args.output_dir kg_name = os.path.basename(pdf_path).replace('.pdf','.json') separtor = args.sep max_title_level = args.title_level chunk_size = args.chunk_size llm_endpoint = args.llm_endpoint idx = 1 f_name = "{}/{}".format(kg_dir, kg_name) out_f = open(f_name, 'w') snippet_arr = [] for snippet_info in split_pdf(pdf_path): snippet_arr.append(snippet_info) all_info = json.dumps(snippet_arr, ensure_ascii=False) out_f.write(all_info) out_f.close() print("finish separation of {}".format(pdf_path))
[ "langchain.document_loaders.PDFMinerPDFasHTMLLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((324, 357), 'boto3.client', 'boto3.client', (['"""sagemaker-runtime"""'], {}), "('sagemaker-runtime')\n", (336, 357), False, 'import boto3\n'), ((8397, 8430), 'langchain.document_loaders.PDFMinerPDFasHTMLLoader', 'PDFMinerPDFasHTMLLoader', (['pdf_path'], {}), '(pdf_path)\n', (8420, 8430), False, 'from langchain.document_loaders import PDFMinerPDFasHTMLLoader\n'), ((8470, 8517), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.page_content', '"""html.parser"""'], {}), "(data.page_content, 'html.parser')\n", (8483, 8517), False, 'from bs4 import BeautifulSoup\n'), ((10234, 10265), 'statistics.mode', 'statistics.mode', (['font_size_list'], {}), '(font_size_list)\n', (10249, 10265), False, 'import statistics\n'), ((10698, 10731), 'langchain.document_loaders.PDFMinerPDFasHTMLLoader', 'PDFMinerPDFasHTMLLoader', (['pdf_path'], {}), '(pdf_path)\n', (10721, 10731), False, 'from langchain.document_loaders import PDFMinerPDFasHTMLLoader\n'), ((10772, 10819), 'bs4.BeautifulSoup', 'BeautifulSoup', (['data.page_content', '"""html.parser"""'], {}), "(data.page_content, 'html.parser')\n", (10785, 10819), False, 'from bs4 import BeautifulSoup\n'), ((14183, 14304), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(0)', 'separators': "['\\n\\n', '\\n', '.', '。', ',', ',', ' ']"}), "(chunk_size=1024, chunk_overlap=0, separators\n =['\\n\\n', '\\n', '.', '。', ',', ',', ' '])\n", (14213, 14304), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter\n'), ((15947, 15972), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15970, 15972), False, 'import argparse\n'), ((16983, 17026), 'json.dumps', 'json.dumps', (['snippet_arr'], {'ensure_ascii': '(False)'}), '(snippet_arr, ensure_ascii=False)\n', (16993, 17026), False, 'import json\n'), ((12605, 12641), 're.findall', 're.findall', (['"""font-size:(\\\\d+)px"""', 'st'], {}), "('font-size:(\\\\d+)px', st)\n", (12615, 12641), False, 'import re\n'), ((16589, 16615), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (16605, 16615), False, 'import os\n'), ((15170, 15241), 'json.dumps', 'json.dumps', (["{'inputs': prompt, 'parameters': parameters, 'history': []}"], {}), "({'inputs': prompt, 'parameters': parameters, 'history': []})\n", (15180, 15241), False, 'import json\n')]
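A small sketch of driving the PDF splitter from the record above programmatically instead of through its argparse entry point. The input path is a placeholder; each yielded snippet carries the chunk text, its font size, and the inferred document title.

# Iterate over the split_pdf generator defined above; "./sample.pdf" is a placeholder path.
for snippet in split_pdf("./sample.pdf"):
    print(snippet["doc_title"], snippet["font_size"])
    print(snippet["content"][:200])
    print("-" * 40)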
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain.document_loaders import TextLoader
from typing import List
from langchain.schema import Document
import os

os.environ['OPENAI_API_KEY'] = "your-api-key"


class Genie:

    def __init__(self, file_path: str):
        self.file_path = file_path
        self.loader = TextLoader(self.file_path)
        self.documents = self.loader.load()
        self.texts = self.text_split(self.documents)
        self.vectordb = self.embeddings(self.texts)
        self.genie = VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=self.vectordb)

    @staticmethod
    def text_split(documents: TextLoader):
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        texts = text_splitter.split_documents(documents)
        return texts

    @staticmethod
    def embeddings(texts: List[Document]):
        embeddings = OpenAIEmbeddings()
        vectordb = Chroma.from_documents(texts, embeddings)
        return vectordb

    def ask(self, query: str):
        return self.genie.run(query)


if __name__ == "__main__":
    genie = Genie("example.txt")
    print(genie.ask("What is the weather like?"))
[ "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.document_loaders.TextLoader", "langchain.llms.OpenAI", "langchain.vectorstores.Chroma.from_documents", "langchain.embeddings.OpenAIEmbeddings" ]
[((515, 541), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.file_path'], {}), '(self.file_path)\n', (525, 541), False, 'from langchain.document_loaders import TextLoader\n'), ((886, 950), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (916, 950), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1112, 1130), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1128, 1130), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1150, 1190), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1171, 1190), False, 'from langchain.vectorstores import Chroma\n'), ((743, 751), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (749, 751), False, 'from langchain.llms import OpenAI\n')]
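VectorDBQA, used by the Genie class in the record above, has been superseded by RetrievalQA in later langchain releases. The following is a hedged sketch of the equivalent retriever-based wiring that reuses the same Chroma store; the chain type and k value are assumptions, not part of the original record.

from langchain.chains import RetrievalQA
from langchain.llms import OpenAI

# Equivalent retriever-based construction (sketch): wrap the existing Chroma store as a retriever.
genie = Genie("example.txt")
qa = RetrievalQA.from_chain_type(
    llm=OpenAI(),
    chain_type="stuff",
    retriever=genie.vectordb.as_retriever(search_kwargs={"k": 4}),
)
# print(qa.run("What is the weather like?"))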
import dataclasses import json import numpy as np import os import requests import sys from typing import List from langchain.text_splitter import CharacterTextSplitter from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import Chroma from langchain.chat_models import ChatOpenAI from langchain import VectorDBQA from langchain.chains import RetrievalQA, RetrievalQAWithSourcesChain from langchain.schema import Document from langchain.prompts import PromptTemplate from newspaper import Article from newspaper.article import ArticleException import openai from tenacity import ( retry, stop_after_attempt, wait_random_exponential, ) from bs4 import BeautifulSoup # Required to parse HTML from urllib.parse import unquote # Required to unquote URLs import argparse prompt_template = """Use the following pieces of context to answer the question completely and precisely in up to 500 words. If you don't know the answer, just say "I don't know" and explain why the context is insufficient to answer the question. You need to support every statement in the answer with in-line citations to passages given in the the context. The citations should appear as numbers such as [1], [2] that refer to the Passage IDs of the given passages. A statement may need to be supported by multiple references and should then be cited as [1] [2]. (for example, "Paris is the capital of France [1] [2]." where "1" and "2" are the Passage IDs of the first and second passage). {context} Question: {question} Answer:""" PROMPT = PromptTemplate( template=prompt_template, input_variables=["context", "question"] ) sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))) from data_utils import example_utils parser = argparse.ArgumentParser() parser.add_argument("--input_file", help="input filepath", type=str) parser.add_argument("--output_file", help="output filepath", type=str) parser.add_argument("--topk", help="the value of k for the topk passages to use for QA", type=int, default=5) args = parser.parse_args() text_splitter = CharacterTextSplitter(separator=' ', chunk_size=1000, chunk_overlap=200) ##### # OPENAI ##### OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') if OPENAI_API_KEY is None: raise ValueError("Please set the OpenAI API key as environment variable 'OPENAI_API_KEY'.") openai.organization = "" openai.api_key = "" llm = ChatOpenAI(model_name='gpt-4', openai_api_key=OPENAI_API_KEY) ##### # Google Search ##### CSE_URL = "https://www.googleapis.com/customsearch/v1" CSE_URL = "https://cse.google.com/cse?cx=74509b47ac2e54393" gs_api_key = os.getenv('CUSTOM_SEARCH_API_KEY') pse_cx = os.getenv('CUSTOM_SEARCH_CX') if gs_api_key is None: raise ValueError("Please set the Custom search API key as environment variable 'CUSTOM_SEARCH_API_KEY'.") if pse_cx is None: raise ValueError("Please set the Programmable search engine ID as environment variable 'CUSTOM_SEARCH_CX'.") @retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6)) def chat_completion_with_backoff(**kwargs): return openai.ChatCompletion.create(**kwargs) def google_search( query: str, api_key: str = None, cx: str = None ): """Get top 10 webpages from Google search. 
Args: query: search query api_key: custom search engine api key cx: programmable search engine id Returns: top-10 search results in json format """ response = requests.get(f"https://www.google.com/search?q={query}") # Make the request soup = BeautifulSoup(response.text, "html.parser") # Parse the HTML links = soup.find_all("a") # Find all the links in the HTML urls = [] for l in [link for link in links if link["href"].startswith("/url?q=")]: # get the url url = l["href"] # remove the "/url?q=" part url = url.replace("/url?q=", "") # remove the part after the "&sa=..." url = unquote(url.split("&sa=")[0]) # special case for google scholar if url.startswith("https://scholar.google.com/scholar_url?url=http"): url = url.replace("https://scholar.google.com/scholar_url?url=", "").split("&")[0] elif 'google.com/' in url: # skip google links continue if url.endswith('.pdf'): # skip pdf links continue if '#' in url: # remove anchors (e.g. wikipedia.com/bob#history and wikipedia.com/bob#genetics are the same page) url = url.split('#')[0] # print the url urls.append(url) # Use numpy to dedupe the list of urls after removing anchors urls = list(np.unique(urls)) return urls # if api_key is None: # api_key = gs_api_key # if cx is None: # cx = pse_cx # res = requests.get( # url=CSE_URL, # params={ # "q": query, # "key": api_key, # "cx": cx, # }, # ) # if res.status_code != 200: # print(f"Google search error: {res.status_code}") # return [] # res = res.json() # if 'items' in res: # return res['items'] # else: # return [] def scrape_and_parse(url: str): """Scrape a webpage and parse it into a Document object""" a = Article(url) try: a.download() a.parse() except Exception as e: return None return { "url": url, "text": a.text, } def scrape_and_filter(urls: list): doc_list = [] for u in urls: print(f"Processing: {u}") doc = scrape_and_parse(u) if doc is None: continue elif "Access" in doc["text"] and "Denied" in doc["text"]: continue else: doc_list.append(doc) return doc_list def retrieval_gpt_generate(query: str, retrieved_documents: List[Document], topk: int): texts = text_splitter.split_documents(retrieved_documents) embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY, max_retries=1000) while True: try: docsearch = Chroma.from_documents(texts, embeddings) except Exception as e: continue break doc_retriever = docsearch.as_retriever(search_kwargs={"k": topk}) topk_relevant_passages = doc_retriever.get_relevant_documents(query) topk_relevant_passages_content = ["Passage ID " + str(i+1) + ": " + doc.page_content.replace("\n","") for i, doc in enumerate(topk_relevant_passages)] ret_passages = [{"text": p, "url": d.metadata["source"]} for p, d in zip(topk_relevant_passages_content, topk_relevant_passages)] cur_prompt = prompt_template.format(context="Context: \n" + "\n\n".join(topk_relevant_passages_content), question=query) resp = chat_completion_with_backoff( model="gpt-4", messages=[{"role": "user", "content": cur_prompt}], max_tokens=2048, ) answer = resp["choices"][0]["message"]["content"] # chain_type_kwargs = {"prompt": PROMPT} # qa = VectorDBQA.from_chain_type(llm=llm, chain_type="stuff", vectorstore=docsearch, return_source_documents=True) # qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={"k": topk}), return_source_documents=True, chain_type_kwargs=chain_type_kwargs) # result = qa({"query": query}) return answer, ret_passages if __name__ == '__main__': input_data = example_utils.read_examples(args.input_file) f = 
open(args.output_file, "a") for example in input_data: query_text = example.question # Google search first search_results = google_search(query_text) # search_urls = [r['link'] for r in search_results] if len(search_results) > 0: all_docs = scrape_and_filter(search_results) else: all_docs = [] example.answers["ret_read_gpt4"] = example_utils.Answer(answer_string="I don't know.", attribution=[]) json.dump(dataclasses.asdict(example), f) f.write("\n") continue all_docs = [Document(page_content=d['text'], metadata={'source': d['url']}) for d in all_docs] all_docs_content_lens = [len(doc.page_content.strip()) for doc in all_docs] if not all_docs or not sum(all_docs_content_lens): example.answers["ret_read_gpt4"] = example_utils.Answer(answer_string="I don't know.", attribution=[]) json.dump(dataclasses.asdict(example), f) f.write("\n") continue gpt_query_text = "I am an expert in the field of " + example.metadata.field + ". Please answer my question: " + query_text answer, attributions = retrieval_gpt_generate(gpt_query_text, all_docs, args.topk) example.answers["rr_gs_gpt4"] = example_utils.Answer(answer_string=answer, attribution=attributions) json.dump(dataclasses.asdict(example), f) f.write("\n")
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.chat_models.ChatOpenAI", "langchain.schema.Document", "langchain.vectorstores.Chroma.from_documents", "langchain.prompts.PromptTemplate", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((1563, 1648), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1577, 1648), False, 'from langchain.prompts import PromptTemplate\n'), ((1786, 1811), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1809, 1811), False, 'import argparse\n'), ((2108, 2180), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '""" """', 'chunk_size': '(1000)', 'chunk_overlap': '(200)'}), "(separator=' ', chunk_size=1000, chunk_overlap=200)\n", (2129, 2180), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((2220, 2247), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2229, 2247), False, 'import os\n'), ((2424, 2485), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'openai_api_key': 'OPENAI_API_KEY'}), "(model_name='gpt-4', openai_api_key=OPENAI_API_KEY)\n", (2434, 2485), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2643, 2677), 'os.getenv', 'os.getenv', (['"""CUSTOM_SEARCH_API_KEY"""'], {}), "('CUSTOM_SEARCH_API_KEY')\n", (2652, 2677), False, 'import os\n'), ((2687, 2716), 'os.getenv', 'os.getenv', (['"""CUSTOM_SEARCH_CX"""'], {}), "('CUSTOM_SEARCH_CX')\n", (2696, 2716), False, 'import os\n'), ((3119, 3157), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {}), '(**kwargs)\n', (3147, 3157), False, 'import openai\n'), ((3509, 3565), 'requests.get', 'requests.get', (['f"""https://www.google.com/search?q={query}"""'], {}), "(f'https://www.google.com/search?q={query}')\n", (3521, 3565), False, 'import requests\n'), ((3596, 3639), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (3609, 3639), False, 'from bs4 import BeautifulSoup\n'), ((5313, 5325), 'newspaper.Article', 'Article', (['url'], {}), '(url)\n', (5320, 5325), False, 'from newspaper import Article\n'), ((6052, 6117), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'OPENAI_API_KEY', 'max_retries': '(1000)'}), '(openai_api_key=OPENAI_API_KEY, max_retries=1000)\n', (6068, 6117), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((7521, 7565), 'data_utils.example_utils.read_examples', 'example_utils.read_examples', (['args.input_file'], {}), '(args.input_file)\n', (7548, 7565), False, 'from data_utils import example_utils\n'), ((2996, 3034), 'tenacity.wait_random_exponential', 'wait_random_exponential', ([], {'min': '(1)', 'max': '(60)'}), '(min=1, max=60)\n', (3019, 3034), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((3041, 3062), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(6)'], {}), '(6)\n', (3059, 3062), False, 'from tenacity import retry, stop_after_attempt, wait_random_exponential\n'), ((4678, 4693), 'numpy.unique', 'np.unique', (['urls'], {}), '(urls)\n', (4687, 4693), True, 'import numpy as np\n'), ((9024, 9092), 'data_utils.example_utils.Answer', 'example_utils.Answer', ([], {'answer_string': 'answer', 'attribution': 'attributions'}), '(answer_string=answer, attribution=attributions)\n', (9044, 9092), False, 'from data_utils import example_utils\n'), ((1700, 1725), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1715, 1725), False, 'import os\n'), ((6171, 6211), 
'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (6192, 6211), False, 'from langchain.vectorstores import Chroma\n'), ((7994, 8061), 'data_utils.example_utils.Answer', 'example_utils.Answer', ([], {'answer_string': '"""I don\'t know."""', 'attribution': '[]'}), '(answer_string="I don\'t know.", attribution=[])\n', (8014, 8061), False, 'from data_utils import example_utils\n'), ((8252, 8315), 'langchain.schema.Document', 'Document', ([], {'page_content': "d['text']", 'metadata': "{'source': d['url']}"}), "(page_content=d['text'], metadata={'source': d['url']})\n", (8260, 8315), False, 'from langchain.schema import Document\n'), ((8525, 8592), 'data_utils.example_utils.Answer', 'example_utils.Answer', ([], {'answer_string': '"""I don\'t know."""', 'attribution': '[]'}), '(answer_string="I don\'t know.", attribution=[])\n', (8545, 8592), False, 'from data_utils import example_utils\n'), ((9175, 9202), 'dataclasses.asdict', 'dataclasses.asdict', (['example'], {}), '(example)\n', (9193, 9202), False, 'import dataclasses\n'), ((8152, 8179), 'dataclasses.asdict', 'dataclasses.asdict', (['example'], {}), '(example)\n', (8170, 8179), False, 'import dataclasses\n'), ((8683, 8710), 'dataclasses.asdict', 'dataclasses.asdict', (['example'], {}), '(example)\n', (8701, 8710), False, 'import dataclasses\n')]
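A hedged sketch of calling retrieval_gpt_generate from the record above directly, bypassing the Google-search step, run from the same module so the global text splitter and backoff helper are available. The document contents and URL are made-up placeholders, and OPENAI_API_KEY must be set for the embedding and chat calls.

from langchain.schema import Document

# Build a placeholder document and ask a question against it.
docs = [
    Document(
        page_content="The Transformer architecture relies on self-attention ...",
        metadata={"source": "https://example.com/transformers"},
    ),
]
answer, passages = retrieval_gpt_generate(
    "I am an expert in the field of NLP. Please answer my question: How do Transformers work?",
    docs,
    topk=1,
)
print(answer)
for p in passages:
    print(p["url"], p["text"][:80])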
import logging from pathlib import Path from typing import List, Optional, Tuple from dotenv import load_dotenv load_dotenv() from queue import Empty, Queue from threading import Thread import gradio as gr from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.prompts import HumanMessagePromptTemplate from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage from callback import QueueCallback MODELS_NAMES = ["gpt-3.5-turbo", "gpt-4"] DEFAULT_TEMPERATURE = 0.7 ChatHistory = List[str] logging.basicConfig( format="[%(asctime)s %(levelname)s]: %(message)s", level=logging.INFO ) # load up our system prompt default_system_prompt = Path("prompts/system.prompt").read_text() # for the human, we will just inject the text human_message_prompt_template = HumanMessagePromptTemplate.from_template("{text}") def on_message_button_click( chat: Optional[ChatOpenAI], message: str, chatbot_messages: ChatHistory, messages: List[BaseMessage], ) -> Tuple[ChatOpenAI, str, ChatHistory, List[BaseMessage]]: if chat is None: # in the queue we will store our streamed tokens queue = Queue() # let's create our default chat chat = ChatOpenAI( model_name=MODELS_NAMES[0], temperature=DEFAULT_TEMPERATURE, streaming=True, callbacks=([QueueCallback(queue)]), ) else: # hacky way to get the queue back queue = chat.callbacks[0].queue job_done = object() logging.info(f"Asking question to GPT, messages={messages}") # let's add the messages to our stuff messages.append(HumanMessage(content=message)) chatbot_messages.append((message, "")) # this is a little wrapper we need cuz we have to add the job_done def task(): chat(messages) queue.put(job_done) # now let's start a thread and run the generation inside it t = Thread(target=task) t.start() # this will hold the content as we generate content = "" # now, we read the next_token from queue and do what it has to be done while True: try: next_token = queue.get(True, timeout=1) if next_token is job_done: break content += next_token chatbot_messages[-1] = (message, content) yield chat, "", chatbot_messages, messages except Empty: continue # finally we can add our reply to messsages messages.append(AIMessage(content=content)) logging.debug(f"reply = {content}") logging.info(f"Done!") return chat, "", chatbot_messages, messages def system_prompt_handler(value: str) -> str: return value def on_clear_button_click(system_prompt: str) -> Tuple[str, List, List]: return "", [], [SystemMessage(content=system_prompt)] def on_apply_settings_button_click( system_prompt: str, model_name: str, temperature: float ): logging.info( f"Applying settings: model_name={model_name}, temperature={temperature}" ) chat = ChatOpenAI( model_name=model_name, temperature=temperature, streaming=True, callbacks=[QueueCallback(Queue())], ) # don't forget to nuke our queue chat.callbacks[0].queue.empty() return chat, *on_clear_button_click(system_prompt) # some css why not, "borrowed" from https://huggingface.co/spaces/ysharma/Gradio-demo-streaming/blob/main/app.py with gr.Blocks( css="""#col_container {width: 700px; margin-left: auto; margin-right: auto;} #chatbot {height: 400px; overflow: auto;}""" ) as demo: system_prompt = gr.State(default_system_prompt) # here we keep our state so multiple user can use the app at the same time! 
messages = gr.State([SystemMessage(content=default_system_prompt)]) # same thing for the chat, we want one chat per use so callbacks are unique I guess chat = gr.State(None) with gr.Column(elem_id="col_container"): gr.Markdown("# Welcome to GradioGPT! 🌟🚀") gr.Markdown( "An easy to use template. It comes with state and settings managment" ) with gr.Column(): system_prompt_area = gr.TextArea( default_system_prompt, lines=4, label="system prompt", interactive=True ) # we store the value into the state to avoid re rendering of the area system_prompt_area.input( system_prompt_handler, inputs=[system_prompt_area], outputs=[system_prompt], ) system_prompt_button = gr.Button("Set") chatbot = gr.Chatbot() with gr.Column(): message = gr.Textbox(label="chat input") message.submit( on_message_button_click, [chat, message, chatbot, messages], [chat, message, chatbot, messages], queue=True, ) message_button = gr.Button("Submit", variant="primary") message_button.click( on_message_button_click, [chat, message, chatbot, messages], [chat, message, chatbot, messages], ) with gr.Row(): with gr.Column(): clear_button = gr.Button("Clear") clear_button.click( on_clear_button_click, [system_prompt], [message, chatbot, messages], queue=False, ) with gr.Accordion("Settings", open=False): model_name = gr.Dropdown( choices=MODELS_NAMES, value=MODELS_NAMES[0], label="model" ) temperature = gr.Slider( minimum=0.0, maximum=1.0, value=0.7, step=0.1, label="temperature", interactive=True, ) apply_settings_button = gr.Button("Apply") apply_settings_button.click( on_apply_settings_button_click, [system_prompt, model_name, temperature], [chat, message, chatbot, messages], ) system_prompt_button.click( on_apply_settings_button_click, [system_prompt, model_name, temperature], [chat, message, chatbot, messages], ) demo.queue() demo.launch()
[ "langchain.schema.AIMessage", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(message)s',\n level=logging.INFO)\n", (623, 698), False, 'import logging\n'), ((873, 923), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (913, 923), False, 'from langchain.prompts import HumanMessagePromptTemplate\n'), ((1596, 1656), 'logging.info', 'logging.info', (['f"""Asking question to GPT, messages={messages}"""'], {}), "(f'Asking question to GPT, messages={messages}')\n", (1608, 1656), False, 'import logging\n'), ((2004, 2023), 'threading.Thread', 'Thread', ([], {'target': 'task'}), '(target=task)\n', (2010, 2023), False, 'from threading import Thread\n'), ((2606, 2641), 'logging.debug', 'logging.debug', (['f"""reply = {content}"""'], {}), "(f'reply = {content}')\n", (2619, 2641), False, 'import logging\n'), ((2646, 2668), 'logging.info', 'logging.info', (['f"""Done!"""'], {}), "(f'Done!')\n", (2658, 2668), False, 'import logging\n'), ((3020, 3111), 'logging.info', 'logging.info', (['f"""Applying settings: model_name={model_name}, temperature={temperature}"""'], {}), "(\n f'Applying settings: model_name={model_name}, temperature={temperature}')\n", (3032, 3111), False, 'import logging\n'), ((3530, 3688), 'gradio.Blocks', 'gr.Blocks', ([], {'css': '"""#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""'}), '(css=\n """#col_container {width: 700px; margin-left: auto; margin-right: auto;}\n #chatbot {height: 400px; overflow: auto;}"""\n )\n', (3539, 3688), True, 'import gradio as gr\n'), ((3714, 3745), 'gradio.State', 'gr.State', (['default_system_prompt'], {}), '(default_system_prompt)\n', (3722, 3745), True, 'import gradio as gr\n'), ((3997, 4011), 'gradio.State', 'gr.State', (['None'], {}), '(None)\n', (4005, 4011), True, 'import gradio as gr\n'), ((753, 782), 'pathlib.Path', 'Path', (['"""prompts/system.prompt"""'], {}), "('prompts/system.prompt')\n", (757, 782), False, 'from pathlib import Path\n'), ((1228, 1235), 'queue.Queue', 'Queue', ([], {}), '()\n', (1233, 1235), False, 'from queue import Empty, Queue\n'), ((1719, 1748), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'message'}), '(content=message)\n', (1731, 1748), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2574, 2600), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (2583, 2600), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4022, 4056), 'gradio.Column', 'gr.Column', ([], {'elem_id': '"""col_container"""'}), "(elem_id='col_container')\n", (4031, 4056), True, 'import gradio as gr\n'), ((4066, 4107), 'gradio.Markdown', 'gr.Markdown', (['"""# Welcome to GradioGPT! 🌟🚀"""'], {}), "('# Welcome to GradioGPT! 🌟🚀')\n", (4077, 4107), True, 'import gradio as gr\n'), ((4116, 4203), 'gradio.Markdown', 'gr.Markdown', (['"""An easy to use template. It comes with state and settings managment"""'], {}), "(\n 'An easy to use template. 
It comes with state and settings managment')\n", (4127, 4203), True, 'import gradio as gr\n'), ((4725, 4737), 'gradio.Chatbot', 'gr.Chatbot', ([], {}), '()\n', (4735, 4737), True, 'import gradio as gr\n'), ((2877, 2913), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_prompt'}), '(content=system_prompt)\n', (2890, 2913), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((3851, 3895), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'default_system_prompt'}), '(content=default_system_prompt)\n', (3864, 3895), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((4234, 4245), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4243, 4245), True, 'import gradio as gr\n'), ((4280, 4368), 'gradio.TextArea', 'gr.TextArea', (['default_system_prompt'], {'lines': '(4)', 'label': '"""system prompt"""', 'interactive': '(True)'}), "(default_system_prompt, lines=4, label='system prompt',\n interactive=True)\n", (4291, 4368), True, 'import gradio as gr\n'), ((4689, 4705), 'gradio.Button', 'gr.Button', (['"""Set"""'], {}), "('Set')\n", (4698, 4705), True, 'import gradio as gr\n'), ((4751, 4762), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (4760, 4762), True, 'import gradio as gr\n'), ((4786, 4816), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""chat input"""'}), "(label='chat input')\n", (4796, 4816), True, 'import gradio as gr\n'), ((5061, 5099), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {'variant': '"""primary"""'}), "('Submit', variant='primary')\n", (5070, 5099), True, 'import gradio as gr\n'), ((5306, 5314), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5312, 5314), True, 'import gradio as gr\n'), ((5333, 5344), 'gradio.Column', 'gr.Column', ([], {}), '()\n', (5342, 5344), True, 'import gradio as gr\n'), ((5377, 5395), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (5386, 5395), True, 'import gradio as gr\n'), ((5630, 5666), 'gradio.Accordion', 'gr.Accordion', (['"""Settings"""'], {'open': '(False)'}), "('Settings', open=False)\n", (5642, 5666), True, 'import gradio as gr\n'), ((5697, 5768), 'gradio.Dropdown', 'gr.Dropdown', ([], {'choices': 'MODELS_NAMES', 'value': 'MODELS_NAMES[0]', 'label': '"""model"""'}), "(choices=MODELS_NAMES, value=MODELS_NAMES[0], label='model')\n", (5708, 5768), True, 'import gradio as gr\n'), ((5837, 5937), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': '(0.7)', 'step': '(0.1)', 'label': '"""temperature"""', 'interactive': '(True)'}), "(minimum=0.0, maximum=1.0, value=0.7, step=0.1, label=\n 'temperature', interactive=True)\n", (5846, 5937), True, 'import gradio as gr\n'), ((6112, 6130), 'gradio.Button', 'gr.Button', (['"""Apply"""'], {}), "('Apply')\n", (6121, 6130), True, 'import gradio as gr\n'), ((1440, 1460), 'callback.QueueCallback', 'QueueCallback', (['queue'], {}), '(queue)\n', (1453, 1460), False, 'from callback import QueueCallback\n'), ((3265, 3272), 'queue.Queue', 'Queue', ([], {}), '()\n', (3270, 3272), False, 'from queue import Empty, Queue\n')]
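The callback module imported at the top of the Gradio app above is not included in this record. A minimal QueueCallback compatible with how it is used there (streaming tokens into a Queue exposed as .queue) might look like the sketch below; the author's real implementation may differ.

from queue import Queue
from typing import Any
from langchain.callbacks.base import BaseCallbackHandler

class QueueCallback(BaseCallbackHandler):
    """Sketch of a streaming handler: pushes each newly generated token into a queue."""

    def __init__(self, queue: Queue) -> None:
        self.queue = queue

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        # Called by ChatOpenAI(streaming=True) for every generated token.
        self.queue.put(token)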
""" View stage example selector. | Copyright 2017-2023, Voxel51, Inc. | `voxel51.com <https://voxel51.com/>`_ | """ import os import pickle from langchain.prompts import FewShotPromptTemplate, PromptTemplate import numpy as np import pandas as pd from scipy.spatial.distance import cosine # pylint: disable=relative-beyond-top-level from .utils import get_embedding_function, get_cache, hash_query ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) EXAMPLES_DIR = os.path.join(ROOT_DIR, "examples") EXAMPLE_EMBEDDINGS_PATH = os.path.join( EXAMPLES_DIR, "viewstage_embeddings.pkl" ) VIEW_STAGE_EXAMPLES_PATH = os.path.join(EXAMPLES_DIR, "viewstage_examples.csv") VIEW_STAGE_EXAMPLE_PROMPT = PromptTemplate( input_variables=["input", "output"], template="Input: {input}\nOutput: {output}", ) def get_or_create_embeddings(queries): if os.path.isfile(EXAMPLE_EMBEDDINGS_PATH): with open(EXAMPLE_EMBEDDINGS_PATH, "rb") as f: example_embeddings = pickle.load(f) else: example_embeddings = {} query_hashes = [] new_hashes = [] new_queries = [] for query in queries: key = hash_query(query) query_hashes.append(key) if key not in example_embeddings: new_hashes.append(key) new_queries.append(query) if new_queries: print("Generating %d embeddings..." % len(new_queries)) model = get_embedding_function() new_embeddings = model(new_queries) for key, embedding in zip(new_hashes, new_embeddings): example_embeddings[key] = embedding if new_queries: print("Saving embeddings to disk...") with open(EXAMPLE_EMBEDDINGS_PATH, "wb") as f: pickle.dump(example_embeddings, f) return example_embeddings def has_geo_field(sample_collection): types = list(sample_collection.get_field_schema(flat=True).values()) types = [type(t) for t in types] return any(["Geo" in t.__name__ for t in types]) def get_label_type(sample_collection, field_name): sample = sample_collection.first() field = sample.get_field(field_name) field_type = str(type(field).__name__).lower() field_type = field_type[:-1] if field_type.endswith("s") else field_type return field_type def _replace_run_keys(prompt, runs): if "text_similarity" in runs: prompt = prompt.replace("TEXT_SIM_KEY", runs["text_similarity"]["key"]) if "image_similarity" in runs: prompt = prompt.replace( "IMAGE_SIM_KEY", runs["image_similarity"]["key"] ) if "evaluation" in runs: prompt = prompt.replace("EVAL_KEY", runs["evaluation"]["key"]) if "uniqueness" in runs: prompt = prompt.replace( "UNIQUENESS_FIELD", runs["uniqueness"]["uniqueness_field"] ) return prompt def _count_empty_class_names(label_field): return [list(class_name.values())[0] for class_name in label_field].count( [] ) def _reduce_label_fields(label_fields): label_field_keys = list(label_fields.keys()) if len(label_field_keys) == 0: return None, None elif len(label_field_keys) > 0: empty_counts = [ _count_empty_class_names(label_fields[key]) for key in label_field_keys ] min_empty_count = min(empty_counts) valid_keys = [ key for key, count in zip(label_field_keys, empty_counts) if count == min_empty_count ] return {key: label_fields[key] for key in valid_keys}, min_empty_count def _parse_runs_and_labels(runs, label_fields): reduced_label_fields, count = _reduce_label_fields(label_fields.copy()) reduced_runs = runs.copy() if count is not None and count > 0 and "text_similarity" in reduced_runs: reduced_label_fields = None return reduced_runs, reduced_label_fields def _get_evaluation_type(sample_collection, eval_key): eval_cls = sample_collection.get_evaluation_info(eval_key).config.cls if "openimages" in eval_cls: 
return "detection" elif "coco" in eval_cls: return "detection" elif "activitynet" in eval_cls: return "detection" elif "classification" in eval_cls: return "classification" return None def _load_examples(): examples = pd.read_csv(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines="skip") examples["meta"] = examples["metadata"] examples["contains_match"] = examples["stages"].str.contains("match\(") examples["contains_filter_labels"] = examples["stages"].str.contains( "filter_labels\(" ) examples["mfl"] = ( examples["contains_match"] | examples["contains_filter_labels"] ) examples["hash"] = examples["query"].apply(lambda x: hash_query(x)) queries = examples["query"].tolist() embeddings = get_or_create_embeddings(queries) embeddings = { key: np.array(embeddings[key]) for key in examples["hash"].tolist() } return examples, embeddings def get_examples(): cache = get_cache() keys = ("viewstage_examples", "viewstage_embeddings") if keys[0] not in cache or keys[1] not in cache: cache[keys[0]], cache[keys[1]] = _load_examples() return cache[keys[0]], cache[keys[1]] def _get_filtered_examples(sample_collection, runs, label_fields): examples, embeddings = get_examples() media_type = sample_collection.media_type _filter = examples["media_type"].isin([media_type, "all"]) red_runs, red_label_fields = _parse_runs_and_labels(runs, label_fields) geo = has_geo_field(sample_collection) text_sim = "text_similarity" in red_runs image_sim = "image_similarity" in red_runs meta = "metadata" in red_runs eval = "evaluation" in red_runs if red_label_fields or eval: if red_label_fields: label_field_types = list( set( [ get_label_type(sample_collection, field) for field in red_label_fields ] ) ) else: label_field_types = [] if eval: eval_key = red_runs["evaluation"]["key"] eval_types = [_get_evaluation_type(sample_collection, eval_key)] else: eval_types = [] label_types = list(set(label_field_types + eval_types + ["all"])) _filter = _filter & examples["label_type"].isin(label_types) ## contains match() or filter_labels() in stages mfl_cond = red_label_fields and not text_sim conds = [geo, text_sim, image_sim, meta, eval, mfl_cond] strs = ["geo", "text_sim", "image_sim", "meta", "eval", "mfl"] for cond, cond_str in zip(conds, strs): if not cond: _filter = _filter & (examples[cond_str] == False) filtered_examples = examples[_filter] filtered_queries, filtered_stages, hashes = ( filtered_examples["query"].tolist(), filtered_examples["stages"].tolist(), filtered_examples["hash"].tolist(), ) filtered_embeddings = [embeddings[key] for key in hashes] return filtered_queries, filtered_stages, filtered_embeddings def get_similar_examples(sample_collection, query, runs, label_fields): ex_queries, ex_stages, ex_embeddings = _get_filtered_examples( sample_collection, runs, label_fields ) model = get_embedding_function() query_embedding = np.array(model([query])) if len(query_embedding.shape) == 2: query_embedding = query_embedding[0] dists = np.array([cosine(query_embedding, emb) for emb in ex_embeddings]) sorted_ix = np.argsort(dists).astype(int) k = 20 similar_queries = [ex_queries[ix] for ix in sorted_ix[:k]] similar_stages = [ex_stages[ix] for ix in sorted_ix[:k]] return [ {"input": sq, "output": ss} for sq, ss in zip(similar_queries, similar_stages) ] def generate_view_stage_examples_prompt_template( sample_collection, query, runs, label_fields ): examples = get_similar_examples( sample_collection, query, runs, label_fields ) example_prompt = VIEW_STAGE_EXAMPLE_PROMPT return FewShotPromptTemplate( examples=examples, 
example_prompt=example_prompt, prefix="Generate code to produce the FiftyOne view stages for the following prompts:\n", suffix="Input: {text}\nOutput:", input_variables=["text"], ) def generate_view_stage_examples_prompt( sample_collection, query, runs, label_fields ): similar_examples_prompt_template = ( generate_view_stage_examples_prompt_template( sample_collection, query, runs, label_fields ) ) prompt = similar_examples_prompt_template.format(text=query) return _replace_run_keys(prompt, runs)
[ "langchain.prompts.FewShotPromptTemplate", "langchain.prompts.PromptTemplate" ]
[((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'import os\n'), ((639, 691), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_examples.csv"""'], {}), "(EXAMPLES_DIR, 'viewstage_examples.csv')\n", (651, 691), False, 'import os\n'), ((721, 825), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input', 'output']", 'template': '"""Input: {input}\nOutput: {output}"""'}), '(input_variables=[\'input\', \'output\'], template=\n """Input: {input}\nOutput: {output}""")\n', (735, 825), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((877, 916), 'os.path.isfile', 'os.path.isfile', (['EXAMPLE_EMBEDDINGS_PATH'], {}), '(EXAMPLE_EMBEDDINGS_PATH)\n', (891, 916), False, 'import os\n'), ((4348, 4406), 'pandas.read_csv', 'pd.read_csv', (['VIEW_STAGE_EXAMPLES_PATH'], {'on_bad_lines': '"""skip"""'}), "(VIEW_STAGE_EXAMPLES_PATH, on_bad_lines='skip')\n", (4359, 4406), True, 'import pandas as pd\n'), ((8216, 8455), 'langchain.prompts.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'example_prompt', 'prefix': '"""Generate code to produce the FiftyOne view stages for the following prompts:\n"""', 'suffix': '"""Input: {text}\nOutput:"""', 'input_variables': "['text']"}), '(examples=examples, example_prompt=example_prompt,\n prefix=\n """Generate code to produce the FiftyOne view stages for the following prompts:\n"""\n , suffix="""Input: {text}\nOutput:""", input_variables=[\'text\'])\n', (8237, 8455), False, 'from langchain.prompts import FewShotPromptTemplate, PromptTemplate\n'), ((446, 471), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (461, 471), False, 'import os\n'), ((4933, 4958), 'numpy.array', 'np.array', (['embeddings[key]'], {}), '(embeddings[key])\n', (4941, 4958), True, 'import numpy as np\n'), ((1006, 1020), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1017, 1020), False, 'import pickle\n'), ((1751, 1785), 'pickle.dump', 'pickle.dump', (['example_embeddings', 'f'], {}), '(example_embeddings, f)\n', (1762, 1785), False, 'import pickle\n'), ((7604, 7632), 'scipy.spatial.distance.cosine', 'cosine', (['query_embedding', 'emb'], {}), '(query_embedding, emb)\n', (7610, 7632), False, 'from scipy.spatial.distance import cosine\n'), ((7677, 7694), 'numpy.argsort', 'np.argsort', (['dists'], {}), '(dists)\n', (7687, 7694), True, 'import numpy as np\n')]
import logging

from langchain.chains import RetrievalQA

from neogpt.prompts.prompt import get_prompt


def local_retriever(db, llm, persona="default"):
    """
    Fn: local_retriever
    Description: Sets up the local retrieval-based question-answering chain.
    Args:
        db (object): The vector database object
        llm (object): The LLM model object
        persona (str): The prompt persona to use (default: "default")
    return:
        chain (object): The RetrievalQA chain object
    """
    try:
        prompt, memory = get_prompt(persona=persona)
        # Create a retriever object from the vector store
        local_retriever = db.as_retriever()
        chain = RetrievalQA.from_chain_type(
            llm=llm,
            retriever=local_retriever,
            chain_type="stuff",
            chain_type_kwargs={"prompt": prompt, "memory": memory},
            return_source_documents=True,
        )
        logging.info("Loaded Local Retriever Successfully 🔍")
        return chain
    except Exception as e:
        # Log and re-raise so callers never receive an undefined chain
        logging.error(f"Error loading local retriever: {e}")
        raise
[ "langchain.chains.RetrievalQA.from_chain_type" ]
[((466, 493), 'neogpt.prompts.prompt.get_prompt', 'get_prompt', ([], {'persona': 'persona'}), '(persona=persona)\n', (476, 493), False, 'from neogpt.prompts.prompt import get_prompt\n'), ((590, 768), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'retriever': 'local_retriever', 'chain_type': '"""stuff"""', 'chain_type_kwargs': "{'prompt': prompt, 'memory': memory}", 'return_source_documents': '(True)'}), "(llm=llm, retriever=local_retriever, chain_type=\n 'stuff', chain_type_kwargs={'prompt': prompt, 'memory': memory},\n return_source_documents=True)\n", (617, 768), False, 'from langchain.chains import RetrievalQA\n'), ((839, 892), 'logging.info', 'logging.info', (['"""Loaded Local Retriever Successfully 🔍"""'], {}), "('Loaded Local Retriever Successfully 🔍')\n", (851, 892), False, 'import logging\n'), ((928, 954), 'logging.info', 'logging.info', (['f"""Error {e}"""'], {}), "(f'Error {e}')\n", (940, 954), False, 'import logging\n')]
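A brief usage sketch for the local_retriever chain defined above. The vector store, embedding model, LLM, persistence path, and question are illustrative assumptions, not part of the original NeoGPT code; any LangChain-compatible store and model would work the same way.

# Usage sketch (assumptions: a persisted Chroma store and an OpenAI LLM; swap in any
# LangChain-compatible vector store / model).
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma

db = Chroma(
    persist_directory="./neogpt_db",  # hypothetical path
    embedding_function=HuggingFaceEmbeddings(),
)
llm = OpenAI(temperature=0)

qa_chain = local_retriever(db, llm, persona="default")
result = qa_chain({"query": "What does this project do?"})  # RetrievalQA expects a "query" key
print(result["result"])                  # generated answer
print(len(result["source_documents"]))  # retrieved source chunks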
from typing import Any, Dict from injector import inject, singleton from langchain_core.output_parsers.json import JsonOutputParser from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.runnables import RunnableSerializable from bao.components.llms import LLMs from bao.settings.settings import Settings @singleton class QueryReWrite: @inject def __init__(self, settings: Settings, llms: LLMs) -> None: self.settings = settings self.llms = llms def chain(self) -> RunnableSerializable[Dict[str, Any], Dict[str, Any]]: llm = self.llms.get_llm( llm_type=self.settings.chain_templates.query_rewrite_model ) chat_template = ChatPromptTemplate.from_messages( [ ("system", self.settings.chain_templates.query_rewrite_template), MessagesPlaceholder(variable_name="chat_history"), ("human", "{question}"), ] ) return chat_template | llm | JsonOutputParser() # type: ignore
[ "langchain_core.prompts.MessagesPlaceholder", "langchain_core.output_parsers.json.JsonOutputParser" ]
[((1029, 1047), 'langchain_core.output_parsers.json.JsonOutputParser', 'JsonOutputParser', ([], {}), '()\n', (1045, 1047), False, 'from langchain_core.output_parsers.json import JsonOutputParser\n'), ((876, 925), 'langchain_core.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (895, 925), False, 'from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n')]
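A hedged sketch of how the QueryReWrite chain above might be invoked. The settings and llms objects are normally supplied by the dependency injector, so they are only indicated here; the question and chat history are invented examples.

# Invocation sketch (assumption: settings and llms come from the application's injector).
from langchain_core.messages import AIMessage, HumanMessage

rewriter = QueryReWrite(settings=settings, llms=llms)  # injected in the real app
rewrite_chain = rewriter.chain()

rewritten = rewrite_chain.invoke(
    {
        "question": "And how does it compare to the previous quarter?",
        "chat_history": [
            HumanMessage(content="Summarise the Q3 sales report."),
            AIMessage(content="Q3 sales grew 12% over Q2."),
        ],
    }
)
print(rewritten)  # dict parsed by JsonOutputParser from the model's JSON reply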
from langchain import PromptTemplate PROMPT = """ 你需要扮演一个优秀的关键信息提取助手,从人类的对话中提取关键性内容(最多5个关键词),以协助其他助手更精准地回答问题。 注意:你不需要做任何解释说明,只需严格按照示例的格式输出关键词。 示例: 人类:我有一个服装厂,是否可以应用你们的装箱算法改善装载率呢? AI: 服装厂, 装箱算法, 装载率 现在开始: 人类:{query} AI: """ def information_extraction_raw_prompt(): return PromptTemplate(template=PROMPT, input_variables=["query"]) def information_extraction_prompt(query: str): P = PromptTemplate(template=PROMPT, input_variables=["query"]) return P.format(query=query) if __name__ == "__main__": print(information_extraction_prompt("你们的装箱算法能不能用在家居业呀?"))
[ "langchain.PromptTemplate" ]
[((281, 339), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT', 'input_variables': "['query']"}), "(template=PROMPT, input_variables=['query'])\n", (295, 339), False, 'from langchain import PromptTemplate\n'), ((397, 455), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT', 'input_variables': "['query']"}), "(template=PROMPT, input_variables=['query'])\n", (411, 455), False, 'from langchain import PromptTemplate\n')]
import base64 import email from enum import Enum from typing import Any, Dict, List, Optional, Type from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.pydantic_v1 import BaseModel, Field from langchain.tools.gmail.base import GmailBaseTool from langchain.tools.gmail.utils import clean_email_body class Resource(str, Enum): """Enumerator of Resources to search.""" THREADS = "threads" MESSAGES = "messages" class SearchArgsSchema(BaseModel): """Input for SearchGmailTool.""" # From https://support.google.com/mail/answer/7190?hl=en query: str = Field( ..., description="The Gmail query. Example filters include from:sender," " to:recipient, subject:subject, -filtered_term," " in:folder, is:important|read|starred, after:year/mo/date, " "before:year/mo/date, label:label_name" ' "exact phrase".' " Search newer/older than using d (day), m (month), and y (year): " "newer_than:2d, older_than:1y." " Attachments with extension example: filename:pdf. Multiple term" " matching example: from:amy OR from:david.", ) resource: Resource = Field( default=Resource.MESSAGES, description="Whether to search for threads or messages.", ) max_results: int = Field( default=10, description="The maximum number of results to return.", ) class GmailSearch(GmailBaseTool): """Tool that searches for messages or threads in Gmail.""" name: str = "search_gmail" description: str = ( "Use this tool to search for email messages or threads." " The input must be a valid Gmail query." " The output is a JSON list of the requested resource." ) args_schema: Type[SearchArgsSchema] = SearchArgsSchema def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # Add the thread message snippets to the thread results results = [] for thread in threads: thread_id = thread["id"] thread_data = ( self.api_resource.users() .threads() .get(userId="me", id=thread_id) .execute() ) messages = thread_data["messages"] thread["messages"] = [] for message in messages: snippet = message["snippet"] thread["messages"].append({"snippet": snippet, "id": message["id"]}) results.append(thread) return results def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]: results = [] for message in messages: message_id = message["id"] message_data = ( self.api_resource.users() .messages() .get(userId="me", format="raw", id=message_id) .execute() ) raw_message = base64.urlsafe_b64decode(message_data["raw"]) email_msg = email.message_from_bytes(raw_message) subject = email_msg["Subject"] sender = email_msg["From"] message_body = email_msg.get_payload() body = clean_email_body(message_body) results.append( { "id": message["id"], "threadId": message_data["threadId"], "snippet": message_data["snippet"], "body": body, "subject": subject, "sender": sender, } ) return results def _run( self, query: str, resource: Resource = Resource.MESSAGES, max_results: int = 10, run_manager: Optional[CallbackManagerForToolRun] = None, ) -> List[Dict[str, Any]]: """Run the tool.""" results = ( self.api_resource.users() .messages() .list(userId="me", q=query, maxResults=max_results) .execute() .get(resource.value, []) ) if resource == Resource.THREADS: return self._parse_threads(results) elif resource == Resource.MESSAGES: return self._parse_messages(results) else: raise NotImplementedError(f"Resource of type {resource} not implemented.")
[ "langchain.pydantic_v1.Field", "langchain.tools.gmail.utils.clean_email_body" ]
[((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david."""'}), '(..., description=\n \'The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older than using d (day), m (month), and y (year): newer_than:2d, older_than:1y. Attachments with extension example: filename:pdf. Multiple term matching example: from:amy OR from:david.\'\n )\n', (611, 1054), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1181, 1276), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'Resource.MESSAGES', 'description': '"""Whether to search for threads or messages."""'}), "(default=Resource.MESSAGES, description=\n 'Whether to search for threads or messages.')\n", (1186, 1276), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((1318, 1391), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of results to return."""'}), "(default=10, description='The maximum number of results to return.')\n", (1323, 1391), False, 'from langchain.pydantic_v1 import BaseModel, Field\n'), ((2960, 3005), 'base64.urlsafe_b64decode', 'base64.urlsafe_b64decode', (["message_data['raw']"], {}), "(message_data['raw'])\n", (2984, 3005), False, 'import base64\n'), ((3031, 3068), 'email.message_from_bytes', 'email.message_from_bytes', (['raw_message'], {}), '(raw_message)\n', (3055, 3068), False, 'import email\n'), ((3224, 3254), 'langchain.tools.gmail.utils.clean_email_body', 'clean_email_body', (['message_body'], {}), '(message_body)\n', (3240, 3254), False, 'from langchain.tools.gmail.utils import clean_email_body\n')]
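A usage sketch for the GmailSearch tool above, assuming local OAuth credentials are already configured; build_resource_service from langchain.tools.gmail.utils builds the authenticated Gmail API resource the tool expects. The query and result handling are illustrative.

# Usage sketch (assumption: Gmail OAuth client secrets / token files exist locally).
from langchain.tools.gmail.utils import build_resource_service

api_resource = build_resource_service()  # authenticated googleapiclient Gmail resource
search_tool = GmailSearch(api_resource=api_resource)

results = search_tool.run(
    {"query": "from:amy newer_than:7d", "resource": "messages", "max_results": 5}
)
for message in results:
    print(message["subject"], "-", message["snippet"])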
from langchain import PromptTemplate from codedog.templates import grimoire_en TRANSLATE_PROMPT = PromptTemplate( template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=["language", "description", "content"] )
[ "langchain.PromptTemplate" ]
[((100, 217), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'grimoire_en.TRANSLATE_PR_REVIEW', 'input_variables': "['language', 'description', 'content']"}), "(template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=[\n 'language', 'description', 'content'])\n", (114, 217), False, 'from langchain import PromptTemplate\n')]
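A minimal sketch of filling the translation prompt template above; the field values are invented placeholders, not real review content.

# Formatting sketch (placeholder values only).
filled_prompt = TRANSLATE_PROMPT.format(
    language="Chinese",
    description="Adds retry logic to the HTTP client.",
    content="The review flags a missing timeout and suggests exponential backoff.",
)
print(filled_prompt)  # ready to send to the translation LLM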
# Importing necessary library import streamlit as st # Setting up the page configuration st.set_page_config( page_title="QuickDigest AI", page_icon=":brain:", layout="wide", initial_sidebar_state="expanded" ) # Defining the function to display the home page def home(): import streamlit as st from streamlit_extras.badges import badge from streamlit_extras.colored_header import colored_header from streamlit_extras.let_it_rain import rain # Displaying a rain animation with specified parameters rain( emoji="🎈", font_size=54, falling_speed=5, animation_length="1", ) # Displaying a colored header with specified parameters colored_header( label="QuickDigest AI🧠, Your Intelligent Data Companion", description="~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop", color_name="violet-70", ) # Displaying information and warnings in the sidebar st.sidebar.info( "Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen." ) st.sidebar.info( "For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq)." ) st.sidebar.warning( "LLMs may produce inaccurate information about people, places, or facts. Don't entirely trust them." ) # Displaying markdown text on the page st.markdown( "<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>", unsafe_allow_html=True ) st.markdown( "**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform's cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files." ) st.markdown( "**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience." ) st.markdown( "**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI's real-time web search feature ensures you're always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time." ) st.markdown( "**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes." 
) st.markdown("---") # Displaying a support section with badges and link button st.markdown("<h5>Support Us</h5>", unsafe_allow_html=True) col1, col2, col3, col4 = st.columns(4) with col1: st.write("Star this repository on Github") badge(type="github", name="codingis4noobs2/QuickDigest") with col2: st.write("Follow me on twitter") badge(type="twitter", name="4gameparth") with col3: st.write("Buy me a coffee") badge(type="buymeacoffee", name="codingis4noobs2") with col4: st.link_button("Upvote on Replit", "https://replit.com/@ParthShah38/QuickDigestAI?v=1") # Function to display chat with files page def chat_with_files(): import os import streamlit as st from streamlit_extras.badges import badge from streamlit_extras.colored_header import colored_header from llama_index import ( OpenAIEmbedding, ServiceContext, set_global_service_context, ) from llama_index.llms import OpenAI from llama_index.chat_engine.types import StreamingAgentChatResponse from llama_index import SimpleDirectoryReader, VectorStoreIndex import assemblyai as aai from PyPDF2 import PdfReader from docx import Document # Cache the result to avoid recomputation @st.cache_resource(show_spinner="Indexing documents...Please have patience") def build_index(files): documents = SimpleDirectoryReader(input_files=files).load_data() index = VectorStoreIndex.from_documents(documents) return index # Handle streaming responses def handle_stream(root, stream: StreamingAgentChatResponse): text = "" root.markdown("Thinking...") for token in stream.response_gen: text += token root.markdown(text) return text # Define constants and settings CACHE_DIR = "./uploads" aai.settings.api_key = st.secrets['assembly_api_key'] # Render chat messages def render_message(message): with st.chat_message(message["role"]): st.write(message["text"]) # Transcribe audio and video files def transcribe_audio_video(file_path): transcriber = aai.Transcriber() transcript = transcriber.transcribe(file_path) transcript_path = file_path + ".txt" with open(transcript_path, "w") as f: f.write(transcript.text) return transcript_path # Upload files and cache them def upload_files(types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx'], **kwargs): files = st.file_uploader( label=f"Upload files", type=types, **kwargs ) if not files: st.info(f"Please add documents, Note: Scanned documents are not supported yet!") st.stop() return cache_files(files, types=types) # Cache uploaded files def cache_files(files, types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx']) -> list[str]: filepaths = [] for file in files: # Determine the file extension from the mime type ext = file.type.split("/")[-1] if ext == "plain": # Handle text/plain mime type ext = "txt" elif ext in ["vnd.openxmlformats-officedocument.wordprocessingml.document", "vnd.ms-word"]: ext = "docx" # or "doc" depending on your needs if ext not in types: continue filepath = f"{CACHE_DIR}/{file.name}" with open(filepath, "wb") as f: f.write(file.getvalue()) if ext in ["mp3", "mp4"]: filepath = transcribe_audio_video(filepath) filepaths.append(filepath) # st.sidebar.write("Uploaded files", filepaths) # Debug statement with st.sidebar: with st.expander("Uploaded Files"): filepaths_pretty = "\n".join(f"- {filepath}" for filepath in filepaths) st.markdown(f"{filepaths_pretty}") return filepaths def transcribe_and_save(file_path): transcriber = aai.Transcriber() transcript = transcriber.transcribe(file_path) transcript_path = file_path + ".txt" with open(transcript_path, "w") as f: f.write(transcript.text) return 
transcript_path # Save extracted text to a txt file def save_extracted_text_to_txt(text, filename): txt_filename = os.path.splitext(filename)[0] + ".txt" txt_filepath = os.path.join('uploads', txt_filename) with open(txt_filepath, 'w', encoding='utf-8') as txt_file: txt_file.write(text) return txt_filepath # Get OpenAI API key from session state def get_key(): return st.session_state["openai_api_key"] # Read text from Word document def read_word_file(file_path): doc = Document(file_path) full_text = [] for para in doc.paragraphs: full_text.append(para.text) return '\n'.join(full_text) # Process uploaded documents def process_documents(documents): processed_docs = [] for doc in documents: if doc.endswith('.pdf'): processed_docs.append(process_pdf(doc)) elif doc.endswith(('.doc', '.docx')): text = read_word_file(doc) txt_filepath = save_extracted_text_to_txt(text, os.path.basename(doc)) processed_docs.append(txt_filepath) elif doc.endswith(('.mp3', '.mp4', '.mpeg')): processed_docs.append(transcribe_and_save(doc)) else: processed_docs.append(doc) return processed_docs # Process PDF files def process_pdf(pdf_path): reader = PdfReader(pdf_path) all_text = "" for page in reader.pages: extracted_text = page.extract_text() if extracted_text: processed_text = ' '.join(extracted_text.split('\n')) all_text += processed_text + "\n\n" txt_filepath = save_extracted_text_to_txt(all_text, os.path.basename(pdf_path)) os.remove(pdf_path) # Delete the original PDF file return txt_filepath # Main logic for handling OpenAI API key and document processing if "openai_api_key" not in st.session_state: openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password") if not openai_api_key: st.sidebar.warning("Please add your OpenAI API key to continue!!") st.warning("Please add your OpenAI API key to continue!!") st.sidebar.info("To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you're unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). 
While obtaining the API key doesn't require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services.") st.stop() st.session_state["openai_api_key"] = openai_api_key st.sidebar.text_input("Enter Youtube Video ID(Coming soon)", disabled=True) st.sidebar.text_input("Enter Spotify Podast link(Coming soon)", disabled=True) openai_api_key = get_key() if openai_api_key: st.toast('OpenAI API Key Added ✅') # Define service-context with st.sidebar: with st.expander("Advanced Settings"): st.session_state['temperature'] = st.number_input("Enter Temperature", help="It determines how creative the model should be", min_value=0.0,max_value=1.0, value=0.1) llm = OpenAI(temperature=st.session_state['temperature'], model='gpt-3.5-turbo', api_key=openai_api_key) embed_model = OpenAIEmbedding(api_key=openai_api_key) service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model) set_global_service_context(service_context) # Upload PDFs, DOCs, TXTs, MP3s, and MP4s documents = upload_files(types=["pdf", "txt", "mp3", "mp4", 'mpeg', 'doc', 'docx'], accept_multiple_files=True) # Process the uploaded documents processed_documents = process_documents(documents) if not processed_documents: st.warning("No documents uploaded!") st.stop() index = build_index(processed_documents) query_engine = index.as_chat_engine(chat_mode="condense_question", streaming=True) messages = st.session_state.get("messages", []) if not messages: messages.append({"role": "assistant", "text": "Hi!"}) for message in messages: render_message(message) if user_query := st.chat_input(): message = {"role": "user", "text": user_query} messages.append(message) render_message(message) with st.chat_message("assistant"): stream = query_engine.stream_chat(user_query) text = handle_stream(st.empty(), stream) message = {"role": "assistant", "text": text} messages.append(message) st.session_state.messages = messages # Function to use LLMs with web search def use_llms_with_web(): from langchain.agents import ConversationalChatAgent, AgentExecutor from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain.tools import DuckDuckGoSearchRun import streamlit as st st.title("Use web search with LLMs") # Taking OpenAI API key input from the user openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password") # Initializing message history and memory msgs = StreamlitChatMessageHistory() memory = ConversationBufferMemory( chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output" ) # Resetting chat history logic if len(msgs.messages) == 0 or st.sidebar.button("Reset chat history"): msgs.clear() msgs.add_ai_message("How can I help you?") st.session_state.steps = {} # Defining avatars for chat messages avatars = {"human": "user", "ai": "assistant"} for idx, msg in enumerate(msgs.messages): with st.chat_message(avatars[msg.type]): # Render intermediate steps if any were saved for step in st.session_state.steps.get(str(idx), []): if step[0].tool == "_Exception": continue with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"): st.write(step[0].log) st.write(step[1]) st.write(msg.content) # Taking new input from the user if prompt := st.chat_input(placeholder="Who won the 2022 Cricket World Cup?"): st.chat_message("user").write(prompt) # 
Checking if OpenAI API key is provided if not openai_api_key: st.info("Please add your OpenAI API key to continue.") st.stop() # Initializing LLM and tools for web search llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True) tools = [DuckDuckGoSearchRun(name="Search")] chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools) executor = AgentExecutor.from_agent_and_tools( agent=chat_agent, tools=tools, memory=memory, return_intermediate_steps=True, handle_parsing_errors=True, ) with st.chat_message("assistant"): st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False) response = executor(prompt, callbacks=[st_cb]) st.write(response["output"]) st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"] # Function to display chat with dataset page def chat_with_dataset(): from langchain.agents import AgentType from langchain.agents import create_pandas_dataframe_agent from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI import streamlit as st import pandas as pd import os file_formats = { "csv": pd.read_csv, "xls": pd.read_excel, "xlsx": pd.read_excel, "xlsm": pd.read_excel, "xlsb": pd.read_excel, } def clear_submit(): """ Clear the Submit Button State Returns: """ st.session_state["submit"] = False @st.cache_data() def load_data(uploaded_file): """ Load data from the uploaded file based on its extension. """ try: ext = os.path.splitext(uploaded_file.name)[1][1:].lower() except: ext = uploaded_file.split(".")[-1] if ext in file_formats: return file_formats[ext](uploaded_file) else: st.error(f"Unsupported file format: {ext}") return None st.title("Chat with your dataset") st.info("Asking one question at a time will result in a better output") uploaded_file = st.file_uploader( "Upload a Data file", type=list(file_formats.keys()), help="Various File formats are Support", on_change=clear_submit, ) df = None # Initialize df to None outside the if block if uploaded_file: df = load_data(uploaded_file) # df will be assigned a value if uploaded_file is truthy if df is None: # Check if df is still None before proceeding st.warning("No data file uploaded or there was an error in loading the data.") return # Exit the function early if df is None openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password") st.sidebar.info("If you face a KeyError: 'content' error, Press the clear conversation histroy button") if "messages" not in st.session_state or st.sidebar.button("Clear conversation history"): st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}] # Display previous chat messages for msg in st.session_state.messages: st.chat_message(msg["role"]).write(msg["content"]) if prompt := st.chat_input(placeholder="What is this data about?"): st.session_state.messages.append({"role": "user", "content": prompt}) st.chat_message("user").write(prompt) # Check if OpenAI API key is provided if not openai_api_key: st.info("Please add your OpenAI API key to continue.") st.stop() llm = ChatOpenAI( temperature=0, model="gpt-3.5-turbo-0613", openai_api_key=openai_api_key, streaming=True ) pandas_df_agent = create_pandas_dataframe_agent( llm, df, verbose=True, agent_type=AgentType.OPENAI_FUNCTIONS, handle_parsing_errors=True, ) with st.chat_message("assistant"): st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False) response = pandas_df_agent.run(st.session_state.messages, callbacks=[st_cb]) 
st.session_state.messages.append({"role": "assistant", "content": response}) st.write(response) # Function to display transform products page def transform_products(): import streamlit as st import requests import os import replicate import io from PIL import Image st.session_state['replicate_api_token'] = st.sidebar.text_input("Replicate API Token", type='password') os.environ['REPLICATE_API_TOKEN'] = st.session_state['replicate_api_token'] if not st.session_state['replicate_api_token']: st.sidebar.warning('Please enter your Replicate API Token to continue!!') st.sidebar.info("You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)") st.stop() if st.session_state['replicate_api_token']: st.info("This model works best with product images having transparent or plain backgrounds") # Prompt user to upload an image file img = st.file_uploader("Upload your product image", type=['png', 'jpg', 'jpeg']) if img is not None: has_plain_background = st.toggle("Does your product image have a plain or transparent background? If not, let us do the hard work for you!") prompt = st.text_input("Enter Prompt", help="Enter something you imagine...") negative_prompt = st.text_input("Enter Negative Prompt", help="Write what you don't want in the generated images") submit = st.button("Submit") if submit: if has_plain_background: # If image already has a plain background, prepare it for Replicate image = Image.open(img) bytes_obj = io.BytesIO() image.save(bytes_obj, format='PNG') bytes_obj.seek(0) else: # If image does not have a plain background, send it to ClipDrop to remove background image_file_object = img.read() r = requests.post('https://clipdrop-api.co/remove-background/v1', files={ 'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg') }, headers={'x-api-key': st.secrets['clipdrop_api_key']} ) if r.ok: # If background removal is successful, prepare image for Replicate image = Image.open(io.BytesIO(r.content)) bytes_obj = io.BytesIO() image.save(bytes_obj, format='PNG') bytes_obj.seek(0) else: r.raise_for_status() st.error('Failed to remove background. Try again.') st.stop() # Send image to Replicate for transformation output = replicate.run( "logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df", input={"image_path": bytes_obj, "prompt": prompt, "image_num": 4} ) col1, col2 = st.columns(2) with col1: st.image(output[1]) st.image(output[2]) with col2: st.image(output[3]) st.image(output[4]) # Function to generate images based on user input def generate_images(): import streamlit as st import replicate import os st.session_state['replicate_api_token'] = st.sidebar.text_input("Replicate API Token", type='password') os.environ['REPLICATE_API_TOKEN'] = st.session_state['replicate_api_token'] if not st.session_state['replicate_api_token']: st.sidebar.warning('Please enter your Replicate API Token to continue!!') st.sidebar.info("You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)") st.stop() if st.session_state['replicate_api_token']: prompt = st.text_input( "Enter prompt", help="Write something you can imagine..." 
) negative_prompt = st.text_input( "Enter Negative prompt", help="Write what you don't want to see in the generated images" ) submit = st.button("Submit") if submit: output = replicate.run( "stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f", input={ "prompt": prompt, "negative_prompt": negative_prompt, "num_outputs": 4 }, ) col1, col2 = st.columns(2) with col1: st.image(output[0]) st.image(output[2]) with col2: st.image(output[1]) st.image(output[3]) # Dictonary to store all functions as pages page_names_to_funcs = { "Home 🏠": home, "Chat with files 📁": chat_with_files, "Chat with dataset 📖": chat_with_dataset, "Use web search with LLMs 🌐": use_llms_with_web, "Generate Images 🖌️": generate_images, "Transform your products 🎨": transform_products, } # display page by dictionary demo_name = st.sidebar.selectbox("Choose a page to navigate to", page_names_to_funcs.keys()) page_names_to_funcs[demo_name]()
[ "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.memory.ConversationBufferMemory", "langchain.agents.create_pandas_dataframe_agent", "langchain.chat_models.ChatOpenAI", "langchain.tools.DuckDuckGoSearchRun", "langchain.agents.ConversationalChatAgent.from_llm_and_tools", "langchain.memory.chat_message_histories.StreamlitChatMessageHistory" ]
[((91, 213), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""QuickDigest AI"""', 'page_icon': '""":brain:"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='QuickDigest AI', page_icon=':brain:', layout\n ='wide', initial_sidebar_state='expanded')\n", (109, 213), True, 'import streamlit as st\n'), ((540, 608), 'streamlit_extras.let_it_rain.rain', 'rain', ([], {'emoji': '"""🎈"""', 'font_size': '(54)', 'falling_speed': '(5)', 'animation_length': '"""1"""'}), "(emoji='🎈', font_size=54, falling_speed=5, animation_length='1')\n", (544, 608), False, 'from streamlit_extras.let_it_rain import rain\n'), ((717, 918), 'streamlit_extras.colored_header.colored_header', 'colored_header', ([], {'label': '"""QuickDigest AI🧠, Your Intelligent Data Companion"""', 'description': '"""~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop"""', 'color_name': '"""violet-70"""'}), "(label='QuickDigest AI🧠, Your Intelligent Data Companion',\n description=\n '~ Powered by OpenAI, Llamaindex, AssemblyAI, Langchain, Replicate, Clipdrop'\n , color_name='violet-70')\n", (731, 918), False, 'from streamlit_extras.colored_header import colored_header\n'), ((998, 1164), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen."""'], {}), "(\n 'Visit [OpenAI Pricing](https://openai.com/pricing#language-models) to get an overview of costs incurring depending upon the model chosen.'\n )\n", (1013, 1164), True, 'import streamlit as st\n'), ((1173, 1599), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq)."""'], {}), "(\n 'For key & data privacy concerns, We do not store your Key, it will be removed after your session ends. Also OpenAI will not use data submitted by customers via our API to train or improve our models, unless you explicitly decide to share your data with us for this purpose, For more info please visit [OpenAI FAQs](https://help.openai.com/en/articles/7039943-data-usage-for-consumer-services-faq).'\n )\n", (1188, 1599), True, 'import streamlit as st\n'), ((1608, 1738), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""LLMs may produce inaccurate information about people, places, or facts. Don\'t entirely trust them."""'], {}), '(\n "LLMs may produce inaccurate information about people, places, or facts. Don\'t entirely trust them."\n )\n', (1626, 1738), True, 'import streamlit as st\n'), ((1795, 2139), 'streamlit.markdown', 'st.markdown', (['"""<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>"""'], {'unsafe_allow_html': '(True)'}), "(\n '<h6>Discover a new horizon of data interaction with QuickDigest AI, your intelligent companion in navigating through diverse data formats. 
QuickDigest AI is meticulously crafted to simplify and enrich your engagement with data, ensuring a seamless flow of insights right at your fingertips.</h6>'\n , unsafe_allow_html=True)\n", (1806, 2139), True, 'import streamlit as st\n'), ((2156, 2585), 'streamlit.markdown', 'st.markdown', (['"""**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform\'s cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."""'], {}), '(\n "**Effortless Data Extraction and Interaction:** QuickDigest AI stands as a beacon of innovation, allowing users to upload and interact with a variety of file formats including PDFs, Word documents, text files, and even audio/video files. The platform\'s cutting-edge technology ensures a smooth extraction of data, paving the way for meaningful conversations with the information gleaned from these files."\n )\n', (2167, 2585), True, 'import streamlit as st\n'), ((2594, 2930), 'streamlit.markdown', 'st.markdown', (['"""**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience."""'], {}), "(\n '**Engage with your Datasets:** Dive into datasets like never before. QuickDigest AI invites you to upload your dataset and engage in a dialogue with it. Our advanced AI algorithms facilitate a conversational interaction with your dataset, making the extraction of insights an intuitive and enriching experience.'\n )\n", (2605, 2930), True, 'import streamlit as st\n'), ((2939, 3289), 'streamlit.markdown', 'st.markdown', (['"""**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI\'s real-time web search feature ensures you\'re always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."""'], {}), '(\n "**Real-Time Web Search:** One of the limitations of large language models is there limited knowledge. QuickDigest AI\'s real-time web search feature ensures you\'re always ahead with the latest information. Be it market trends, news updates, or the newest research findings, QuickDigest AI brings the world to you in real-time."\n )\n', (2950, 3289), True, 'import streamlit as st\n'), ((3298, 3734), 'streamlit.markdown', 'st.markdown', (['"""**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes."""'], {}), "(\n '**Ignite Your Creative Spark:** For product creators, QuickDigest AI unveils a realm of possibilities. Bored of simple product images, The Product Advertising Image Creator is your tool to craft captivating advertising images that resonate with your audience. 
Additionally, the Image Generator feature is your canvas to bring your creative visions to life, creating visually appealing images that speak volumes.'\n )\n", (3309, 3734), True, 'import streamlit as st\n'), ((3744, 3762), 'streamlit.markdown', 'st.markdown', (['"""---"""'], {}), "('---')\n", (3755, 3762), True, 'import streamlit as st\n'), ((3835, 3893), 'streamlit.markdown', 'st.markdown', (['"""<h5>Support Us</h5>"""'], {'unsafe_allow_html': '(True)'}), "('<h5>Support Us</h5>', unsafe_allow_html=True)\n", (3846, 3893), True, 'import streamlit as st\n'), ((3923, 3936), 'streamlit.columns', 'st.columns', (['(4)'], {}), '(4)\n', (3933, 3936), True, 'import streamlit as st\n'), ((5058, 5133), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '"""Indexing documents...Please have patience"""'}), "(show_spinner='Indexing documents...Please have patience')\n", (5075, 5133), True, 'import streamlit as st\n'), ((10968, 11043), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter Youtube Video ID(Coming soon)"""'], {'disabled': '(True)'}), "('Enter Youtube Video ID(Coming soon)', disabled=True)\n", (10989, 11043), True, 'import streamlit as st\n'), ((11048, 11126), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Enter Spotify Podast link(Coming soon)"""'], {'disabled': '(True)'}), "('Enter Spotify Podast link(Coming soon)', disabled=True)\n", (11069, 11126), True, 'import streamlit as st\n'), ((13550, 13586), 'streamlit.title', 'st.title', (['"""Use web search with LLMs"""'], {}), "('Use web search with LLMs')\n", (13558, 13586), True, 'import streamlit as st\n'), ((13656, 13712), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (13677, 13712), True, 'import streamlit as st\n'), ((13770, 13799), 'langchain.memory.chat_message_histories.StreamlitChatMessageHistory', 'StreamlitChatMessageHistory', ([], {}), '()\n', (13797, 13799), False, 'from langchain.memory.chat_message_histories import StreamlitChatMessageHistory\n'), ((13813, 13930), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'chat_memory': 'msgs', 'return_messages': '(True)', 'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(chat_memory=msgs, return_messages=True, memory_key\n ='chat_history', output_key='output')\n", (13837, 13930), False, 'from langchain.memory import ConversationBufferMemory\n'), ((16632, 16647), 'streamlit.cache_data', 'st.cache_data', ([], {}), '()\n', (16645, 16647), True, 'import streamlit as st\n'), ((17100, 17134), 'streamlit.title', 'st.title', (['"""Chat with your dataset"""'], {}), "('Chat with your dataset')\n", (17108, 17134), True, 'import streamlit as st\n'), ((17139, 17210), 'streamlit.info', 'st.info', (['"""Asking one question at a time will result in a better output"""'], {}), "('Asking one question at a time will result in a better output')\n", (17146, 17210), True, 'import streamlit as st\n'), ((17819, 17875), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (17840, 17875), True, 'import streamlit as st\n'), ((17885, 17998), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""If you face a KeyError: \'content\' error, Press the clear conversation histroy button"""'], {}), '(\n "If you face a KeyError: \'content\' error, Press the clear conversation histroy button"\n )\n', (17900, 17998), 
True, 'import streamlit as st\n'), ((19630, 19691), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Replicate API Token"""'], {'type': '"""password"""'}), "('Replicate API Token', type='password')\n", (19651, 19691), True, 'import streamlit as st\n'), ((22919, 22980), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""Replicate API Token"""'], {'type': '"""password"""'}), "('Replicate API Token', type='password')\n", (22940, 22980), True, 'import streamlit as st\n'), ((3960, 4002), 'streamlit.write', 'st.write', (['"""Star this repository on Github"""'], {}), "('Star this repository on Github')\n", (3968, 4002), True, 'import streamlit as st\n'), ((4011, 4067), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""github"""', 'name': '"""codingis4noobs2/QuickDigest"""'}), "(type='github', name='codingis4noobs2/QuickDigest')\n", (4016, 4067), False, 'from streamlit_extras.badges import badge\n'), ((4091, 4123), 'streamlit.write', 'st.write', (['"""Follow me on twitter"""'], {}), "('Follow me on twitter')\n", (4099, 4123), True, 'import streamlit as st\n'), ((4132, 4172), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""twitter"""', 'name': '"""4gameparth"""'}), "(type='twitter', name='4gameparth')\n", (4137, 4172), False, 'from streamlit_extras.badges import badge\n'), ((4196, 4223), 'streamlit.write', 'st.write', (['"""Buy me a coffee"""'], {}), "('Buy me a coffee')\n", (4204, 4223), True, 'import streamlit as st\n'), ((4232, 4282), 'streamlit_extras.badges.badge', 'badge', ([], {'type': '"""buymeacoffee"""', 'name': '"""codingis4noobs2"""'}), "(type='buymeacoffee', name='codingis4noobs2')\n", (4237, 4282), False, 'from streamlit_extras.badges import badge\n'), ((4306, 4397), 'streamlit.link_button', 'st.link_button', (['"""Upvote on Replit"""', '"""https://replit.com/@ParthShah38/QuickDigestAI?v=1"""'], {}), "('Upvote on Replit',\n 'https://replit.com/@ParthShah38/QuickDigestAI?v=1')\n", (4320, 4397), True, 'import streamlit as st\n'), ((5251, 5293), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (5282, 5293), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((5963, 5980), 'assemblyai.Transcriber', 'aai.Transcriber', ([], {}), '()\n', (5978, 5980), True, 'import assemblyai as aai\n'), ((6337, 6398), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': 'f"""Upload files"""', 'type': 'types'}), "(label=f'Upload files', type=types, **kwargs)\n", (6353, 6398), True, 'import streamlit as st\n'), ((7852, 7869), 'assemblyai.Transcriber', 'aai.Transcriber', ([], {}), '()\n', (7867, 7869), True, 'import assemblyai as aai\n'), ((8262, 8299), 'os.path.join', 'os.path.join', (['"""uploads"""', 'txt_filename'], {}), "('uploads', txt_filename)\n", (8274, 8299), False, 'import os\n'), ((8628, 8647), 'docx.Document', 'Document', (['file_path'], {}), '(file_path)\n', (8636, 8647), False, 'from docx import Document\n'), ((9524, 9543), 'PyPDF2.PdfReader', 'PdfReader', (['pdf_path'], {}), '(pdf_path)\n', (9533, 9543), False, 'from PyPDF2 import PdfReader\n'), ((9898, 9917), 'os.remove', 'os.remove', (['pdf_path'], {}), '(pdf_path)\n', (9907, 9917), False, 'import os\n'), ((10122, 10178), 'streamlit.sidebar.text_input', 'st.sidebar.text_input', (['"""OpenAI API Key"""'], {'type': '"""password"""'}), "('OpenAI API Key', type='password')\n", (10143, 10178), True, 'import streamlit as st\n'), ((11195, 11229), 'streamlit.toast', 'st.toast', (['"""OpenAI API Key 
Added ✅"""'], {}), "('OpenAI API Key Added ✅')\n", (11203, 11229), True, 'import streamlit as st\n'), ((11535, 11637), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': "st.session_state['temperature']", 'model': '"""gpt-3.5-turbo"""', 'api_key': 'openai_api_key'}), "(temperature=st.session_state['temperature'], model='gpt-3.5-turbo',\n api_key=openai_api_key)\n", (11541, 11637), False, 'from llama_index.llms import OpenAI\n'), ((11656, 11695), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (11671, 11695), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((11722, 11784), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (11750, 11784), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((11793, 11836), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (11819, 11836), False, 'from llama_index import OpenAIEmbedding, ServiceContext, set_global_service_context\n'), ((12378, 12414), 'streamlit.session_state.get', 'st.session_state.get', (['"""messages"""', '[]'], {}), "('messages', [])\n", (12398, 12414), True, 'import streamlit as st\n'), ((14009, 14048), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Reset chat history"""'], {}), "('Reset chat history')\n", (14026, 14048), True, 'import streamlit as st\n'), ((14812, 14876), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Who won the 2022 Cricket World Cup?"""'}), "(placeholder='Who won the 2022 Cricket World Cup?')\n", (14825, 14876), True, 'import streamlit as st\n'), ((15159, 15248), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'openai_api_key', 'streaming': '(True)'}), "(model_name='gpt-3.5-turbo', openai_api_key=openai_api_key,\n streaming=True)\n", (15169, 15248), False, 'from langchain.chat_models import ChatOpenAI\n'), ((15319, 15383), 'langchain.agents.ConversationalChatAgent.from_llm_and_tools', 'ConversationalChatAgent.from_llm_and_tools', ([], {'llm': 'llm', 'tools': 'tools'}), '(llm=llm, tools=tools)\n', (15361, 15383), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((15404, 15549), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'chat_agent', 'tools': 'tools', 'memory': 'memory', 'return_intermediate_steps': '(True)', 'handle_parsing_errors': '(True)'}), '(agent=chat_agent, tools=tools, memory=\n memory, return_intermediate_steps=True, handle_parsing_errors=True)\n', (15438, 15549), False, 'from langchain.agents import ConversationalChatAgent, AgentExecutor\n'), ((17662, 17740), 'streamlit.warning', 'st.warning', (['"""No data file uploaded or there was an error in loading the data."""'], {}), "('No data file uploaded or there was an error in loading the data.')\n", (17672, 17740), True, 'import streamlit as st\n'), ((18034, 18081), 'streamlit.sidebar.button', 'st.sidebar.button', (['"""Clear conversation history"""'], {}), "('Clear conversation history')\n", (18051, 18081), True, 'import streamlit as st\n'), ((18341, 18394), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""What is this data about?"""'}), "(placeholder='What is this data about?')\n", (18354, 18394), True, 
'import streamlit as st\n'), ((18404, 18473), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (18436, 18473), True, 'import streamlit as st\n'), ((18701, 18806), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""', 'openai_api_key': 'openai_api_key', 'streaming': '(True)'}), "(temperature=0, model='gpt-3.5-turbo-0613', openai_api_key=\n openai_api_key, streaming=True)\n", (18711, 18806), False, 'from langchain.chat_models import ChatOpenAI\n'), ((18851, 18975), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', (['llm', 'df'], {'verbose': '(True)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)'}), '(llm, df, verbose=True, agent_type=AgentType.\n OPENAI_FUNCTIONS, handle_parsing_errors=True)\n', (18880, 18975), False, 'from langchain.agents import create_pandas_dataframe_agent\n'), ((19833, 19906), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please enter your Replicate API Token to continue!!"""'], {}), "('Please enter your Replicate API Token to continue!!')\n", (19851, 19906), True, 'import streamlit as st\n'), ((19915, 20045), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)"""'], {}), "(\n 'You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)'\n )\n", (19930, 20045), True, 'import streamlit as st\n'), ((20044, 20053), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (20051, 20053), True, 'import streamlit as st\n'), ((20119, 20221), 'streamlit.info', 'st.info', (['"""This model works best with product images having transparent or plain backgrounds"""'], {}), "(\n 'This model works best with product images having transparent or plain backgrounds'\n )\n", (20126, 20221), True, 'import streamlit as st\n'), ((20272, 20346), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your product image"""'], {'type': "['png', 'jpg', 'jpeg']"}), "('Upload your product image', type=['png', 'jpg', 'jpeg'])\n", (20288, 20346), True, 'import streamlit as st\n'), ((23122, 23195), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please enter your Replicate API Token to continue!!"""'], {}), "('Please enter your Replicate API Token to continue!!')\n", (23140, 23195), True, 'import streamlit as st\n'), ((23204, 23334), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)"""'], {}), "(\n 'You can get your Replicate API Token form here: [Replicate](https://replicate.com/account/api-tokens)'\n )\n", (23219, 23334), True, 'import streamlit as st\n'), ((23333, 23342), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (23340, 23342), True, 'import streamlit as st\n'), ((23409, 23481), 'streamlit.text_input', 'st.text_input', (['"""Enter prompt"""'], {'help': '"""Write something you can imagine..."""'}), "('Enter prompt', help='Write something you can imagine...')\n", (23422, 23481), True, 'import streamlit as st\n'), ((23543, 23651), 'streamlit.text_input', 'st.text_input', (['"""Enter Negative prompt"""'], {'help': '"""Write what you don\'t want to see in the generated images"""'}), '(\'Enter Negative prompt\', help=\n "Write what you don\'t want to see in the generated images")\n', (23556, 23651), 
True, 'import streamlit as st\n'), ((23699, 23718), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (23708, 23718), True, 'import streamlit as st\n'), ((5786, 5818), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5801, 5818), True, 'import streamlit as st\n'), ((5832, 5857), 'streamlit.write', 'st.write', (["message['text']"], {}), "(message['text'])\n", (5840, 5857), True, 'import streamlit as st\n'), ((6455, 6540), 'streamlit.info', 'st.info', (['f"""Please add documents, Note: Scanned documents are not supported yet!"""'], {}), "(f'Please add documents, Note: Scanned documents are not supported yet!'\n )\n", (6462, 6540), True, 'import streamlit as st\n'), ((6548, 6557), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (6555, 6557), True, 'import streamlit as st\n'), ((9862, 9888), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (9878, 9888), False, 'import os\n'), ((10222, 10288), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""Please add your OpenAI API key to continue!!"""'], {}), "('Please add your OpenAI API key to continue!!')\n", (10240, 10288), True, 'import streamlit as st\n'), ((10301, 10359), 'streamlit.warning', 'st.warning', (['"""Please add your OpenAI API key to continue!!"""'], {}), "('Please add your OpenAI API key to continue!!')\n", (10311, 10359), True, 'import streamlit as st\n'), ((10372, 10890), 'streamlit.sidebar.info', 'st.sidebar.info', (['"""To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you\'re unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). While obtaining the API key doesn\'t require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services."""'], {}), '(\n "To obtain your OpenAI API key, please visit [OpenAI](https://platform.openai.com/account/api-keys). They provide a $5 credit to allow you to experiment with their models. If you\'re unsure about how to get the API key, you can follow this [Tutorial](https://www.maisieai.com/help/how-to-get-an-openai-api-key-for-chatgpt). 
While obtaining the API key doesn\'t require a compulsory payment, once your allotted credit is exhausted, a payment will be necessary to continue using their services."\n )\n', (10387, 10890), True, 'import streamlit as st\n'), ((10893, 10902), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (10900, 10902), True, 'import streamlit as st\n'), ((12158, 12194), 'streamlit.warning', 'st.warning', (['"""No documents uploaded!"""'], {}), "('No documents uploaded!')\n", (12168, 12194), True, 'import streamlit as st\n'), ((12207, 12216), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (12214, 12216), True, 'import streamlit as st\n'), ((12603, 12618), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (12616, 12618), True, 'import streamlit as st\n'), ((14310, 14344), 'streamlit.chat_message', 'st.chat_message', (['avatars[msg.type]'], {}), '(avatars[msg.type])\n', (14325, 14344), True, 'import streamlit as st\n'), ((14735, 14756), 'streamlit.write', 'st.write', (['msg.content'], {}), '(msg.content)\n', (14743, 14756), True, 'import streamlit as st\n'), ((15016, 15070), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (15023, 15070), True, 'import streamlit as st\n'), ((15083, 15092), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (15090, 15092), True, 'import streamlit as st\n'), ((15262, 15296), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {'name': '"""Search"""'}), "(name='Search')\n", (15281, 15296), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((15630, 15658), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (15645, 15658), True, 'import streamlit as st\n'), ((15819, 15847), 'streamlit.write', 'st.write', (["response['output']"], {}), "(response['output'])\n", (15827, 15847), True, 'import streamlit as st\n'), ((17027, 17070), 'streamlit.error', 'st.error', (['f"""Unsupported file format: {ext}"""'], {}), "(f'Unsupported file format: {ext}')\n", (17035, 17070), True, 'import streamlit as st\n'), ((18609, 18663), 'streamlit.info', 'st.info', (['"""Please add your OpenAI API key to continue."""'], {}), "('Please add your OpenAI API key to continue.')\n", (18616, 18663), True, 'import streamlit as st\n'), ((18676, 18685), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (18683, 18685), True, 'import streamlit as st\n'), ((19056, 19084), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (19071, 19084), True, 'import streamlit as st\n'), ((19275, 19351), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': response}"], {}), "({'role': 'assistant', 'content': response})\n", (19307, 19351), True, 'import streamlit as st\n'), ((19364, 19382), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (19372, 19382), True, 'import streamlit as st\n'), ((20411, 20538), 'streamlit.toggle', 'st.toggle', (['"""Does your product image have a plain or transparent background? If not, let us do the hard work for you!"""'], {}), "(\n 'Does your product image have a plain or transparent background? 
If not, let us do the hard work for you!'\n )\n", (20420, 20538), True, 'import streamlit as st\n'), ((20550, 20618), 'streamlit.text_input', 'st.text_input', (['"""Enter Prompt"""'], {'help': '"""Enter something you imagine..."""'}), "('Enter Prompt', help='Enter something you imagine...')\n", (20563, 20618), True, 'import streamlit as st\n'), ((20649, 20750), 'streamlit.text_input', 'st.text_input', (['"""Enter Negative Prompt"""'], {'help': '"""Write what you don\'t want in the generated images"""'}), '(\'Enter Negative Prompt\', help=\n "Write what you don\'t want in the generated images")\n', (20662, 20750), True, 'import streamlit as st\n'), ((20767, 20786), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (20776, 20786), True, 'import streamlit as st\n'), ((23760, 23953), 'replicate.run', 'replicate.run', (['"""stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f"""'], {'input': "{'prompt': prompt, 'negative_prompt': negative_prompt, 'num_outputs': 4}"}), "(\n 'stability-ai/sdxl:8beff3369e81422112d93b89ca01426147de542cd4684c244b673b105188fe5f'\n , input={'prompt': prompt, 'negative_prompt': negative_prompt,\n 'num_outputs': 4})\n", (23773, 23953), False, 'import replicate\n'), ((24092, 24105), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (24102, 24105), True, 'import streamlit as st\n'), ((5182, 5222), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': 'files'}), '(input_files=files)\n', (5203, 5222), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex\n'), ((7594, 7623), 'streamlit.expander', 'st.expander', (['"""Uploaded Files"""'], {}), "('Uploaded Files')\n", (7605, 7623), True, 'import streamlit as st\n'), ((7729, 7763), 'streamlit.markdown', 'st.markdown', (['f"""{filepaths_pretty}"""'], {}), "(f'{filepaths_pretty}')\n", (7740, 7763), True, 'import streamlit as st\n'), ((8200, 8226), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (8216, 8226), False, 'import os\n'), ((11305, 11337), 'streamlit.expander', 'st.expander', (['"""Advanced Settings"""'], {}), "('Advanced Settings')\n", (11316, 11337), True, 'import streamlit as st\n'), ((11389, 11530), 'streamlit.number_input', 'st.number_input', (['"""Enter Temperature"""'], {'help': '"""It determines how creative the model should be"""', 'min_value': '(0.0)', 'max_value': '(1.0)', 'value': '(0.1)'}), "('Enter Temperature', help=\n 'It determines how creative the model should be', min_value=0.0,\n max_value=1.0, value=0.1)\n", (11404, 11530), True, 'import streamlit as st\n'), ((12770, 12798), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (12785, 12798), True, 'import streamlit as st\n'), ((14886, 14909), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (14901, 14909), True, 'import streamlit as st\n'), ((15705, 15719), 'streamlit.container', 'st.container', ([], {}), '()\n', (15717, 15719), True, 'import streamlit as st\n'), ((18272, 18300), 'streamlit.chat_message', 'st.chat_message', (["msg['role']"], {}), "(msg['role'])\n", (18287, 18300), True, 'import streamlit as st\n'), ((18482, 18505), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (18497, 18505), True, 'import streamlit as st\n'), ((19131, 19145), 'streamlit.container', 'st.container', ([], {}), '()\n', (19143, 19145), True, 'import streamlit as st\n'), ((22251, 22429), 'replicate.run', 'replicate.run', 
(['"""logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df"""'], {'input': "{'image_path': bytes_obj, 'prompt': prompt, 'image_num': 4}"}), "(\n 'logerzhu/ad-inpaint:b1c17d148455c1fda435ababe9ab1e03bc0d917cc3cf4251916f22c45c83c7df'\n , input={'image_path': bytes_obj, 'prompt': prompt, 'image_num': 4})\n", (22264, 22429), False, 'import replicate\n'), ((22507, 22520), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (22517, 22520), True, 'import streamlit as st\n'), ((24145, 24164), 'streamlit.image', 'st.image', (['output[0]'], {}), '(output[0])\n', (24153, 24164), True, 'import streamlit as st\n'), ((24181, 24200), 'streamlit.image', 'st.image', (['output[2]'], {}), '(output[2])\n', (24189, 24200), True, 'import streamlit as st\n'), ((24240, 24259), 'streamlit.image', 'st.image', (['output[1]'], {}), '(output[1])\n', (24248, 24259), True, 'import streamlit as st\n'), ((24276, 24295), 'streamlit.image', 'st.image', (['output[3]'], {}), '(output[3])\n', (24284, 24295), True, 'import streamlit as st\n'), ((12899, 12909), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (12907, 12909), True, 'import streamlit as st\n'), ((14569, 14641), 'streamlit.status', 'st.status', (['f"""**{step[0].tool}**: {step[0].tool_input}"""'], {'state': '"""complete"""'}), "(f'**{step[0].tool}**: {step[0].tool_input}', state='complete')\n", (14578, 14641), True, 'import streamlit as st\n'), ((14663, 14684), 'streamlit.write', 'st.write', (['step[0].log'], {}), '(step[0].log)\n', (14671, 14684), True, 'import streamlit as st\n'), ((14705, 14722), 'streamlit.write', 'st.write', (['step[1]'], {}), '(step[1])\n', (14713, 14722), True, 'import streamlit as st\n'), ((20968, 20983), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (20978, 20983), False, 'from PIL import Image\n'), ((21016, 21028), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21026, 21028), False, 'import io\n'), ((21326, 21530), 'requests.post', 'requests.post', (['"""https://clipdrop-api.co/remove-background/v1"""'], {'files': "{'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')}", 'headers': "{'x-api-key': st.secrets['clipdrop_api_key']}"}), "('https://clipdrop-api.co/remove-background/v1', files={\n 'image_file': ('uploaded_image.jpg', image_file_object, 'image/jpeg')},\n headers={'x-api-key': st.secrets['clipdrop_api_key']})\n", (21339, 21530), False, 'import requests\n'), ((22568, 22587), 'streamlit.image', 'st.image', (['output[1]'], {}), '(output[1])\n', (22576, 22587), True, 'import streamlit as st\n'), ((22608, 22627), 'streamlit.image', 'st.image', (['output[2]'], {}), '(output[2])\n', (22616, 22627), True, 'import streamlit as st\n'), ((22675, 22694), 'streamlit.image', 'st.image', (['output[3]'], {}), '(output[3])\n', (22683, 22694), True, 'import streamlit as st\n'), ((22715, 22734), 'streamlit.image', 'st.image', (['output[4]'], {}), '(output[4])\n', (22723, 22734), True, 'import streamlit as st\n'), ((9163, 9184), 'os.path.basename', 'os.path.basename', (['doc'], {}), '(doc)\n', (9179, 9184), False, 'import os\n'), ((21868, 21880), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (21878, 21880), False, 'import io\n'), ((22078, 22129), 'streamlit.error', 'st.error', (['"""Failed to remove background. Try again."""'], {}), "('Failed to remove background. 
Try again.')\n", (22086, 22129), True, 'import streamlit as st\n'), ((22154, 22163), 'streamlit.stop', 'st.stop', ([], {}), '()\n', (22161, 22163), True, 'import streamlit as st\n'), ((16802, 16838), 'os.path.splitext', 'os.path.splitext', (['uploaded_file.name'], {}), '(uploaded_file.name)\n', (16818, 16838), False, 'import os\n'), ((21809, 21830), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (21819, 21830), False, 'import io\n')]
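The offsets above belong to a Streamlit front end that ingests uploaded files into a llama_index VectorStoreIndex and chats over them. A condensed, self-contained sketch of that ingestion/query path follows; the input file, model settings, and question are placeholder assumptions, and an OPENAI_API_KEY environment variable is assumed to be set.

from llama_index import (
    OpenAIEmbedding,
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.llms import OpenAI

# Mirror the service-context setup referenced by the offsets above.
service_context = ServiceContext.from_defaults(
    llm=OpenAI(temperature=0.1, model="gpt-3.5-turbo"),
    embed_model=OpenAIEmbedding(),
)
set_global_service_context(service_context)

# Load the uploaded files and build the vector index.
documents = SimpleDirectoryReader(input_files=["uploads/report.txt"]).load_data()
index = VectorStoreIndex.from_documents(documents)

print(index.as_query_engine().query("What is this document about?"))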
from time import monotonic from rich.console import Console from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.llms import OpenAI class Experiment: """ A class representing an experiment. Attributes: params (dict): A dictionary containing experiment parameters. index (int): An integer representing the index of the experiment. documents (list): A list of documents to be used in the experiment. vectorstore_list (list): A list of vector stores used in the experiment. retrievers_list (list): A list of retrievers used in the experiment. chain (object): An object representing the chain used in the experiment. query_results (dict): A dictionary containing the results of the queries. run_time (float): A float representing the time it took to run the experiment. embedding_time (float): A float representing the time it took to embed the documents. Methods: run(eval_queries): Runs the experiment with the given evaluation queries. _evaluate(chain, eval_queries): Evaluates the experiment with the given chain and evaluation queries. """ def __init__( self, params, index=0, documents=None, vectorstore_list=None, retrievers_list=None, chain=None, ): """ Initializes an Experiment object. Args: params (dict): A dictionary containing experiment parameters. index (int): An integer representing the index of the experiment. documents (list): A list of documents to be used in the experiment. vectorstore_list (list): A list of vector stores used in the experiment. retrievers_list (list): A list of retrievers used in the experiment. chain (object): An object representing the chain used in the experiment. """ self.params = params self.index = index self.console = Console() self.documents = documents self.vectorstore_list = vectorstore_list self.retrievers_list = retrievers_list self.chain = chain self.query_results = {} # time variables self.run_time = 0 self.embedding_time = 0 def run(self, eval_queries): """ Runs the experiment with the given evaluation queries. Args: eval_queries (list): A list of evaluation queries. Returns: A tuple containing the query results, the run time, and the embedding time. """ self.console.log(f"Experiment {self.index} started") self.start_time = monotonic() text_splitter = RecursiveCharacterTextSplitter( chunk_size=self.params["chunk_size"], chunk_overlap=0 ) texts = text_splitter.split_documents(self.documents) self.pre_embedding = monotonic() db = self.params["vector_store"].from_documents( texts, self.params["embeddings"] ) self.post_embedding = monotonic() self.vectorstore_list.append(db) retriever = db.as_retriever(search_kwargs={"k": 2}) self.retrievers_list.append(retriever) qa = self.chain.from_chain_type( llm=OpenAI(), chain_type="stuff", retriever=retriever ) self.pre_queries = monotonic() self._evaluate(qa, eval_queries) return self.query_results, self.run_time, self.embedding_time def _evaluate(self, chain, eval_queries): """ Evaluates the experiment with the given chain and evaluation queries. Args: chain (object): An object representing the chain used in the experiment. eval_queries (list): A list of evaluation queries. """ self.console.log(f"Evaluating experiment {self.index}") for q in eval_queries: res = chain.run(q) self.query_results[q] = res post_queries = monotonic() self.console.log(f"Finished experiment {self.index}") self.run_time = post_queries - self.start_time self.embedding_time = self.post_embedding - self.pre_embedding
[ "langchain.llms.OpenAI", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((1970, 1979), 'rich.console.Console', 'Console', ([], {}), '()\n', (1977, 1979), False, 'from rich.console import Console\n'), ((2643, 2654), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2652, 2654), False, 'from time import monotonic\n'), ((2680, 2769), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': "self.params['chunk_size']", 'chunk_overlap': '(0)'}), "(chunk_size=self.params['chunk_size'],\n chunk_overlap=0)\n", (2710, 2769), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2880, 2891), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2889, 2891), False, 'from time import monotonic\n'), ((3034, 3045), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3043, 3045), False, 'from time import monotonic\n'), ((3341, 3352), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3350, 3352), False, 'from time import monotonic\n'), ((3964, 3975), 'time.monotonic', 'monotonic', ([], {}), '()\n', (3973, 3975), False, 'from time import monotonic\n'), ((3253, 3261), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (3259, 3261), False, 'from langchain.llms import OpenAI\n')]
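A sketch of how the Experiment class above might be driven, assuming the class is importable and that LangChain's FAISS store, OpenAI embeddings, and RetrievalQA are the injected dependencies; the notes.txt path and the query are placeholders, and an OPENAI_API_KEY is needed because run() instantiates OpenAI() internally.

from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

documents = TextLoader("notes.txt").load()

experiment = Experiment(
    params={
        "chunk_size": 500,             # consumed by RecursiveCharacterTextSplitter
        "vector_store": FAISS,         # a class, not an instance: run() calls .from_documents()
        "embeddings": OpenAIEmbeddings(),
    },
    index=0,
    documents=documents,
    vectorstore_list=[],
    retrievers_list=[],
    chain=RetrievalQA,                   # run() calls .from_chain_type() on this
)

results, run_time, embedding_time = experiment.run(["What is this document about?"])
print(results, run_time, embedding_time)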
import os os.environ["CUDA_VISIBLE_DEVICES"] = "2" import re import torch import gradio as gr from clc.langchain_application import LangChainApplication, torch_gc from transformers import StoppingCriteriaList, StoppingCriteriaList from clc.callbacks import Iteratorize, Stream from clc.matching import key_words_match_intention, key_words_match_knowledge from langchain.schema import Document # 调试使用 # os.chdir("../../../") class LangChainCFG: llm_model_name = 'luwen_baichuan/output/zju_model_0813_100k' # 本地模型文件 or huggingface远程仓库 embedding_model_name = 'app/langchain_demo/model/text2vec' # 检索模型文件 or huggingface远程仓库 vector_store_path = 'app/langchain_demo/data/cache/legal_articles' kg_vector_stores = { '法律法条': 'app/langchain_demo/data/cache/legal_articles', '法律书籍': 'app/langchain_demo/data/cache/legal_books', '法律文书模板':'app/langchain_demo/data/cache/legal_templates', '法律案例': 'app/langchain_demo/data/cache/legal_cases', '法律考试': 'app/langchain_demo/data/cache/judicialExamination', '日常法律问答': 'app/langchain_demo/data/cache/legal_QA', } config = LangChainCFG() application = LangChainApplication(config) def clear_session(): return '', None, "" def predict(input, kg_names=None, history=None, intention_reg=None, **kwargs): max_length=1024 top_k = 1 application.llm_service.max_token = max_length # print(input) if history == None: history = [] search_text = '' now_input = input eos_token_ids = [application.llm_service.tokenizer.eos_token_id] application.llm_service.history = history[-5:] max_memory = 4096 - max_length if intention_reg==["意图识别"]: auto_kg_names = key_words_match_intention(input) if len(auto_kg_names)==0: search_text += "意图识别没有匹配到知识库。\n\n" else: match_kg_names = "、".join(list(auto_kg_names)) search_text += "意图识别匹配到知识库是:"+match_kg_names+"。\n\n" kg_names = list(set(kg_names) | auto_kg_names) kb_based = True if len(kg_names) != 0 else False if len(history) != 0: input = "".join(["</s>Human:" + i[0] + " </s>Assistant: " + i[1] for i in application.llm_service.history]) + \ "</s>Human:" + input input = input[len("</s>Human:"):] if len(input) > max_memory: input = input[-max_memory:] if kb_based: related_docs_with_score_seq = [] for kg_name in kg_names: if kg_name=="法律法条": related_article = key_words_match_knowledge(application.all_articles, application.choices, now_input) if related_article: kg_matches = [(Document(page_content=related_article[0], metadata={"value": related_article[1]}),0)] else: application.source_service.load_vector_store(application.config.kg_vector_stores[kg_name]) kg_matches = application.source_service.vector_store.similarity_search_with_score(input, k=top_k) else: application.source_service.load_vector_store(application.config.kg_vector_stores[kg_name]) kg_matches = application.source_service.vector_store.similarity_search_with_score(input, k=top_k) related_docs_with_score_seq.append(kg_matches) related_docs_with_score = related_docs_with_score_seq if len(related_docs_with_score) > 0: input, context_with_score = application.generate_prompt(related_docs_with_score, input,kg_names) search_text += context_with_score torch_gc() print("histroy in call: ", history) prompt = f'</s>Human:{input} </s>Assistant: ' print("prompt: ",prompt) inputs = application.llm_service.tokenizer(prompt, return_tensors="pt").to('cuda') stopping_criteria = StoppingCriteriaList() kwargs['inputs'] = inputs kwargs['max_new_tokens'] = max_length kwargs['repetition_penalty'] = float(1.2) kwargs['stopping_criteria'] = stopping_criteria history.append((now_input, "")) def 
generate_with_callback(callback=None, **kwargs): kwargs['stopping_criteria'].append(Stream(callback_func=callback)) with torch.no_grad(): application.llm_service.model.generate(**kwargs['inputs'], max_new_tokens=kwargs['max_new_tokens'], repetition_penalty=kwargs['repetition_penalty'], stopping_criteria=kwargs["stopping_criteria"]) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**kwargs) as generator: for output in generator: last = output[-1] output = application.llm_service.tokenizer.decode(output, skip_special_tokens=True) pattern = r"\n{5,}$" pattern2 = r"\s{5,}$" origin_output = output output = output.split("Assistant:")[-1].strip() history[-1] = (now_input, output) yield "", history, history, search_text if last in eos_token_ids or re.search(pattern, origin_output) or re.search(pattern2, origin_output): break with gr.Blocks() as demo: state = gr.State() with gr.Row(): with gr.Column(scale=1.5): github_banner_path = 'https://raw.githubusercontent.com/LIANG-star177/chatgptapi/master/logo.png' gr.HTML(f'<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>') with gr.Row(): intention_reg = gr.CheckboxGroup(["意图识别"], label="自动选择知识库", value=None, interactive=True) with gr.Row(): kg_names = gr.CheckboxGroup(list(config.kg_vector_stores.keys()), label="手动选择知识库", value=None, interactive=True).style(height=200) with gr.Row(): search = gr.Textbox(label='知识库检索结果') with gr.Row(): gr.Markdown("""Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区""") with gr.Row(): gr.Markdown("""免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。""") with gr.Column(scale=4): with gr.Row(): chatbot = gr.Chatbot(label='智海-录问').style(height=500) with gr.Row(): message = gr.Textbox(label='请输入问题') with gr.Row(): clear_history = gr.Button("🧹 清除历史对话") send = gr.Button("🚀 发送") send.click(predict, inputs=[ message, kg_names, state, intention_reg, ], outputs=[message, chatbot, state, search], show_progress=True) clear_history.click(fn=clear_session, inputs=[], outputs=[chatbot, state, search], queue=False) message.submit(predict, inputs=[ message, kg_names, state, intention_reg, ], outputs=[message, chatbot, state, search], show_progress=True) demo.queue(concurrency_count=2).launch( server_name='0.0.0.0', server_port=7888, share=True, enable_queue=True, inbrowser=True, )
[ "langchain.schema.Document" ]
[((1155, 1183), 'clc.langchain_application.LangChainApplication', 'LangChainApplication', (['config'], {}), '(config)\n', (1175, 1183), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3620, 3630), 'clc.langchain_application.torch_gc', 'torch_gc', ([], {}), '()\n', (3628, 3630), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3863, 3885), 'transformers.StoppingCriteriaList', 'StoppingCriteriaList', ([], {}), '()\n', (3883, 3885), False, 'from transformers import StoppingCriteriaList, StoppingCriteriaList\n'), ((5358, 5369), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (5367, 5369), True, 'import gradio as gr\n'), ((5392, 5402), 'gradio.State', 'gr.State', ([], {}), '()\n', (5400, 5402), True, 'import gradio as gr\n'), ((1763, 1795), 'clc.matching.key_words_match_intention', 'key_words_match_intention', (['input'], {}), '(input)\n', (1788, 1795), False, 'from clc.matching import key_words_match_intention, key_words_match_knowledge\n'), ((4681, 4739), 'clc.callbacks.Iteratorize', 'Iteratorize', (['generate_with_callback', 'kwargs'], {'callback': 'None'}), '(generate_with_callback, kwargs, callback=None)\n', (4692, 4739), False, 'from clc.callbacks import Iteratorize, Stream\n'), ((5412, 5420), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5418, 5420), True, 'import gradio as gr\n'), ((4194, 4224), 'clc.callbacks.Stream', 'Stream', ([], {'callback_func': 'callback'}), '(callback_func=callback)\n', (4200, 4224), False, 'from clc.callbacks import Iteratorize, Stream\n'), ((4239, 4254), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4252, 4254), False, 'import torch\n'), ((5435, 5455), 'gradio.Column', 'gr.Column', ([], {'scale': '(1.5)'}), '(scale=1.5)\n', (5444, 5455), True, 'import gradio as gr\n'), ((5579, 5758), 'gradio.HTML', 'gr.HTML', (['f"""<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>"""'], {}), '(\n f\'<p align="center"><a href="https://github.com/LIANG-star177/chatgptapi/blob/master/logo.png"><img src={github_banner_path} height="100" width="200"/></a></p>\'\n )\n', (5586, 5758), True, 'import gradio as gr\n'), ((6552, 6570), 'gradio.Column', 'gr.Column', ([], {'scale': '(4)'}), '(scale=4)\n', (6561, 6570), True, 'import gradio as gr\n'), ((2568, 2655), 'clc.matching.key_words_match_knowledge', 'key_words_match_knowledge', (['application.all_articles', 'application.choices', 'now_input'], {}), '(application.all_articles, application.choices,\n now_input)\n', (2593, 2655), False, 'from clc.matching import key_words_match_intention, key_words_match_knowledge\n'), ((5257, 5290), 're.search', 're.search', (['pattern', 'origin_output'], {}), '(pattern, origin_output)\n', (5266, 5290), False, 'import re\n'), ((5294, 5328), 're.search', 're.search', (['pattern2', 'origin_output'], {}), '(pattern2, origin_output)\n', (5303, 5328), False, 'import re\n'), ((5766, 5774), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5772, 5774), True, 'import gradio as gr\n'), ((5815, 5888), 'gradio.CheckboxGroup', 'gr.CheckboxGroup', (["['意图识别']"], {'label': '"""自动选择知识库"""', 'value': 'None', 'interactive': '(True)'}), "(['意图识别'], label='自动选择知识库', value=None, interactive=True)\n", (5831, 5888), True, 'import gradio as gr\n'), ((5978, 5986), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (5984, 5986), True, 'import gradio as gr\n'), ((6248, 6256), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6254, 6256), True, 'import gradio as gr\n'), 
((6283, 6310), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""知识库检索结果"""'}), "(label='知识库检索结果')\n", (6293, 6310), True, 'import gradio as gr\n'), ((6329, 6337), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6335, 6337), True, 'import gradio as gr\n'), ((6355, 6403), 'gradio.Markdown', 'gr.Markdown', (['"""Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区"""'], {}), "('Powered by 浙江大学 阿里巴巴达摩院 华院计算 魔搭社区')\n", (6366, 6403), True, 'import gradio as gr\n'), ((6425, 6433), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6431, 6433), True, 'import gradio as gr\n'), ((6451, 6539), 'gradio.Markdown', 'gr.Markdown', (['"""免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。"""'], {}), "(\n '免责声明:本模型仅供学术研究之目的而提供,不保证结果的准确性、完整性或适用性。在使用模型生成的内容时,您应自行判断其适用性,并自担风险。')\n", (6462, 6539), True, 'import gradio as gr\n'), ((6589, 6597), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6595, 6597), True, 'import gradio as gr\n'), ((6698, 6706), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6704, 6706), True, 'import gradio as gr\n'), ((6734, 6759), 'gradio.Textbox', 'gr.Textbox', ([], {'label': '"""请输入问题"""'}), "(label='请输入问题')\n", (6744, 6759), True, 'import gradio as gr\n'), ((6789, 6797), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (6795, 6797), True, 'import gradio as gr\n'), ((6831, 6852), 'gradio.Button', 'gr.Button', (['"""🧹 清除历史对话"""'], {}), "('🧹 清除历史对话')\n", (6840, 6852), True, 'import gradio as gr\n'), ((6876, 6893), 'gradio.Button', 'gr.Button', (['"""🚀 发送"""'], {}), "('🚀 发送')\n", (6885, 6893), True, 'import gradio as gr\n'), ((6625, 6650), 'gradio.Chatbot', 'gr.Chatbot', ([], {'label': '"""智海-录问"""'}), "(label='智海-录问')\n", (6635, 6650), True, 'import gradio as gr\n'), ((2723, 2808), 'langchain.schema.Document', 'Document', ([], {'page_content': 'related_article[0]', 'metadata': "{'value': related_article[1]}"}), "(page_content=related_article[0], metadata={'value':\n related_article[1]})\n", (2731, 2808), False, 'from langchain.schema import Document\n')]
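Outside the Gradio UI, the predict generator above can be consumed directly. The sketch below is only illustrative: it assumes the clc package and the local checkpoints named in LangChainCFG are actually available on the machine, and it uses a placeholder question against the statute knowledge base.

history = []
answer, retrieved = "", ""
for _, chat, history, search_text in predict(
    "What are the legal consequences of breaching a contract?",  # placeholder question
    kg_names=["法律法条"],
    history=history,
    intention_reg=None,
):
    answer = chat[-1][1]      # the streamed answer grows with each yield
    retrieved = search_text   # retrieved context, if any knowledge base matched

print(retrieved)
print(answer)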
"""Callback Handler that writes to a file.""" from typing import Any, Dict, Optional, TextIO, cast from langchain_core.agents import AgentAction, AgentFinish from langchain_core.callbacks import BaseCallbackHandler from langchain_core.utils.input import print_text class FileCallbackHandler(BaseCallbackHandler): """Callback Handler that writes to a file.""" def __init__( self, filename: str, mode: str = "a", color: Optional[str] = None ) -> None: """Initialize callback handler.""" self.file = cast(TextIO, open(filename, mode, encoding="utf-8")) self.color = color def __del__(self) -> None: """Destructor to cleanup when done.""" self.file.close() def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Print out that we are entering a chain.""" class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1]) print_text( f"\n\n\033[1m> Entering new {class_name} chain...\033[0m", end="\n", file=self.file, ) def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Print out that we finished a chain.""" print_text("\n\033[1m> Finished chain.\033[0m", end="\n", file=self.file) def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: """Run on agent action.""" print_text(action.log, color=color or self.color, file=self.file) def on_tool_end( self, output: str, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """If not the final action, print out observation.""" if observation_prefix is not None: print_text(f"\n{observation_prefix}", file=self.file) print_text(output, color=color or self.color, file=self.file) if llm_prefix is not None: print_text(f"\n{llm_prefix}", file=self.file) def on_text( self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any ) -> None: """Run when agent ends.""" print_text(text, color=color or self.color, end=end, file=self.file) def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: """Run on agent end.""" print_text(finish.log, color=color or self.color, end="\n", file=self.file)
[ "langchain_core.utils.input.print_text" ]
[((989, 1084), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '(f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m""", end=\'\\n\',\n file=self.file)\n', (999, 1084), False, 'from langchain_core.utils.input import print_text\n'), ((1267, 1337), 'langchain_core.utils.input.print_text', 'print_text', (['"""\n\x1b[1m> Finished chain.\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '("""\n\x1b[1m> Finished chain.\x1b[0m""", end=\'\\n\', file=self.file)\n', (1277, 1337), False, 'from langchain_core.utils.input import print_text\n'), ((1502, 1567), 'langchain_core.utils.input.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)', 'file': 'self.file'}), '(action.log, color=color or self.color, file=self.file)\n', (1512, 1567), False, 'from langchain_core.utils.input import print_text\n'), ((1971, 2032), 'langchain_core.utils.input.print_text', 'print_text', (['output'], {'color': '(color or self.color)', 'file': 'self.file'}), '(output, color=color or self.color, file=self.file)\n', (1981, 2032), False, 'from langchain_core.utils.input import print_text\n'), ((2285, 2353), 'langchain_core.utils.input.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end', 'file': 'self.file'}), '(text, color=color or self.color, end=end, file=self.file)\n', (2295, 2353), False, 'from langchain_core.utils.input import print_text\n'), ((2513, 2588), 'langchain_core.utils.input.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""', 'file': 'self.file'}), "(finish.log, color=color or self.color, end='\\n', file=self.file)\n", (2523, 2588), False, 'from langchain_core.utils.input import print_text\n'), ((1909, 1965), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {'file': 'self.file'}), '(f"""\n{observation_prefix}""", file=self.file)\n', (1919, 1965), False, 'from langchain_core.utils.input import print_text\n'), ((2080, 2125), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {'file': 'self.file'}), "(f'\\n{llm_prefix}', file=self.file)\n", (2090, 2125), False, 'from langchain_core.utils.input import print_text\n')]
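A small usage sketch for the handler above: attach it through the callbacks config so the run trace of any runnable is appended to a log file. The ChatOpenAI model and the prompt are assumptions used only to produce events, and an OPENAI_API_KEY is required.

from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate

handler = FileCallbackHandler("chain_run.log")

chain = ChatPromptTemplate.from_template("Summarise in one line: {text}") | ChatOpenAI()
chain.invoke({"text": "callback handlers"}, config={"callbacks": [handler]})
# The chain start/end records are now appended to chain_run.log.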
import base64 import json from langchain_community.chat_models import ChatOpenAI from langchain_core.output_parsers import StrOutputParser from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate from langchain_core.pydantic_v1 import Field from langserve import CustomUserType from .prompts import ( AI_REPONSE_DICT, FULL_PROMPT, USER_EXAMPLE_DICT, create_prompt, ) from .utils import parse_llm_output llm = ChatOpenAI(temperature=0, model="gpt-4") prompt = ChatPromptTemplate.from_messages( [ SystemMessagePromptTemplate.from_template(FULL_PROMPT), ("human", "{user_example}"), ("ai", "{ai_response}"), ("human", "{input}"), ], ) # ATTENTION: Inherit from CustomUserType instead of BaseModel otherwise # the server will decode it into a dict instead of a pydantic model. class FileProcessingRequest(CustomUserType): """Request including a base64 encoded file.""" # The extra field is used to specify a widget for the playground UI. file: str = Field(..., extra={"widget": {"type": "base64file"}}) num_plates: int = None num_rows: int = 8 num_cols: int = 12 def _load_file(request: FileProcessingRequest): return base64.b64decode(request.file.encode("utf-8")).decode("utf-8") def _load_prompt(request: FileProcessingRequest): return create_prompt( num_plates=request.num_plates, num_rows=request.num_rows, num_cols=request.num_cols, ) def _get_col_range_str(request: FileProcessingRequest): if request.num_cols: return f"from 1 to {request.num_cols}" else: return "" def _get_json_format(request: FileProcessingRequest): return json.dumps( [ { "row_start": 12, "row_end": 12 + request.num_rows - 1, "col_start": 1, "col_end": 1 + request.num_cols - 1, "contents": "Entity ID", } ] ) chain = ( { # Should add validation to ensure numeric indices "input": _load_file, "hint": _load_prompt, "col_range_str": _get_col_range_str, "json_format": _get_json_format, "user_example": lambda x: USER_EXAMPLE_DICT[x.num_rows * x.num_cols], "ai_response": lambda x: AI_REPONSE_DICT[x.num_rows * x.num_cols], } | prompt | llm | StrOutputParser() | parse_llm_output ).with_types(input_type=FileProcessingRequest)
[ "langchain_core.pydantic_v1.Field", "langchain_core.prompts.SystemMessagePromptTemplate.from_template", "langchain_core.output_parsers.StrOutputParser", "langchain_community.chat_models.ChatOpenAI" ]
[((454, 494), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (464, 494), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1047, 1099), 'langchain_core.pydantic_v1.Field', 'Field', (['...'], {'extra': "{'widget': {'type': 'base64file'}}"}), "(..., extra={'widget': {'type': 'base64file'}})\n", (1052, 1099), False, 'from langchain_core.pydantic_v1 import Field\n'), ((1714, 1869), 'json.dumps', 'json.dumps', (["[{'row_start': 12, 'row_end': 12 + request.num_rows - 1, 'col_start': 1,\n 'col_end': 1 + request.num_cols - 1, 'contents': 'Entity ID'}]"], {}), "([{'row_start': 12, 'row_end': 12 + request.num_rows - 1,\n 'col_start': 1, 'col_end': 1 + request.num_cols - 1, 'contents':\n 'Entity ID'}])\n", (1724, 1869), False, 'import json\n'), ((553, 607), 'langchain_core.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['FULL_PROMPT'], {}), '(FULL_PROMPT)\n', (594, 607), False, 'from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate\n'), ((2402, 2419), 'langchain_core.output_parsers.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2417, 2419), False, 'from langchain_core.output_parsers import StrOutputParser\n')]
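A client-side sketch of calling the chain above directly (the same payload shape applies when posting to the LangServe endpoint). The CSV file name is a placeholder, the decoded payload has to be UTF-8 text because _load_file decodes it, and an OPENAI_API_KEY is assumed for the GPT-4 call.

import base64

with open("plate_layout.csv", "rb") as f:   # placeholder export of the plate data
    encoded = base64.b64encode(f.read()).decode("utf-8")

result = chain.invoke(
    FileProcessingRequest(file=encoded, num_plates=1, num_rows=8, num_cols=12)
)
print(result)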
"""This script is used to initialize the Qdrant db backend with Azure OpenAI.""" import os from typing import Any, List, Optional, Tuple import openai from dotenv import load_dotenv from langchain.docstore.document import Document from langchain.text_splitter import NLTKTextSplitter from langchain_community.document_loaders import DirectoryLoader, PyPDFium2Loader from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings from langchain_community.vectorstores import Qdrant from loguru import logger from omegaconf import DictConfig from ultra_simple_config import load_config from agent.utils.utility import generate_prompt from agent.utils.vdb import init_vdb load_dotenv() @load_config(location="config/db.yml") def get_db_connection(open_ai_token: str, cfg: DictConfig, collection_name: str) -> Qdrant: """Initializes a connection to the Qdrant DB. Args: open_ai_token (str): The openai token. cfg (DictConfig): the config file. collection_name (str): The name of the vector database collection. Returns: Qdrant: An Langchain Instance of the Qdrant DB. """ if cfg.openai.azure: embedding = AzureOpenAIEmbeddings(deployment=cfg.openai.deployment, openai_api_version="2023-05-15", openai_api_key=open_ai_token) # type: ignore else: embedding = OpenAIEmbeddings(model=cfg.openai.deployment, openai_api_key=open_ai_token) if collection_name is None or not collection_name: collection_name = cfg.qdrant.collection_name_openai return init_vdb(cfg, collection_name, embedding) def embedd_documents_openai(dir: str, open_ai_token: str, collection_name: Optional[str] = None) -> None: """embedd_documents embedds the documents in the given directory. :param cfg: Configuration from the file :type cfg: DictConfig :param dir: PDF Directory :type dir: str :param open_ai_token: OpenAI API Token :type open_ai_token: str """ vector_db: Qdrant = get_db_connection(open_ai_token=open_ai_token, collection_name=collection_name) splitter = NLTKTextSplitter(chunk_size=500, chunk_overlap=100) loader = DirectoryLoader(dir, glob="*.pdf", loader_cls=PyPDFium2Loader) docs = loader.load_and_split(splitter) logger.info(f"Loaded {len(docs)} documents.") texts = [doc.page_content for doc in docs] metadatas = [doc.metadata for doc in docs] vector_db.add_texts(texts=texts, metadatas=metadatas) logger.info("SUCCESS: Texts embedded.") def search_documents_openai(open_ai_token: str, query: str, amount: int, threshold: float = 0.0, collection_name: Optional[str] = None) -> List[Tuple[Document, float]]: """Searches the documents in the Qdrant DB with a specific query. Args: open_ai_token (str): The OpenAI API token. query (str): The question for which documents should be searched. Returns: List[Tuple[Document, float]]: A list of search results, where each result is a tuple containing a Document object and a float score. """ vector_db = get_db_connection(open_ai_token=open_ai_token, collection_name=collection_name) docs = vector_db.similarity_search_with_score(query, k=amount, score_threshold=threshold) logger.info("SUCCESS: Documents found.") return docs @load_config(location="config/ai/openai.yml") def summarize_text_openai(text: str, token: str, cfg: DictConfig) -> str: """Summarizes the given text using the Luminous API. Args: text (str): The text to be summarized. token (str): The token for the Luminous API. Returns: str: The summary of the text. 
""" prompt = generate_prompt(prompt_name="openai-summarization.j2", text=text, language="de") openai.api_key = token response = openai.Completion.create( engine=cfg.openai.model, prompt=prompt, temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens, top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty, presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.best_of, stop=cfg.openai.stop, ) return response.choices[0].text @load_config(location="config/ai/openai.yml") def send_completion(text: str, query: str, token: str, cfg: DictConfig) -> str: """Sent completion request to OpenAI API. Args: text (str): The text on which the completion should be based. query (str): The query for the completion. token (str): The token for the OpenAI API. cfg (DictConfig): Returns: str: Response from the OpenAI API. """ prompt = generate_prompt(prompt_name="openai-summarization.j2", text=text, query=query, language="de") openai.api_key = token response = openai.Completion.create( engine=cfg.openai.model, prompt=prompt, temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens, top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty, presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.best_of, stop=cfg.openai.stop, ) return response.choices[0].text def send_custom_completion_openai( token: str, prompt: str, model: str = "gpt3.5", max_tokens: int = 256, stop_sequences: List[str] = ["###"], temperature: float = 0, ) -> str: """Sent completion request to OpenAI API. Args: text (str): The text on which the completion should be based. query (str): The query for the completion. token (str): The token for the OpenAI API. cfg (DictConfig): Returns: str: Response from the OpenAI API. """ openai.api_key = token response = openai.Completion.create( engine=model, prompt=prompt, temperature=temperature, max_tokens=max_tokens, stop_sequences=stop_sequences, ) return response.choices[0].text def qa_openai(token: str, documents: list[tuple[Document, float]], query: str, summarization: bool = False) -> tuple[Any, str, dict[Any, Any]]: """QA Function for OpenAI LLMs. Args: token (str): The token for the OpenAI API. documents (list[tuple[Document, float]]): The documents to be searched. query (str): The question for which the LLM should generate an answer. summarization (bool, optional): If the Documents should be summarized. Defaults to False. Returns: tuple: answer, prompt, meta_data """ # if the list of documents contains only one document extract the text directly if len(documents) == 1: text = documents[0][0].page_content meta_data = documents[0][0].metadata else: # extract the text from the documents texts = [doc[0].page_content for doc in documents] if summarization: # call summarization text = "" for t in texts: text += summarize_text_openai(text=t, token=token) else: # combine the texts to one text text = " ".join(texts) meta_data = [doc[0].metadata for doc in documents] # load the prompt prompt = generate_prompt("aleph_alpha_qa.j2", text=text, query=query) try: # call the luminous api answer = send_completion(prompt, token) except ValueError as e: # if the code is PROMPT_TOO_LONG, split it into chunks if e.args[0] == "PROMPT_TOO_LONG": logger.info("Prompt too long. 
Summarizing.")
            # summarize the text
            short_text = summarize_text_openai(text, token)
            # regenerate the prompt for the shortened text (kept so it is returned to the caller)
            prompt = generate_prompt("openai-qa.j2", text=short_text, query=query)
            # call the OpenAI completion helper with the shortened text
            answer = send_completion(text=short_text, query=query, token=token)
    # extract the answer
    return answer, prompt, meta_data


if __name__ == "__main__":
    token = os.getenv("OPENAI_API_KEY")
    if not token:
        raise ValueError("OPENAI_API_KEY is not set.")
    embedd_documents_openai(dir="data", open_ai_token=token)
    DOCS = search_documents_openai(open_ai_token=token, query="Was ist Vanille?", amount=3)
    print(f"DOCUMENTS: {DOCS}")
    summary = summarize_text_openai(text="Below is an extract from the annual financial report of a company. ", token=token)
    print(f"SUMMARY: {summary}")
[ "langchain.text_splitter.NLTKTextSplitter", "langchain_community.document_loaders.DirectoryLoader", "langchain_community.embeddings.AzureOpenAIEmbeddings", "langchain_community.embeddings.OpenAIEmbeddings" ]
[((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((709, 746), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/db.yml"""'}), "(location='config/db.yml')\n", (720, 746), False, 'from ultra_simple_config import load_config\n'), ((3319, 3363), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/ai/openai.yml"""'}), "(location='config/ai/openai.yml')\n", (3330, 3363), False, 'from ultra_simple_config import load_config\n'), ((4225, 4269), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/ai/openai.yml"""'}), "(location='config/ai/openai.yml')\n", (4236, 4269), False, 'from ultra_simple_config import load_config\n'), ((1557, 1598), 'agent.utils.vdb.init_vdb', 'init_vdb', (['cfg', 'collection_name', 'embedding'], {}), '(cfg, collection_name, embedding)\n', (1565, 1598), False, 'from agent.utils.vdb import init_vdb\n'), ((2097, 2148), 'langchain.text_splitter.NLTKTextSplitter', 'NLTKTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(100)'}), '(chunk_size=500, chunk_overlap=100)\n', (2113, 2148), False, 'from langchain.text_splitter import NLTKTextSplitter\n'), ((2163, 2225), 'langchain_community.document_loaders.DirectoryLoader', 'DirectoryLoader', (['dir'], {'glob': '"""*.pdf"""', 'loader_cls': 'PyPDFium2Loader'}), "(dir, glob='*.pdf', loader_cls=PyPDFium2Loader)\n", (2178, 2225), False, 'from langchain_community.document_loaders import DirectoryLoader, PyPDFium2Loader\n'), ((2476, 2515), 'loguru.logger.info', 'logger.info', (['"""SUCCESS: Texts embedded."""'], {}), "('SUCCESS: Texts embedded.')\n", (2487, 2515), False, 'from loguru import logger\n'), ((3259, 3299), 'loguru.logger.info', 'logger.info', (['"""SUCCESS: Documents found."""'], {}), "('SUCCESS: Documents found.')\n", (3270, 3299), False, 'from loguru import logger\n'), ((3679, 3764), 'agent.utils.utility.generate_prompt', 'generate_prompt', ([], {'prompt_name': '"""openai-summarization.j2"""', 'text': 'text', 'language': '"""de"""'}), "(prompt_name='openai-summarization.j2', text=text, language='de'\n )\n", (3694, 3764), False, 'from agent.utils.utility import generate_prompt\n'), ((3803, 4122), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'cfg.openai.model', 'prompt': 'prompt', 'temperature': 'cfg.openai.temperature', 'max_tokens': 'cfg.openai.max_tokens', 'top_p': 'cfg.openai.top_p', 'frequency_penalty': 'cfg.openai.frequency_penalty', 'presence_penalty': 'cfg.openai.presence_penalty', 'best_of': 'cfg.openai.best_of', 'stop': 'cfg.openai.stop'}), '(engine=cfg.openai.model, prompt=prompt,\n temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens,\n top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty,\n presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.\n best_of, stop=cfg.openai.stop)\n', (3827, 4122), False, 'import openai\n'), ((4683, 4781), 'agent.utils.utility.generate_prompt', 'generate_prompt', ([], {'prompt_name': '"""openai-summarization.j2"""', 'text': 'text', 'query': 'query', 'language': '"""de"""'}), "(prompt_name='openai-summarization.j2', text=text, query=\n query, language='de')\n", (4698, 4781), False, 'from agent.utils.utility import generate_prompt\n'), ((4820, 5139), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'cfg.openai.model', 'prompt': 'prompt', 'temperature': 'cfg.openai.temperature', 'max_tokens': 'cfg.openai.max_tokens', 'top_p': 
'cfg.openai.top_p', 'frequency_penalty': 'cfg.openai.frequency_penalty', 'presence_penalty': 'cfg.openai.presence_penalty', 'best_of': 'cfg.openai.best_of', 'stop': 'cfg.openai.stop'}), '(engine=cfg.openai.model, prompt=prompt,\n temperature=cfg.openai.temperature, max_tokens=cfg.openai.max_tokens,\n top_p=cfg.openai.top_p, frequency_penalty=cfg.openai.frequency_penalty,\n presence_penalty=cfg.openai.presence_penalty, best_of=cfg.openai.\n best_of, stop=cfg.openai.stop)\n', (4844, 5139), False, 'import openai\n'), ((5804, 5941), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': 'model', 'prompt': 'prompt', 'temperature': 'temperature', 'max_tokens': 'max_tokens', 'stop_sequences': 'stop_sequences'}), '(engine=model, prompt=prompt, temperature=\n temperature, max_tokens=max_tokens, stop_sequences=stop_sequences)\n', (5828, 5941), False, 'import openai\n'), ((7267, 7327), 'agent.utils.utility.generate_prompt', 'generate_prompt', (['"""aleph_alpha_qa.j2"""'], {'text': 'text', 'query': 'query'}), "('aleph_alpha_qa.j2', text=text, query=query)\n", (7282, 7327), False, 'from agent.utils.utility import generate_prompt\n'), ((8017, 8044), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (8026, 8044), False, 'import os\n'), ((1188, 1311), 'langchain_community.embeddings.AzureOpenAIEmbeddings', 'AzureOpenAIEmbeddings', ([], {'deployment': 'cfg.openai.deployment', 'openai_api_version': '"""2023-05-15"""', 'openai_api_key': 'open_ai_token'}), "(deployment=cfg.openai.deployment, openai_api_version=\n '2023-05-15', openai_api_key=open_ai_token)\n", (1209, 1311), False, 'from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings\n'), ((1353, 1428), 'langchain_community.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'cfg.openai.deployment', 'openai_api_key': 'open_ai_token'}), '(model=cfg.openai.deployment, openai_api_key=open_ai_token)\n', (1369, 1428), False, 'from langchain_community.embeddings import AzureOpenAIEmbeddings, OpenAIEmbeddings\n'), ((7566, 7610), 'loguru.logger.info', 'logger.info', (['"""Prompt too long. Summarizing."""'], {}), "('Prompt too long. Summarizing.')\n", (7577, 7610), False, 'from loguru import logger\n'), ((7761, 7822), 'agent.utils.utility.generate_prompt', 'generate_prompt', (['"""openai-qa.j2"""'], {'text': 'short_text', 'query': 'query'}), "('openai-qa.j2', text=short_text, query=query)\n", (7776, 7822), False, 'from agent.utils.utility import generate_prompt\n')]
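Beyond the __main__ block above, the (Document, score) tuples returned by search_documents_openai and its optional threshold/collection arguments can be exercised as below; the sketch assumes a reachable Qdrant instance, the config files loaded by @load_config, and an OPENAI_API_KEY, and the collection name and query are placeholders.

import os

token = os.environ["OPENAI_API_KEY"]
embedd_documents_openai(dir="data", open_ai_token=token, collection_name="reports")

hits = search_documents_openai(
    open_ai_token=token,
    query="What does the report say about revenue?",
    amount=3,
    threshold=0.7,
    collection_name="reports",
)
for doc, score in hits:
    print(f"{score:.2f}  {doc.metadata.get('source')}  {doc.page_content[:80]}")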
import json from typing import Any, Callable, List from langchain_core.tracers.base import BaseTracer from langchain_core.tracers.schemas import Run from langchain_core.utils.input import get_bolded_text, get_colored_text def try_json_stringify(obj: Any, fallback: str) -> str: """ Try to stringify an object to JSON. Args: obj: Object to stringify. fallback: Fallback string to return if the object cannot be stringified. Returns: A JSON string if the object can be stringified, otherwise the fallback string. """ try: return json.dumps(obj, indent=2, ensure_ascii=False) except Exception: return fallback def elapsed(run: Any) -> str: """Get the elapsed time of a run. Args: run: any object with a start_time and end_time attribute. Returns: A string with the elapsed time in seconds or milliseconds if time is less than a second. """ elapsed_time = run.end_time - run.start_time milliseconds = elapsed_time.total_seconds() * 1000 if milliseconds < 1000: return f"{milliseconds:.0f}ms" return f"{(milliseconds / 1000):.2f}s" class FunctionCallbackHandler(BaseTracer): """Tracer that calls a function with a single str parameter.""" name: str = "function_callback_handler" def __init__(self, function: Callable[[str], None], **kwargs: Any) -> None: super().__init__(**kwargs) self.function_callback = function def _persist_run(self, run: Run) -> None: pass def get_parents(self, run: Run) -> List[Run]: parents = [] current_run = run while current_run.parent_run_id: parent = self.run_map.get(str(current_run.parent_run_id)) if parent: parents.append(parent) current_run = parent else: break return parents def get_breadcrumbs(self, run: Run) -> str: parents = self.get_parents(run)[::-1] string = " > ".join( f"{parent.execution_order}:{parent.run_type}:{parent.name}" if i != len(parents) - 1 else f"{parent.execution_order}:{parent.run_type}:{parent.name}" for i, parent in enumerate(parents + [run]) ) return string # logging methods def _on_chain_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/start]', color='green')} " + get_bolded_text(f"[{crumbs}] Entering {run_type} run with input:\n") + f"{try_json_stringify(run.inputs, '[inputs]')}" ) def _on_chain_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/end]', color='blue')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting {run_type} run with output:\n" ) + f"{try_json_stringify(run.outputs, '[outputs]')}" ) def _on_chain_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) run_type = run.run_type.capitalize() self.function_callback( f"{get_colored_text('[chain/error]', color='red')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] {run_type} run errored with error:\n" ) + f"{try_json_stringify(run.error, '[error]')}" ) def _on_llm_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) inputs = ( {"prompts": [p.strip() for p in run.inputs["prompts"]]} if "prompts" in run.inputs else run.inputs ) self.function_callback( f"{get_colored_text('[llm/start]', color='green')} " + get_bolded_text(f"[{crumbs}] Entering LLM run with input:\n") + f"{try_json_stringify(inputs, '[inputs]')}" ) def _on_llm_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[llm/end]', color='blue')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting LLM run with output:\n" ) + 
f"{try_json_stringify(run.outputs, '[response]')}" ) def _on_llm_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[llm/error]', color='red')} " + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] LLM run errored with error:\n" ) + f"{try_json_stringify(run.error, '[error]')}" ) def _on_tool_start(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f'{get_colored_text("[tool/start]", color="green")} ' + get_bolded_text(f"[{crumbs}] Entering Tool run with input:\n") + f'"{run.inputs["input"].strip()}"' ) def _on_tool_end(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) if run.outputs: self.function_callback( f'{get_colored_text("[tool/end]", color="blue")} ' + get_bolded_text( f"[{crumbs}] [{elapsed(run)}] Exiting Tool run with output:\n" ) + f'"{run.outputs["output"].strip()}"' ) def _on_tool_error(self, run: Run) -> None: crumbs = self.get_breadcrumbs(run) self.function_callback( f"{get_colored_text('[tool/error]', color='red')} " + get_bolded_text(f"[{crumbs}] [{elapsed(run)}] ") + f"Tool run errored with error:\n" f"{run.error}" ) class ConsoleCallbackHandler(FunctionCallbackHandler): """Tracer that prints to the console.""" name: str = "console_callback_handler" def __init__(self, **kwargs: Any) -> None: super().__init__(function=print, **kwargs)
[ "langchain_core.utils.input.get_colored_text", "langchain_core.utils.input.get_bolded_text" ]
[((588, 633), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (598, 633), False, 'import json\n'), ((2591, 2659), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering {run_type} run with input:\n"""'], {}), "(f'[{crumbs}] Entering {run_type} run with input:\\n')\n", (2606, 2659), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3968, 4029), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering LLM run with input:\n"""'], {}), "(f'[{crumbs}] Entering LLM run with input:\\n')\n", (3983, 4029), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5061, 5123), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering Tool run with input:\n"""'], {}), "(f'[{crumbs}] Entering Tool run with input:\\n')\n", (5076, 5123), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2525, 2573), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/start]"""'], {'color': '"""green"""'}), "('[chain/start]', color='green')\n", (2541, 2573), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((2915, 2960), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/end]"""'], {'color': '"""blue"""'}), "('[chain/end]', color='blue')\n", (2931, 2960), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3353, 3399), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[chain/error]"""'], {'color': '"""red"""'}), "('[chain/error]', color='red')\n", (3369, 3399), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((3904, 3950), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/start]"""'], {'color': '"""green"""'}), "('[llm/start]', color='green')\n", (3920, 3950), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4234, 4277), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/end]"""'], {'color': '"""blue"""'}), "('[llm/end]', color='blue')\n", (4250, 4277), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4617, 4661), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[llm/error]"""'], {'color': '"""red"""'}), "('[llm/error]', color='red')\n", (4633, 4661), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((4996, 5043), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/start]"""'], {'color': '"""green"""'}), "('[tool/start]', color='green')\n", (5012, 5043), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5744, 5789), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/error]"""'], {'color': '"""red"""'}), "('[tool/error]', color='red')\n", (5760, 5789), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n'), ((5352, 5396), 'langchain_core.utils.input.get_colored_text', 'get_colored_text', (['"""[tool/end]"""'], {'color': '"""blue"""'}), "('[tool/end]', color='blue')\n", (5368, 5396), False, 'from langchain_core.utils.input import get_bolded_text, get_colored_text\n')]
import sys from langchain.chains.summarize import load_summarize_chain from langchain import OpenAI from langchain.text_splitter import RecursiveCharacterTextSplitter text_splitter = RecursiveCharacterTextSplitter() # get transcript file key from args file_key = sys.argv[1] # get transcript text text = open(file_key, "r").read() llm = OpenAI(temperature=0) texts = text_splitter.split_text(text) from langchain.docstore.document import Document docs = [Document(page_content=t) for t in texts] chain = load_summarize_chain(llm, chain_type="map_reduce") output = chain.run(docs) print(output)
[ "langchain.chains.summarize.load_summarize_chain", "langchain.docstore.document.Document", "langchain.OpenAI", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((186, 218), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {}), '()\n', (216, 218), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((344, 365), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (350, 365), False, 'from langchain import OpenAI\n'), ((514, 564), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""map_reduce"""'}), "(llm, chain_type='map_reduce')\n", (534, 564), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((464, 488), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (472, 488), False, 'from langchain.docstore.document import Document\n')]
from base64 import b64decode import os import textwrap from math import ceil from dotenv import load_dotenv load_dotenv() # take environment variables from .env. from fastapi import FastAPI from pydantic import BaseModel from fastapi.middleware.cors import CORSMiddleware from langchain.prompts import PromptTemplate from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.llms import HuggingFaceHub from langchain_openai.llms import OpenAI from langchain.chains.summarize import load_summarize_chain from langchain.docstore.document import Document # from prompts import full_summary_template, snip_summary_template_with_context, snip_summary_template class SummarizeSnip(BaseModel): title: str summary: str = None transcript: str encoded: bool = True dev = os.getenv("FASTAPI_ENV") == "development" # headers = {"Authorization": "Bearer " + FLOWISE_API_KEY} app = FastAPI(docs_url="/api/llm/docs", redoc_url="/api/llm/redoc", openapi_url="/api/llm/openapi.json") # CORS configuration origins = [ "https://www.youtube.com", "http://localhost:3000", "https://www.sniptube.tech", ] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["GET", "POST", "OPTIONS"], allow_headers=["content-type"] ) @app.get("/api/llm/healthchecker") def healthchecker(): return {"status": "success", "message": "Integrated FastAPI Framework with Next.js and chrome extension successfully!"} @app.post("/api/llm/summarize/snip") async def summarizeSnip(item: SummarizeSnip): # set up model # llm = GPT4All(model=model_path, temp=0.1) # llm = Cohere(model="summarize-xlarge", cohere_api_key=COHERE_API_KEY, temperature=0.1) # OpenAI(temperature=0.6) if not dev else llm = OpenAI(temperature=0.6) if not dev else HuggingFaceHub(repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.6, 'max_new_tokens': 1000 }) if item.encoded: # decode from base64 title = b64decode(item.title).decode("utf-8") text = b64decode(item.transcript).decode("utf-8") summary = b64decode(item.summary).decode("utf-8") if item.summary else None else: title = item.title text = item.transcript summary = item.summary if item.summary else None PROMPT_SNIP_SUMMARY = PromptTemplate(template=snip_summary_template.format(title=title, text='{text}'), input_variables=["text"]) # TODO: refine chain? https://python.langchain.com/docs/modules/chains/popular/summarize#the-refine-chain chain = load_summarize_chain(llm, chain_type="stuff", verbose=True, prompt=PROMPT_SNIP_SUMMARY) # TODO: are metadata necessary? text_document = [Document(page_content=text, metadata={"title": title, "summary": summary, "transcript": text})] summary = chain.invoke({'input_documents': text_document}, return_only_outputs=True)['output_text'].strip() wrapped_summary = textwrap.fill(summary, width=100) return {"summary": wrapped_summary} # ---------------------------PROMPTS---------------------------------------------------------------------------------------------------- snip_summary_template = """You are a youtube section summarizer. Which means you will be given the transcript of a section of a youtube video and you need to summarize that transcript of the youtube video into a concise sentence. The sentence should only describe the main points of the given transcript TRANSCRIPT OF SECTION OF VIDEO TO CONCISELY SUMMARIZE: {text} CONCISE SUMMARIZED SENTENCE FROM TRANSCRIPT(only write one sentence): """
[ "langchain.chains.summarize.load_summarize_chain", "langchain_openai.llms.OpenAI", "langchain.docstore.document.Document", "langchain_community.llms.HuggingFaceHub" ]
[((109, 122), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (120, 122), False, 'from dotenv import load_dotenv\n'), ((930, 1033), 'fastapi.FastAPI', 'FastAPI', ([], {'docs_url': '"""/api/llm/docs"""', 'redoc_url': '"""/api/llm/redoc"""', 'openapi_url': '"""/api/llm/openapi.json"""'}), "(docs_url='/api/llm/docs', redoc_url='/api/llm/redoc', openapi_url=\n '/api/llm/openapi.json')\n", (937, 1033), False, 'from fastapi import FastAPI\n'), ((822, 846), 'os.getenv', 'os.getenv', (['"""FASTAPI_ENV"""'], {}), "('FASTAPI_ENV')\n", (831, 846), False, 'import os\n'), ((2603, 2695), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['llm'], {'chain_type': '"""stuff"""', 'verbose': '(True)', 'prompt': 'PROMPT_SNIP_SUMMARY'}), "(llm, chain_type='stuff', verbose=True, prompt=\n PROMPT_SNIP_SUMMARY)\n", (2623, 2695), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2987, 3020), 'textwrap.fill', 'textwrap.fill', (['summary'], {'width': '(100)'}), '(summary, width=100)\n', (3000, 3020), False, 'import textwrap\n'), ((1819, 1842), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.6)'}), '(temperature=0.6)\n', (1825, 1842), False, 'from langchain_openai.llms import OpenAI\n'), ((1859, 1974), 'langchain_community.llms.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': '"""tiiuae/falcon-7b-instruct"""', 'model_kwargs': "{'temperature': 0.6, 'max_new_tokens': 1000}"}), "(repo_id='tiiuae/falcon-7b-instruct', model_kwargs={\n 'temperature': 0.6, 'max_new_tokens': 1000})\n", (1873, 1974), False, 'from langchain_community.llms import HuggingFaceHub\n'), ((2748, 2846), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': "{'title': title, 'summary': summary, 'transcript': text}"}), "(page_content=text, metadata={'title': title, 'summary': summary,\n 'transcript': text})\n", (2756, 2846), False, 'from langchain.docstore.document import Document\n'), ((2042, 2063), 'base64.b64decode', 'b64decode', (['item.title'], {}), '(item.title)\n', (2051, 2063), False, 'from base64 import b64decode\n'), ((2095, 2121), 'base64.b64decode', 'b64decode', (['item.transcript'], {}), '(item.transcript)\n', (2104, 2121), False, 'from base64 import b64decode\n'), ((2156, 2179), 'base64.b64decode', 'b64decode', (['item.summary'], {}), '(item.summary)\n', (2165, 2179), False, 'from base64 import b64decode\n')]
from typing import Any, Dict, List, Union from langchain.memory.chat_memory import BaseChatMemory from langchain.schema.messages import BaseMessage, get_buffer_string class ConversationBufferWindowMemory(BaseChatMemory): """Buffer for storing conversation memory inside a limited size window.""" human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" #: :meta private: k: int = 5 """Number of messages to store in buffer.""" @property def buffer(self) -> Union[str, List[BaseMessage]]: """String buffer of memory.""" return self.buffer_as_messages if self.return_messages else self.buffer_as_str @property def buffer_as_str(self) -> str: """Exposes the buffer as a string in case return_messages is True.""" messages = self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] return get_buffer_string( messages, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix, ) @property def buffer_as_messages(self) -> List[BaseMessage]: """Exposes the buffer as a list of messages in case return_messages is False.""" return self.chat_memory.messages[-self.k * 2 :] if self.k > 0 else [] @property def memory_variables(self) -> List[str]: """Will always return list of memory variables. :meta private: """ return [self.memory_key] def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]: """Return history buffer.""" return {self.memory_key: self.buffer}
[ "langchain.schema.messages.get_buffer_string" ]
[((899, 989), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (916, 989), False, 'from langchain.schema.messages import BaseMessage, get_buffer_string\n')]
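A minimal usage sketch for the windowed memory class in the row above, assuming a released langchain version where ConversationBufferWindowMemory is importable from langchain.memory; the k value and the example exchanges are illustrative, not taken from the source.

from langchain.memory import ConversationBufferWindowMemory

# Keep only the last k=2 human/AI exchanges in the rendered history.
memory = ConversationBufferWindowMemory(k=2)
memory.save_context({"input": "hi"}, {"output": "hello"})
memory.save_context({"input": "what's new?"}, {"output": "not much"})
memory.save_context({"input": "tell me a joke"}, {"output": "maybe later"})

# Only the last two exchanges survive the window.
print(memory.load_memory_variables({})["history"])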
from typing import Any, Dict, List, Optional, Type  # type: ignore

import langchain
from langchain import LLMChain, PromptTemplate
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents import AutoGPT
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever

from sam.core.utils import logger


class AutoGptAgent:
    """Thin wrapper around the experimental AutoGPT agent."""

    agent: AutoGPT

    def __init__(
        self,
        ai_name: str,
        ai_role: str,
        memory: VectorStoreRetriever,
        llm: BaseChatModel,
        tools: List[BaseTool],
        **kwargs,
    ):
        self.agent = AutoGPT.from_llm_and_tools(
            ai_name=ai_name,
            ai_role=ai_role,
            llm=llm,
            memory=memory,
            tools=tools,
        )

    def start(self, goals: List[str]):
        return self.agent.run(goals=goals)
[ "langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools" ]
[((434, 535), 'langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools', 'AutoGPT.from_llm_and_tools', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'llm': 'llm', 'memory': 'memory', 'tools': 'tools'}), '(ai_name=ai_name, ai_role=ai_role, llm=llm,\n memory=memory, tools=tools)\n', (460, 535), False, 'from langchain.experimental.autonomous_agents import AutoGPT\n')]
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser from langchain.prompts import StringPromptTemplate from langchain import OpenAI, SerpAPIWrapper, LLMChain from typing import List, Union from langchain.schema import AgentAction, AgentFinish import re from langchain.utilities import BashProcess from langchain.tools.human.tool import HumanInputRun from langchain_tools.cwtool import CloudWatchInsightQuery import sys # Set up a prompt template class CustomPromptTemplate(StringPromptTemplate): # The template to use template: str # The list of tools available tools: List[Tool] def format(self, **kwargs) -> str: # Get the intermediate steps (AgentAction, Observation tuples) # Format them in a particular way intermediate_steps = kwargs.pop("intermediate_steps") thoughts = "" for action, observation in intermediate_steps: thoughts += action.log thoughts += f"\nObservation: {observation}\nThought: " # Set the agent_scratchpad variable to that value kwargs["agent_scratchpad"] = thoughts # Create a tools variable from the list of tools provided kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools]) # Create a list of tool names for the tools provided kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools]) return self.template.format(**kwargs) class CustomOutputParser(AgentOutputParser): def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]: # Check if agent should finish if "Final Answer:" in llm_output: return AgentFinish( # Return values is generally always a dictionary with a single `output` key # It is not recommended to try anything else at the moment :) return_values={"output": llm_output.split("Final Answer:")[-1].strip()}, log=llm_output, ) # Parse out the action and action input regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)" match = re.search(regex, llm_output, re.DOTALL) if not match: raise ValueError(f"Could not parse LLM output: `{llm_output}`") action = match.group(1).strip() action_input = match.group(2) # Return the action and action input return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output) def agent_run(user_input) -> None: # Define which tools the agent can use to answer user queries search = SerpAPIWrapper() bash = BashProcess() human = HumanInputRun() cloudwatch = CloudWatchInsightQuery() tools = [ Tool( name = "Search", func=search.run, description="useful for when you need to answer questions about current events" ), Tool( name = "human", func=human.run, description="useful for when you need to ask the human for input" ), Tool( name = "cloudwatch", func=cloudwatch.run, description="useful for when you need run an AWS cloudwatch insight query or search logs in AWS" ), Tool( name = "terminal", func=bash.run, description="useful for when you need to run commands in a terminal" ) ] # Set up the base template template = """You are a cyber Security professional. You will take steps to achieve the requested task. You have access to the following tools: {tools} Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ... 
(this Thought/Action/Action Input/Observation can repeat N times) Thought: I now know the final answer Final Answer: the final answer to the original input question Question: {input} {agent_scratchpad}""" prompt = CustomPromptTemplate(template=template,tools=tools,input_variables=["input", "intermediate_steps"]) output_parser = CustomOutputParser() llm = OpenAI(temperature=0) llm_chain = LLMChain(llm=llm, prompt=prompt) tool_names = [tool.name for tool in tools] agent = LLMSingleActionAgent( llm_chain=llm_chain, output_parser=output_parser, stop=["\nObservation:"], allowed_tools=tool_names ) agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True) agent_executor.run(user_input)
[ "langchain.agents.AgentExecutor.from_agent_and_tools", "langchain.agents.LLMSingleActionAgent", "langchain_tools.cwtool.CloudWatchInsightQuery", "langchain.LLMChain", "langchain.tools.human.tool.HumanInputRun", "langchain.utilities.BashProcess", "langchain.SerpAPIWrapper", "langchain.agents.Tool", "langchain.OpenAI" ]
[((2630, 2646), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (2644, 2646), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((2658, 2671), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (2669, 2671), False, 'from langchain.utilities import BashProcess\n'), ((2684, 2699), 'langchain.tools.human.tool.HumanInputRun', 'HumanInputRun', ([], {}), '()\n', (2697, 2699), False, 'from langchain.tools.human.tool import HumanInputRun\n'), ((2717, 2741), 'langchain_tools.cwtool.CloudWatchInsightQuery', 'CloudWatchInsightQuery', ([], {}), '()\n', (2739, 2741), False, 'from langchain_tools.cwtool import CloudWatchInsightQuery\n'), ((4330, 4351), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (4336, 4351), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((4368, 4400), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (4376, 4400), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((4461, 4587), 'langchain.agents.LLMSingleActionAgent', 'LLMSingleActionAgent', ([], {'llm_chain': 'llm_chain', 'output_parser': 'output_parser', 'stop': "['\\nObservation:']", 'allowed_tools': 'tool_names'}), "(llm_chain=llm_chain, output_parser=output_parser, stop\n =['\\nObservation:'], allowed_tools=tool_names)\n", (4481, 4587), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((4645, 4719), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent', 'tools': 'tools', 'verbose': '(True)'}), '(agent=agent, tools=tools, verbose=True)\n', (4679, 4719), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((2150, 2189), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (2159, 2189), False, 'import re\n'), ((2765, 2887), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""useful for when you need to answer questions about current events"""'}), "(name='Search', func=search.run, description=\n 'useful for when you need to answer questions about current events')\n", (2769, 2887), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((2940, 3046), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""human"""', 'func': 'human.run', 'description': '"""useful for when you need to ask the human for input"""'}), "(name='human', func=human.run, description=\n 'useful for when you need to ask the human for input')\n", (2944, 3046), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3099, 3251), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""cloudwatch"""', 'func': 'cloudwatch.run', 'description': '"""useful for when you need run an AWS cloudwatch insight query or search logs in AWS"""'}), "(name='cloudwatch', func=cloudwatch.run, description=\n 'useful for when you need run an AWS cloudwatch insight query or search logs in AWS'\n )\n", (3103, 3251), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n'), ((3299, 3410), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""terminal"""', 'func': 'bash.run', 'description': '"""useful for when you need to run commands in a terminal"""'}), "(name='terminal', func=bash.run, description=\n 'useful for 
when you need to run commands in a terminal')\n", (3303, 3410), False, 'from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser\n')]
# coding: utf-8 import os import gradio as gr import re import uuid from PIL import Image, ImageDraw, ImageOps, ImageFont import numpy as np import argparse import inspect from langchain.agents.initialize import initialize_agent from langchain.agents.tools import Tool from langchain.chains.conversation.memory import ConversationBufferMemory from gpt4tools.llm import LlamaLangChain from gpt4tools.tools import * GPT4TOOLS_PREFIX = """GPT4Tools can handle various text and visual tasks, such as answering questions and providing in-depth explanations and discussions. It generates human-like text and uses tools to indirectly understand images. When referring to images, GPT4Tools follows strict file name rules. To complete visual tasks, GPT4Tools uses tools and stays loyal to observation outputs. Users can provide new images to GPT4Tools with a description, but tools must be used for subsequent tasks. TOOLS: ------ GPT4Tools has access to the following tools:""" GPT4TOOLS_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format: ``` Thought: Do I need to use a tool? Yes Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ``` When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format: ``` Thought: Do I need to use a tool? No {ai_prefix}: [your response here] ``` """ GPT4TOOLS_SUFFIX = """Follow file name rules and do not fake non-existent file names. Remember to provide the image file name loyally from the last tool observation. Previous conversation: {chat_history} New input: {input} GPT4Tools needs to use tools to observe images, not directly imagine them. Thoughts and observations in the conversation are only visible to GPT4Tools. When answering human questions, repeat important information. Let's think step by step. 
{agent_scratchpad}""" os.makedirs('image', exist_ok=True) def cut_dialogue_history(history_memory, keep_last_n_paragraphs=1): if history_memory is None or len(history_memory) == 0: return history_memory paragraphs = history_memory.split('Human:') if len(paragraphs) <= keep_last_n_paragraphs: return history_memory return 'Human:' + 'Human:'.join(paragraphs[-1:]) class ConversationBot: def __init__(self, load_dict, llm_kwargs): # load_dict = {'VisualQuestionAnswering':'cuda:0', 'ImageCaptioning':'cuda:1',...} print(f"Initializing GPT4Tools, load_dict={load_dict}") if 'ImageCaptioning' not in load_dict: raise ValueError("You have to load ImageCaptioning as a basic function for GPT4Tools") self.models = {} # Load Basic Foundation Models for class_name, device in load_dict.items(): self.models[class_name] = globals()[class_name](device=device) # Load Template Foundation Models for class_name, module in globals().items(): if getattr(module, 'template_model', False): template_required_names = {k for k in inspect.signature(module.__init__).parameters.keys() if k!='self'} loaded_names = set([type(e).__name__ for e in self.models.values()]) if template_required_names.issubset(loaded_names): self.models[class_name] = globals()[class_name]( **{name: self.models[name] for name in template_required_names}) print(f"All the Available Functions: {self.models}") self.tools = [] for instance in self.models.values(): for e in dir(instance): if e.startswith('inference'): func = getattr(instance, e) self.tools.append(Tool(name=func.name, description=func.description, func=func)) self.llm = LlamaLangChain(model_kwargs=llm_kwargs) self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output') def init_agent(self, lang): self.memory.clear() #clear previous history if lang=='English': PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = GPT4TOOLS_PREFIX, GPT4TOOLS_FORMAT_INSTRUCTIONS, GPT4TOOLS_SUFFIX place = "Enter text and press enter, or upload an image" label_clear = "Clear" else: raise NotImplementedError(f'{lang} is not supported yet') self.agent = initialize_agent( self.tools, self.llm, agent="conversational-react-description", verbose=True, memory=self.memory, return_intermediate_steps=True, agent_kwargs={'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX}, ) return gr.update(visible = True), gr.update(visible = False), gr.update(placeholder=place), gr.update(value=label_clear) def run_text(self, text, state, temperature, top_p, max_new_tokens, keep_last_n_paragraphs): self.llm.set_llm_params(temperature=temperature, top_p=top_p, max_new_tokens=max_new_tokens) self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_paragraphs) res = self.agent({"input": text.strip()}) res['output'] = res['output'].replace("\\", "/") response = re.sub('(image/[-\w]*.png)', lambda m: f'![](file={m.group(0)})*{m.group(0)}*', res['output']) state = state + [(text, response)] print(f"\nProcessed run_text, Input text: {text}\nCurrent state: {state}\n" f"Current Memory: {self.agent.memory.buffer}") image_filenames = re.findall('image/.*.png', str(self.agent.memory.buffer)) image_filename = image_filenames[-1] if len(image_filenames) > 0 else '' return state, state, f'{image_filename} ' def run_image(self, image, state, txt, lang='English'): if image is None: return state, state, txt image_filename = os.path.join('image', f"{str(uuid.uuid4())[:8]}.png") print("======>Auto Resize Image...") img = image width, height = img.size ratio = min(512 / width, 512 / height) 
width_new, height_new = (round(width * ratio), round(height * ratio)) width_new = int(np.round(width_new / 64.0)) * 64 height_new = int(np.round(height_new / 64.0)) * 64 img = img.resize((width_new, height_new)) img = img.convert('RGB') img.save(image_filename, "PNG") print(f"Resize image form {width}x{height} to {width_new}x{height_new}") description = self.models['ImageCaptioning'].inference(image_filename) if lang == 'English': Human_prompt = f'\nHuman: Provide an image named {image_filename}. The description is: {description}. Understand the image using tools.\n' AI_prompt = "Received." else: raise NotImplementedError(f'{lang} is not supported yet') self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt state = state + [(f"![](file={image_filename})*{image_filename}*", AI_prompt)] print(f"\nProcessed run_image, Input image: {image_filename}\nCurrent state: {state}\n" f"Current Memory: {self.agent.memory.buffer}") return state, state, f'{image_filename} {txt}' if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--base_model', type=str, required=True, help='folder path to the vicuna with tokenizer') parser.add_argument('--lora_model', type=str, required=True, help='folder path to the lora model') parser.add_argument('--load', type=str, default='ImageCaptioning_cuda:0,Text2Image_cuda:0') parser.add_argument('--llm_device', type=str, default='cpu', help='device to run the llm model') parser.add_argument('--temperature', type=float, default=0.1, help='temperature for the llm model') parser.add_argument('--max_new_tokens', type=int, default=512, help='max number of new tokens to generate') parser.add_argument('--top_p', type=float, default=0.75, help='top_p for the llm model') parser.add_argument('--top_k', type=int, default=40, help='top_k for the llm model') parser.add_argument('--num_beams', type=int, default=1, help='num_beams for the llm model') parser.add_argument('--keep_last_n_paragraphs', type=int, default=1, help='keep last n paragraphs in the memory') parser.add_argument('--cache-dir', type=str, default=None, help="cache path to save model") parser.add_argument('--server-name', type=str, default='0.0.0.0', help="gradio sever name") parser.add_argument('--server-port', type=int, default=8888, help="gradio server port") parser.add_argument('--share', action="store_true") args = parser.parse_args() load_dict = {e.split('_')[0].strip(): e.split('_')[1].strip() for e in args.load.split(',')} llm_kwargs = {'base_model': args.base_model, 'lora_model': args.lora_model, 'device': args.llm_device, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'num_beams': args.num_beams, 'cache_dir': args.cache_dir,} bot = ConversationBot(load_dict=load_dict, llm_kwargs=llm_kwargs) examples = [ ['asserts/images/example-1.jpg','Make the image look like a cartoon.'], ['asserts/images/example-2.jpg','Segment the tie in the image.'], ['asserts/images/example-3.jpg','Generate a man watching a sea based on the pose of the woman.'], ['asserts/images/example-4.jpg','Tell me a story about this image.'], ] with gr.Blocks() as demo: with gr.Row(): with gr.Column(scale=0.3): with gr.Row(): image = gr.Image(type="pil", label="input image") with gr.Row(): txt = gr.Textbox(lines=7, show_label=False, elem_id="textbox", placeholder="Enter text and press submit, or upload an image").style(container=False) with gr.Row(): submit = gr.Button("Submit") with gr.Row(): clear = gr.Button("Clear") with 
gr.Row(): keep_last_n_paragraphs = gr.Slider( minimum=0, maximum=3, value=args.keep_last_n_paragraphs, step=1, interactive=True, label="Remember Last N Paragraphs") max_new_token = gr.Slider( minimum=128, maximum=1024, value=args.max_new_tokens, step=64, interactive=True, label="Max New Tokens") temperature = gr.Slider( minimum=0.0, maximum=1.0, value=args.temperature, step=0.1, interactive=True, label="Temperature") top_p = gr.Slider( minimum=0.0, maximum=1.0, value=args.top_p, step=0.1, interactive=True, label="Top P") with gr.Column(scale=0.7): chatbot = gr.Chatbot(elem_id="chatbot", label="🦙 GPT4Tools").style(height=690) state = gr.State([]) # TODO: support more language bot.init_agent('English') txt.submit(bot.run_text, [txt, state], [chatbot, state]) txt.submit(lambda: "", None, txt) # submit.click(bot.run_image, [image, state, txt], [chatbot, state, txt]).then( # bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state]).then( # lambda: "", None, txt).then( # lambda: None, None, image) submit.click(bot.run_image, [image, state, txt], [chatbot, state, txt]).then( bot.run_text, [txt, state, temperature, top_p, max_new_token, keep_last_n_paragraphs], [chatbot, state, txt]).then( lambda: None, None, image) clear.click(bot.memory.clear) clear.click(lambda: [], None, chatbot) clear.click(lambda: [], None, state) with gr.Row(): gr.Examples( examples=examples, inputs=[image, txt], ) demo.launch(server_name=args.server_name, server_port=args.server_port, enable_queue=True, share=args.share)
[ "langchain.agents.initialize.initialize_agent", "langchain.agents.tools.Tool", "langchain.chains.conversation.memory.ConversationBufferMemory" ]
[((1924, 1959), 'os.makedirs', 'os.makedirs', (['"""image"""'], {'exist_ok': '(True)'}), "('image', exist_ok=True)\n", (1935, 1959), False, 'import os\n'), ((7453, 7478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7476, 7478), False, 'import argparse\n'), ((3840, 3879), 'gpt4tools.llm.LlamaLangChain', 'LlamaLangChain', ([], {'model_kwargs': 'llm_kwargs'}), '(model_kwargs=llm_kwargs)\n', (3854, 3879), False, 'from gpt4tools.llm import LlamaLangChain\n'), ((3903, 3975), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""output"""'}), "(memory_key='chat_history', output_key='output')\n", (3927, 3975), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((4413, 4667), 'langchain.agents.initialize.initialize_agent', 'initialize_agent', (['self.tools', 'self.llm'], {'agent': '"""conversational-react-description"""', 'verbose': '(True)', 'memory': 'self.memory', 'return_intermediate_steps': '(True)', 'agent_kwargs': "{'prefix': PREFIX, 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX\n }"}), "(self.tools, self.llm, agent=\n 'conversational-react-description', verbose=True, memory=self.memory,\n return_intermediate_steps=True, agent_kwargs={'prefix': PREFIX,\n 'format_instructions': FORMAT_INSTRUCTIONS, 'suffix': SUFFIX})\n", (4429, 4667), False, 'from langchain.agents.initialize import initialize_agent\n'), ((9840, 9851), 'gradio.Blocks', 'gr.Blocks', ([], {}), '()\n', (9849, 9851), True, 'import gradio as gr\n'), ((4783, 4806), 'gradio.update', 'gr.update', ([], {'visible': '(True)'}), '(visible=True)\n', (4792, 4806), True, 'import gradio as gr\n'), ((4810, 4834), 'gradio.update', 'gr.update', ([], {'visible': '(False)'}), '(visible=False)\n', (4819, 4834), True, 'import gradio as gr\n'), ((4838, 4866), 'gradio.update', 'gr.update', ([], {'placeholder': 'place'}), '(placeholder=place)\n', (4847, 4866), True, 'import gradio as gr\n'), ((4868, 4896), 'gradio.update', 'gr.update', ([], {'value': 'label_clear'}), '(value=label_clear)\n', (4877, 4896), True, 'import gradio as gr\n'), ((9874, 9882), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9880, 9882), True, 'import gradio as gr\n'), ((12740, 12748), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (12746, 12748), True, 'import gradio as gr\n'), ((12762, 12813), 'gradio.Examples', 'gr.Examples', ([], {'examples': 'examples', 'inputs': '[image, txt]'}), '(examples=examples, inputs=[image, txt])\n', (12773, 12813), True, 'import gradio as gr\n'), ((6340, 6366), 'numpy.round', 'np.round', (['(width_new / 64.0)'], {}), '(width_new / 64.0)\n', (6348, 6366), True, 'import numpy as np\n'), ((6398, 6425), 'numpy.round', 'np.round', (['(height_new / 64.0)'], {}), '(height_new / 64.0)\n', (6406, 6425), True, 'import numpy as np\n'), ((9901, 9921), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.3)'}), '(scale=0.3)\n', (9910, 9921), True, 'import gradio as gr\n'), ((11640, 11660), 'gradio.Column', 'gr.Column', ([], {'scale': '(0.7)'}), '(scale=0.7)\n', (11649, 11660), True, 'import gradio as gr\n'), ((11781, 11793), 'gradio.State', 'gr.State', (['[]'], {}), '([])\n', (11789, 11793), True, 'import gradio as gr\n'), ((9944, 9952), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (9950, 9952), True, 'import gradio as gr\n'), ((9982, 10023), 'gradio.Image', 'gr.Image', ([], {'type': '"""pil"""', 'label': '"""input image"""'}), "(type='pil', label='input image')\n", (9990, 10023), True, 'import gradio 
as gr\n'), ((10045, 10053), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10051, 10053), True, 'import gradio as gr\n'), ((10282, 10290), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10288, 10290), True, 'import gradio as gr\n'), ((10321, 10340), 'gradio.Button', 'gr.Button', (['"""Submit"""'], {}), "('Submit')\n", (10330, 10340), True, 'import gradio as gr\n'), ((10362, 10370), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10368, 10370), True, 'import gradio as gr\n'), ((10400, 10418), 'gradio.Button', 'gr.Button', (['"""Clear"""'], {}), "('Clear')\n", (10409, 10418), True, 'import gradio as gr\n'), ((10440, 10448), 'gradio.Row', 'gr.Row', ([], {}), '()\n', (10446, 10448), True, 'import gradio as gr\n'), ((10495, 10627), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0)', 'maximum': '(3)', 'value': 'args.keep_last_n_paragraphs', 'step': '(1)', 'interactive': '(True)', 'label': '"""Remember Last N Paragraphs"""'}), "(minimum=0, maximum=3, value=args.keep_last_n_paragraphs, step=1,\n interactive=True, label='Remember Last N Paragraphs')\n", (10504, 10627), True, 'import gradio as gr\n'), ((10805, 10923), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(128)', 'maximum': '(1024)', 'value': 'args.max_new_tokens', 'step': '(64)', 'interactive': '(True)', 'label': '"""Max New Tokens"""'}), "(minimum=128, maximum=1024, value=args.max_new_tokens, step=64,\n interactive=True, label='Max New Tokens')\n", (10814, 10923), True, 'import gradio as gr\n'), ((11099, 11211), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': 'args.temperature', 'step': '(0.1)', 'interactive': '(True)', 'label': '"""Temperature"""'}), "(minimum=0.0, maximum=1.0, value=args.temperature, step=0.1,\n interactive=True, label='Temperature')\n", (11108, 11211), True, 'import gradio as gr\n'), ((11381, 11482), 'gradio.Slider', 'gr.Slider', ([], {'minimum': '(0.0)', 'maximum': '(1.0)', 'value': 'args.top_p', 'step': '(0.1)', 'interactive': '(True)', 'label': '"""Top P"""'}), "(minimum=0.0, maximum=1.0, value=args.top_p, step=0.1, interactive\n =True, label='Top P')\n", (11390, 11482), True, 'import gradio as gr\n'), ((3758, 3819), 'langchain.agents.tools.Tool', 'Tool', ([], {'name': 'func.name', 'description': 'func.description', 'func': 'func'}), '(name=func.name, description=func.description, func=func)\n', (3762, 3819), False, 'from langchain.agents.tools import Tool\n'), ((11688, 11738), 'gradio.Chatbot', 'gr.Chatbot', ([], {'elem_id': '"""chatbot"""', 'label': '"""🦙 GPT4Tools"""'}), "(elem_id='chatbot', label='🦙 GPT4Tools')\n", (11698, 11738), True, 'import gradio as gr\n'), ((6068, 6080), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6078, 6080), False, 'import uuid\n'), ((10081, 10205), 'gradio.Textbox', 'gr.Textbox', ([], {'lines': '(7)', 'show_label': '(False)', 'elem_id': '"""textbox"""', 'placeholder': '"""Enter text and press submit, or upload an image"""'}), "(lines=7, show_label=False, elem_id='textbox', placeholder=\n 'Enter text and press submit, or upload an image')\n", (10091, 10205), True, 'import gradio as gr\n'), ((3072, 3106), 'inspect.signature', 'inspect.signature', (['module.__init__'], {}), '(module.__init__)\n', (3089, 3106), False, 'import inspect\n')]
"""Zero-shot agent with toolkit.""" import re from langchain.agents.agent import Agent from langchain.agents.mrkl.base import ZeroShotAgent from langchain.base_language import BaseLanguageModel from langchain.callbacks.base import BaseCallbackManager from langchain.chains import LLMChain from langchain.prompts import PromptTemplate from langchain.prompts.chat import ( AIMessagePromptTemplate, BaseStringMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, ) from langchain.schema import ( AgentAction, AgentFinish, AIMessage, BaseMessage, HumanMessage, SystemMessage, ) from langchain.tools.base import BaseTool from procoder.functional import ( add_refnames, collect_refnames, format_multiple_prompts, format_prompt, ) from toolemu.prompts.agent import * from toolemu.tools.tool_interface import BaseToolkit from toolemu.utils import get_first_json_object_str from toolemu.utils.llm import get_model_category from toolemu.utils.my_typing import * FINAL_ANSWER_ACTION = "Final Answer:" AGENT_TYPES = ["naive", "ss_only", "helpful_ss"] class ZeroShotAgentWithToolkit(ZeroShotAgent): @staticmethod def get_all_tools(toolkits: Sequence[BaseToolkit]) -> List[BaseTool]: """Return all tools available to the agent.""" all_tools = [] for toolkit in toolkits: all_tools += toolkit.tools return all_tools @classmethod def create_prompt( cls, toolkits: Sequence[BaseToolkit], prompt_type: Optional[str] = "naive", input_variables: Optional[List[str]] = None, use_chat_format: Optional[bool] = False, ) -> PromptTemplate: """Create prompt in the style of the zero shot agent.""" toolkit_strings = "\n".join( [toolkit.create_description("medium") for toolkit in toolkits] ) tool_names = ", ".join([tool.name for tool in cls.get_all_tools(toolkits)]) inputs = dict(toolkit_descriptions=toolkit_strings, tool_names=tool_names) add_refnames(AGENT_DUMMY_VARS, inputs, include_brackets=False) system_info = AGENT_SYSTEM_INFO prompt_instruction = eval(f"AGENT_{prompt_type.upper()}_PROMPT") system_info, prompt_instruction = format_multiple_prompts( [system_info, prompt_instruction], inputs, include_brackets=[False, True] ) if use_chat_format: agent_system_message = SystemMessage(content=system_info) agent_instruction_message = HumanMessagePromptTemplate.from_template( template=prompt_instruction ) messages = [ agent_system_message, agent_instruction_message, ] return ChatPromptTemplate.from_messages(messages=messages) else: template = "\n\n".join([system_info, prompt_instruction]) if input_variables is None: input_variables = ["input", "agent_scratchpad"] return PromptTemplate(template=template, input_variables=input_variables) @classmethod def from_llm_and_toolkits( cls, llm: BaseLanguageModel, toolkits: Sequence[BaseToolkit], agent_type: Optional[str] = "naive", callback_manager: Optional[BaseCallbackManager] = None, use_chat_format: Optional[bool] = False, input_variables: Optional[List[str]] = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools.""" tools = cls.get_all_tools(toolkits) cls._validate_tools(tools) assert agent_type in AGENT_TYPES, f"agent_type must be one of {AGENT_TYPES}" if get_model_category(llm) == "claude": prompt_type = agent_type + "_claude" else: prompt_type = agent_type prompt = cls.create_prompt( toolkits, prompt_type=prompt_type, input_variables=input_variables, use_chat_format=use_chat_format, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] return 
cls(llm_chain=llm_chain, allowed_tools=tool_names, **kwargs) def _fix_text(self, text: str): text = text.lstrip() if text.startswith(self.llm_prefix): text = text[len(self.llm_prefix) :] return text.rstrip() + "\n" def _extract_tool_and_input(self, text: str) -> Optional[Tuple[str, str]]: try: result = self._get_action_and_input(text) except ValueError: # result = "None", "{}" result = None # return None, so that the agent can continue its generation return result ############# Adapted from Langchain (0.0.141) for compatibility ############# @property def finish_tool_name(self) -> str: """Return the name of the finish tool.""" return "Final Answer" def _get_action_and_input(self, llm_output: str) -> Tuple[str, str]: """Parse out the action and input from the LLM output. Note: if you're specifying a custom prompt for the ZeroShotAgent, you will need to ensure that it meets the following Regex requirements. The string starting with "Action:" and the following string starting with "Action Input:" should be separated by a newline. """ if FINAL_ANSWER_ACTION in llm_output: return ( self.finish_tool_name, llm_output.split(FINAL_ANSWER_ACTION)[-1].strip(), ) # \s matches against tab/newline/whitespace regex = r"Action: (.*?)[\n]*Action Input:[\s]*(.*)" match = re.search(regex, llm_output, re.DOTALL) if not match: raise ValueError(f"Could not parse LLM output: `{llm_output}`") action = match.group(1).strip() action_input = match.group(2) # disable checking to avoid exception, let the tool call validate the inputs action_input = get_first_json_object_str( action_input, enable_check=False, strict=False ) # set strict=False to allow single quote, comments, escape characters, etc return action, action_input.strip(" ").strip('"') def _get_next_action(self, full_inputs: Dict[str, str]) -> AgentAction: full_output = self.llm_chain.predict(**full_inputs) full_output = self._fix_text(full_output) parsed_output = self._extract_tool_and_input(full_output) while parsed_output is None: full_output = self._fix_text(full_output) full_inputs["agent_scratchpad"] += full_output output = self.llm_chain.predict(**full_inputs) full_output += output parsed_output = self._extract_tool_and_input(full_output) return AgentAction( tool=parsed_output[0], tool_input=parsed_output[1], log=full_output ) async def _aget_next_action(self, full_inputs: Dict[str, str]) -> AgentAction: full_output = await self.llm_chain.apredict(**full_inputs) parsed_output = self._extract_tool_and_input(full_output) while parsed_output is None: full_output = self._fix_text(full_output) full_inputs["agent_scratchpad"] += full_output output = await self.llm_chain.apredict(**full_inputs) full_output += output parsed_output = self._extract_tool_and_input(full_output) return AgentAction( tool=parsed_output[0], tool_input=parsed_output[1], log=full_output ) def plan( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) action = self._get_next_action(full_inputs) if action.tool == self.finish_tool_name: return AgentFinish({"output": action.tool_input}, action.log) return action async def aplan( self, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any ) -> Union[AgentAction, AgentFinish]: """Given input, decided what to do. 
Args: intermediate_steps: Steps the LLM has taken to date, along with observations **kwargs: User inputs. Returns: Action specifying what tool to use. """ full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) action = await self._aget_next_action(full_inputs) if action.tool == self.finish_tool_name: return AgentFinish({"output": action.tool_input}, action.log) return action
[ "langchain.schema.AgentAction", "langchain.schema.AgentFinish", "langchain.schema.SystemMessage", "langchain.prompts.chat.HumanMessagePromptTemplate.from_template", "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.prompts.chat.ChatPromptTemplate.from_messages" ]
[((2095, 2157), 'procoder.functional.add_refnames', 'add_refnames', (['AGENT_DUMMY_VARS', 'inputs'], {'include_brackets': '(False)'}), '(AGENT_DUMMY_VARS, inputs, include_brackets=False)\n', (2107, 2157), False, 'from procoder.functional import add_refnames, collect_refnames, format_multiple_prompts, format_prompt\n'), ((2315, 2417), 'procoder.functional.format_multiple_prompts', 'format_multiple_prompts', (['[system_info, prompt_instruction]', 'inputs'], {'include_brackets': '[False, True]'}), '([system_info, prompt_instruction], inputs,\n include_brackets=[False, True])\n', (2338, 2417), False, 'from procoder.functional import add_refnames, collect_refnames, format_multiple_prompts, format_prompt\n'), ((4113, 4180), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callback_manager': 'callback_manager'}), '(llm=llm, prompt=prompt, callback_manager=callback_manager)\n', (4121, 4180), False, 'from langchain.chains import LLMChain\n'), ((5848, 5887), 're.search', 're.search', (['regex', 'llm_output', 're.DOTALL'], {}), '(regex, llm_output, re.DOTALL)\n', (5857, 5887), False, 'import re\n'), ((6172, 6245), 'toolemu.utils.get_first_json_object_str', 'get_first_json_object_str', (['action_input'], {'enable_check': '(False)', 'strict': '(False)'}), '(action_input, enable_check=False, strict=False)\n', (6197, 6245), False, 'from toolemu.utils import get_first_json_object_str\n'), ((6990, 7075), 'langchain.schema.AgentAction', 'AgentAction', ([], {'tool': 'parsed_output[0]', 'tool_input': 'parsed_output[1]', 'log': 'full_output'}), '(tool=parsed_output[0], tool_input=parsed_output[1], log=full_output\n )\n', (7001, 7075), False, 'from langchain.schema import AgentAction, AgentFinish, AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((7645, 7730), 'langchain.schema.AgentAction', 'AgentAction', ([], {'tool': 'parsed_output[0]', 'tool_input': 'parsed_output[1]', 'log': 'full_output'}), '(tool=parsed_output[0], tool_input=parsed_output[1], log=full_output\n )\n', (7656, 7730), False, 'from langchain.schema import AgentAction, AgentFinish, AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2500, 2534), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_info'}), '(content=system_info)\n', (2513, 2534), False, 'from langchain.schema import AgentAction, AgentFinish, AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2575, 2644), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', ([], {'template': 'prompt_instruction'}), '(template=prompt_instruction)\n', (2615, 2644), False, 'from langchain.prompts.chat import AIMessagePromptTemplate, BaseStringMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((2815, 2866), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', ([], {'messages': 'messages'}), '(messages=messages)\n', (2847, 2866), False, 'from langchain.prompts.chat import AIMessagePromptTemplate, BaseStringMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((3075, 3141), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': 'input_variables'}), '(template=template, input_variables=input_variables)\n', (3089, 3141), False, 'from langchain.prompts import PromptTemplate\n'), ((3760, 3783), 'toolemu.utils.llm.get_model_category', 'get_model_category', (['llm'], {}), 
'(llm)\n', (3778, 3783), False, 'from toolemu.utils.llm import get_model_category\n'), ((8354, 8408), 'langchain.schema.AgentFinish', 'AgentFinish', (["{'output': action.tool_input}", 'action.log'], {}), "({'output': action.tool_input}, action.log)\n", (8365, 8408), False, 'from langchain.schema import AgentAction, AgentFinish, AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((9051, 9105), 'langchain.schema.AgentFinish', 'AgentFinish', (["{'output': action.tool_input}", 'action.log'], {}), "({'output': action.tool_input}, action.log)\n", (9062, 9105), False, 'from langchain.schema import AgentAction, AgentFinish, AIMessage, BaseMessage, HumanMessage, SystemMessage\n')]
#import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())  # read local .env file

import warnings
warnings.filterwarnings("ignore")

from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
#from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI
import langchain

llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math", "wikipedia"], llm=llm)

customer_list = [["Harrison", "Chase"],
                 ["Lang", "Chain"],
                 ["Dolly", "Too"],
                 ["Elle", "Elem"],
                 ["Geoff", "Fusion"],
                 ["Trance", "Former"],
                 ["Jen", "Ayai"]]


def do_answer1():
    langchain.debug = True
    agent = create_python_agent(
        llm,
        tool=PythonREPLTool(),
        verbose=True
    )

    answer = agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
    print(answer)
    langchain.debug = False


def do_answer2():
    from langchain.agents import tool
    from datetime import date

    langchain.debug = True

    @tool
    def time(text: str) -> str:
        """Returns today's date, use this for any \
questions related to knowing today's date. \
The input should always be an empty string, \
and this function will always return today's \
date - any date mathematics should occur \
outside this function."""
        return str(date.today())

    agent = initialize_agent(
        tools + [time],
        llm,
        agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
        handle_parsing_errors=True,
        verbose=True)

    # Initialize result so the print below cannot raise NameError if the
    # agent call fails and the except branch runs.
    result = None
    try:
        result = agent("what's the date today?")
    except:  # noqa
        print("exception on external access")
    print(result)

    langchain.debug = False


if __name__ == "__main__":
    #do_answer1()
    do_answer2()
[ "langchain.agents.initialize_agent", "langchain.tools.python.tool.PythonREPLTool", "langchain.agents.load_tools", "langchain.chat_models.ChatOpenAI" ]
[((128, 161), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (151, 161), False, 'import warnings\n'), ((489, 514), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (499, 514), False, 'from langchain.chat_models import ChatOpenAI\n'), ((523, 569), 'langchain.agents.load_tools', 'load_tools', (["['llm-math', 'wikipedia']"], {'llm': 'llm'}), "(['llm-math', 'wikipedia'], llm=llm)\n", (533, 569), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((72, 85), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (83, 85), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((1666, 1800), 'langchain.agents.initialize_agent', 'initialize_agent', (['(tools + [time])', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools + [time], llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (1682, 1800), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((936, 952), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (950, 952), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((1638, 1650), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1648, 1650), False, 'from datetime import date\n')]
from typing import List, Optional, Type from langchain.memory import ( ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory, ) class Memory: @staticmethod def messageHistory(path: str): history = ChatMessageHistory() return history @staticmethod def bufferMemory(path: str): memory = ConversationBufferMemory() return memory @staticmethod def chatSummary(path: str): memory = ConversationSummaryMemory() return memory
[ "langchain.memory.ConversationSummaryMemory", "langchain.memory.ConversationBufferMemory", "langchain.memory.ChatMessageHistory" ]
[((329, 349), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (347, 349), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((442, 468), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (466, 468), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((559, 586), 'langchain.memory.ConversationSummaryMemory', 'ConversationSummaryMemory', ([], {}), '()\n', (584, 586), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n')]
"""Callback Handler that prints to std out.""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict, Optional from langchain_core.callbacks.base import BaseCallbackHandler from langchain_core.utils import print_text if TYPE_CHECKING: from langchain_core.agents import AgentAction, AgentFinish class StdOutCallbackHandler(BaseCallbackHandler): """Callback Handler that prints to std out.""" def __init__(self, color: Optional[str] = None) -> None: """Initialize callback handler.""" self.color = color def on_chain_start( self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any ) -> None: """Print out that we are entering a chain.""" class_name = serialized.get("name", serialized.get("id", ["<unknown>"])[-1]) print(f"\n\n\033[1m> Entering new {class_name} chain...\033[0m") # noqa: T201 def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: """Print out that we finished a chain.""" print("\n\033[1m> Finished chain.\033[0m") # noqa: T201 def on_agent_action( self, action: AgentAction, color: Optional[str] = None, **kwargs: Any ) -> Any: """Run on agent action.""" print_text(action.log, color=color or self.color) def on_tool_end( self, output: Any, color: Optional[str] = None, observation_prefix: Optional[str] = None, llm_prefix: Optional[str] = None, **kwargs: Any, ) -> None: """If not the final action, print out observation.""" output = str(output) if observation_prefix is not None: print_text(f"\n{observation_prefix}") print_text(output, color=color or self.color) if llm_prefix is not None: print_text(f"\n{llm_prefix}") def on_text( self, text: str, color: Optional[str] = None, end: str = "", **kwargs: Any, ) -> None: """Run when agent ends.""" print_text(text, color=color or self.color, end=end) def on_agent_finish( self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any ) -> None: """Run on agent end.""" print_text(finish.log, color=color or self.color, end="\n")
[ "langchain_core.utils.print_text" ]
[((1261, 1310), 'langchain_core.utils.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)'}), '(action.log, color=color or self.color)\n', (1271, 1310), False, 'from langchain_core.utils import print_text\n'), ((1727, 1772), 'langchain_core.utils.print_text', 'print_text', (['output'], {'color': '(color or self.color)'}), '(output, color=color or self.color)\n', (1737, 1772), False, 'from langchain_core.utils import print_text\n'), ((2042, 2094), 'langchain_core.utils.print_text', 'print_text', (['text'], {'color': '(color or self.color)', 'end': 'end'}), '(text, color=color or self.color, end=end)\n', (2052, 2094), False, 'from langchain_core.utils import print_text\n'), ((2254, 2313), 'langchain_core.utils.print_text', 'print_text', (['finish.log'], {'color': '(color or self.color)', 'end': '"""\n"""'}), "(finish.log, color=color or self.color, end='\\n')\n", (2264, 2313), False, 'from langchain_core.utils import print_text\n'), ((1681, 1721), 'langchain_core.utils.print_text', 'print_text', (['f"""\n{observation_prefix}"""'], {}), '(f"""\n{observation_prefix}""")\n', (1691, 1721), False, 'from langchain_core.utils import print_text\n'), ((1820, 1849), 'langchain_core.utils.print_text', 'print_text', (['f"""\n{llm_prefix}"""'], {}), "(f'\\n{llm_prefix}')\n", (1830, 1849), False, 'from langchain_core.utils import print_text\n')]
import os from langchain import ElasticVectorSearch from langchain.docstore.document import Document from langchain.vectorstores import VectorStore from langchain.embeddings.base import Embeddings db_persistent_path = f"""{os.environ["db_persistent_path"]}/elasticsearch""" INDEX_NAME = "esindex" def upload(documents: list[Document], embeddings: Embeddings) -> None: return ElasticVectorSearch.from_documents( documents, embeddings, elasticsearch_url="http://localhost:9200", index_name=INDEX_NAME, ) def create(embeddings: Embeddings) -> VectorStore: return ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name=INDEX_NAME, embedding=embeddings, )
[ "langchain.ElasticVectorSearch", "langchain.ElasticVectorSearch.from_documents" ]
[((382, 510), 'langchain.ElasticVectorSearch.from_documents', 'ElasticVectorSearch.from_documents', (['documents', 'embeddings'], {'elasticsearch_url': '"""http://localhost:9200"""', 'index_name': 'INDEX_NAME'}), "(documents, embeddings, elasticsearch_url\n ='http://localhost:9200', index_name=INDEX_NAME)\n", (416, 510), False, 'from langchain import ElasticVectorSearch\n'), ((609, 721), 'langchain.ElasticVectorSearch', 'ElasticVectorSearch', ([], {'elasticsearch_url': '"""http://localhost:9200"""', 'index_name': 'INDEX_NAME', 'embedding': 'embeddings'}), "(elasticsearch_url='http://localhost:9200', index_name=\n INDEX_NAME, embedding=embeddings)\n", (628, 721), False, 'from langchain import ElasticVectorSearch\n')]
import json from pydantic import BaseModel, Field from pydantic import BaseModel, Field from langchain.llms.base import BaseLLM from typing import List, Any from langchain import LLMChain from llm.generate_task_plan.prompt import get_template from llm.list_output_parser import LLMListOutputParser class Task(BaseModel): """Task model.""" id: int = Field(..., description="Task ID") description: str = Field(..., description="Task description") is_done: bool = Field(False, description="Task done or not") result: str = Field("", description="The result of the task") class TaskManeger(BaseModel): """Task manager model.""" tasks: List[Task] = Field([], description="The list of tasks") current_task_id: int = Field(1, description="The last task id") llm: BaseLLM = Field(..., description="llm class for the agent") def generate_task_plan(self, name: str, role: str, goal: str): """Generate a task plan for the agent.""" propmt = get_template() llm_chain = LLMChain(prompt=propmt, llm=self.llm) try: result = llm_chain.predict( name=name, role=role, goal=goal ) except Exception as e: raise Exception(f"Error: {e}") # Parse and validate the result try: result_list = LLMListOutputParser.parse(result, separeted_string="\t") except Exception as e: raise Exception("Error: " + str(e)) # Add tasks with a serial number for i, e in enumerate(result_list, start=1): id = int(i) description = e self.tasks.append(Task(id=id, description=description)) self def get_task_by_id(self, id: int) -> Task: """Get a task by Task id.""" for task in self.tasks: if task.id == id: return task return None def get_current_task(self) -> Task: """Get the current task agent is working on.""" return self.get_task_by_id(self.current_task_id) def get_current_task_string(self) -> str: """Get the current task agent is working on as a string.""" task = self.get_current_task() if task is None: return None else: return self._task_to_string(task) def complete_task(self, id: int, result: str) -> None: """Complete a task by Task id.""" # Complete the task specified by ID self.tasks[id - 1].is_done = True self.tasks[id - 1].result = result self.current_task_id += 1 def complete_current_task(self, result: str) -> None: """Complete the current task agent is working on.""" self.complete_task(self.current_task_id, result=result) def _task_to_string(self, task: Task) -> str: """Convert a task to a string.""" return f"{task.id}: {task.description}" def get_incomplete_tasks(self) -> List[Task]: """Get the list of incomplete tasks.""" return [task for task in self.tasks if not task.is_done] def get_incomplete_tasks_string(self) -> str: """Get the list of incomplete tasks as a string.""" result = "" for task in self.get_incomplete_tasks(): result += self._task_to_string(task) + "\n" return result
[ "langchain.LLMChain" ]
[((359, 392), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task ID"""'}), "(..., description='Task ID')\n", (364, 392), False, 'from pydantic import BaseModel, Field\n'), ((416, 458), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task description"""'}), "(..., description='Task description')\n", (421, 458), False, 'from pydantic import BaseModel, Field\n'), ((479, 523), 'pydantic.Field', 'Field', (['(False)'], {'description': '"""Task done or not"""'}), "(False, description='Task done or not')\n", (484, 523), False, 'from pydantic import BaseModel, Field\n'), ((542, 589), 'pydantic.Field', 'Field', (['""""""'], {'description': '"""The result of the task"""'}), "('', description='The result of the task')\n", (547, 589), False, 'from pydantic import BaseModel, Field\n'), ((676, 718), 'pydantic.Field', 'Field', (['[]'], {'description': '"""The list of tasks"""'}), "([], description='The list of tasks')\n", (681, 718), False, 'from pydantic import BaseModel, Field\n'), ((746, 786), 'pydantic.Field', 'Field', (['(1)'], {'description': '"""The last task id"""'}), "(1, description='The last task id')\n", (751, 786), False, 'from pydantic import BaseModel, Field\n'), ((806, 855), 'pydantic.Field', 'Field', (['...'], {'description': '"""llm class for the agent"""'}), "(..., description='llm class for the agent')\n", (811, 855), False, 'from pydantic import BaseModel, Field\n'), ((991, 1005), 'llm.generate_task_plan.prompt.get_template', 'get_template', ([], {}), '()\n', (1003, 1005), False, 'from llm.generate_task_plan.prompt import get_template\n'), ((1026, 1063), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'propmt', 'llm': 'self.llm'}), '(prompt=propmt, llm=self.llm)\n', (1034, 1063), False, 'from langchain import LLMChain\n'), ((1366, 1422), 'llm.list_output_parser.LLMListOutputParser.parse', 'LLMListOutputParser.parse', (['result'], {'separeted_string': '"""\t"""'}), "(result, separeted_string='\\t')\n", (1391, 1422), False, 'from llm.list_output_parser import LLMListOutputParser\n')]
""" The Purpose of this file is to provide a wrapper around the PINECONE from langchain """ from langchain.schema.document import Document from langchain_community.embeddings import HuggingFaceInstructEmbeddings from pinecone import Pinecone from neogpt.settings.config import ( DEVICE_TYPE, EMBEDDING_DIMENSION, EMBEDDING_MODEL, INDEX_NAME, MODEL_DIRECTORY, PINECONE_PERSIST_DIRECTORY, ) from vectorstore.base import VectorStore class PineconeVectorStore(VectorStore): def __init__(self, api_key: str, environment: str) -> None: super().__init__() self.api_key = api_key self.environment = environment self.embeddings = HuggingFaceInstructEmbeddings( model_name=EMBEDDING_MODEL, model_kwargs={"device": DEVICE_TYPE}, cache_folder=MODEL_DIRECTORY, ) self.pinecone_client = Pinecone( api_key=api_key, environment=environment, persist_directory=PINECONE_PERSIST_DIRECTORY, embedding_dimension=EMBEDDING_DIMENSION, index_name=INDEX_NAME, embedding_function=self.embeddings, ) def from_documents(self, documents: list[Document]) -> Document: pinecone_data = [] for document in documents: pinecone_data.append({"fields_name": document.fields_name}) self.pinecone_client.upsert(pinecone_data) def as_retriever(self): return self.pinecone_client.as_retriever() # api_key = "Your api key" # environment = "your environment name" # pinecone_store = PineconeVectoreStore(api_key, environment) # pinecone_store.from_documents(documents)
[ "langchain_community.embeddings.HuggingFaceInstructEmbeddings" ]
[((689, 819), 'langchain_community.embeddings.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'EMBEDDING_MODEL', 'model_kwargs': "{'device': DEVICE_TYPE}", 'cache_folder': 'MODEL_DIRECTORY'}), "(model_name=EMBEDDING_MODEL, model_kwargs={\n 'device': DEVICE_TYPE}, cache_folder=MODEL_DIRECTORY)\n", (718, 819), False, 'from langchain_community.embeddings import HuggingFaceInstructEmbeddings\n'), ((893, 1098), 'pinecone.Pinecone', 'Pinecone', ([], {'api_key': 'api_key', 'environment': 'environment', 'persist_directory': 'PINECONE_PERSIST_DIRECTORY', 'embedding_dimension': 'EMBEDDING_DIMENSION', 'index_name': 'INDEX_NAME', 'embedding_function': 'self.embeddings'}), '(api_key=api_key, environment=environment, persist_directory=\n PINECONE_PERSIST_DIRECTORY, embedding_dimension=EMBEDDING_DIMENSION,\n index_name=INDEX_NAME, embedding_function=self.embeddings)\n', (901, 1098), False, 'from pinecone import Pinecone\n')]
# Ingest Documents into a Zep Collection import os from dotenv import find_dotenv, load_dotenv from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import WebBaseLoader from zep_python import ZepClient from zep_python.langchain.vectorstore import ZepVectorStore load_dotenv(dotenv_path=find_dotenv()) SOURCE = "https://en.wikipedia.org/wiki/Leonard_Bernstein" # noqa: E501 ZEP_API_URL = os.environ.get( "ZEP_API_URL" ) # only required if you're using Zep Open Source ZEP_API_KEY = os.environ.get("ZEP_API_KEY") # Required for Zep Cloud if ZEP_API_KEY is None: raise ValueError( "ZEP_API_KEY is required for Zep Cloud. " "Remove this check if using Zep Open Source." ) ZEP_COLLECTION_NAME = os.environ.get("ZEP_COLLECTION_NAME") if ZEP_COLLECTION_NAME is None: raise ValueError("ZEP_COLLECTION_NAME is required for ingestion. ") zep = ZepClient( api_key=ZEP_API_KEY, api_url=ZEP_API_URL, # only required if you're using Zep Open Source ) # Load loader = WebBaseLoader(SOURCE) data = loader.load() print(f"Loaded: {len(data)} documents") # Split text_splitter = RecursiveCharacterTextSplitter(chunk_size=400, chunk_overlap=200) all_splits = text_splitter.split_documents(data) print(f"Adding {len(all_splits)} documents to {ZEP_COLLECTION_NAME}...") # Add to vectorDB vectorstore = ZepVectorStore.from_documents( documents=all_splits, collection_name=ZEP_COLLECTION_NAME, zep_client=zep, ) print(f"Added {len(all_splits)} documents to {ZEP_COLLECTION_NAME}...")
[ "langchain_community.document_loaders.WebBaseLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((449, 478), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""'], {}), "('ZEP_API_URL')\n", (463, 478), False, 'import os\n'), ((549, 578), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""'], {}), "('ZEP_API_KEY')\n", (563, 578), False, 'import os\n'), ((784, 821), 'os.environ.get', 'os.environ.get', (['"""ZEP_COLLECTION_NAME"""'], {}), "('ZEP_COLLECTION_NAME')\n", (798, 821), False, 'import os\n'), ((933, 984), 'zep_python.ZepClient', 'ZepClient', ([], {'api_key': 'ZEP_API_KEY', 'api_url': 'ZEP_API_URL'}), '(api_key=ZEP_API_KEY, api_url=ZEP_API_URL)\n', (942, 984), False, 'from zep_python import ZepClient\n'), ((1062, 1083), 'langchain_community.document_loaders.WebBaseLoader', 'WebBaseLoader', (['SOURCE'], {}), '(SOURCE)\n', (1075, 1083), False, 'from langchain_community.document_loaders import WebBaseLoader\n'), ((1171, 1236), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(400)', 'chunk_overlap': '(200)'}), '(chunk_size=400, chunk_overlap=200)\n', (1201, 1236), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1393, 1502), 'zep_python.langchain.vectorstore.ZepVectorStore.from_documents', 'ZepVectorStore.from_documents', ([], {'documents': 'all_splits', 'collection_name': 'ZEP_COLLECTION_NAME', 'zep_client': 'zep'}), '(documents=all_splits, collection_name=\n ZEP_COLLECTION_NAME, zep_client=zep)\n', (1422, 1502), False, 'from zep_python.langchain.vectorstore import ZepVectorStore\n'), ((345, 358), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import find_dotenv, load_dotenv\n')]
#model_settings.py import streamlit as st from langchain.embeddings.huggingface import HuggingFaceEmbeddings from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext from llama_index.logger import LlamaLogger from langchain.chat_models import ChatOpenAI from langchain import OpenAI from enum import Enum class sentenceTransformers(Enum): OPTION1 = "sentence-transformers/all-MiniLM-L6-v2" #default OPTION2 = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2" OPTION3 = "sentence-transformers/all-mpnet-base-v2" def get_sentence_transformer_dropdown(): options = [e.value for e in sentenceTransformers] selected_option = st.selectbox("Sentence transformer:", options) return selected_option def get_embed_model(provider='Langchain', model_name=sentenceTransformers.OPTION1.value): # load in HF embedding model from langchain embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=model_name)) if provider=='Langchain' else OpenAIEmbedding() return embed_model def get_prompt_helper(): # define prompt helper max_input_size = 4096 num_output = 2048 max_chunk_overlap = 20 prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap) return prompt_helper def get_llm_predictor(): # define LLM num_output = 2048 #llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=num_output)) llm_predictor = LLMPredictor(ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo", max_tokens=num_output)) return llm_predictor @st.cache_resource def get_logger(): llama_logger = LlamaLogger() return llama_logger def get_service_context(llm_predictor=get_llm_predictor(), embed_model=get_embed_model(), prompt_helper=get_prompt_helper(), chunk_size_limit=512, llama_logger=get_logger()): return ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model, prompt_helper=prompt_helper, chunk_size_limit=chunk_size_limit, llama_logger=llama_logger)
[ "langchain.embeddings.huggingface.HuggingFaceEmbeddings", "langchain.chat_models.ChatOpenAI" ]
[((705, 751), 'streamlit.selectbox', 'st.selectbox', (['"""Sentence transformer:"""', 'options'], {}), "('Sentence transformer:', options)\n", (717, 751), True, 'import streamlit as st\n'), ((1220, 1279), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1232, 1279), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1684, 1697), 'llama_index.logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (1695, 1697), False, 'from llama_index.logger import LlamaLogger\n'), ((2009, 2192), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'embed_model': 'embed_model', 'prompt_helper': 'prompt_helper', 'chunk_size_limit': 'chunk_size_limit', 'llama_logger': 'llama_logger'}), '(llm_predictor=llm_predictor, embed_model=\n embed_model, prompt_helper=prompt_helper, chunk_size_limit=\n chunk_size_limit, llama_logger=llama_logger)\n', (2037, 2192), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1031, 1048), 'llama_index.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1046, 1048), False, 'from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext\n'), ((1522, 1600), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'model_name': '"""gpt-3.5-turbo"""', 'max_tokens': 'num_output'}), "(temperature=0.1, model_name='gpt-3.5-turbo', max_tokens=num_output)\n", (1532, 1600), False, 'from langchain.chat_models import ChatOpenAI\n'), ((955, 999), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (976, 999), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n')]
import os from dotenv import load_dotenv import streamlit as st from langchain.llms import OpenAI from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains import ConversationalRetrievalChain from langchain.agents.agent_toolkits import create_retriever_tool from langchain.agents.agent_toolkits import create_conversational_retrieval_agent from langchain.callbacks import StreamlitCallbackHandler from langchain.tools import BaseTool, Tool, tool from langchain.callbacks.base import BaseCallbackHandler from langchain.schema import ChatMessage from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain import PromptTemplate, LLMChain from langchain.vectorstores import LanceDB import lancedb import pandas as pd from langchain.chains import RetrievalQA st.set_page_config(page_title="GlobeBotter", page_icon="🎬") st.header('🎬 Welcome to MovieHarbor, your favourite movie recommender') load_dotenv() #os.environ["HUGGINGFACEHUB_API_TOKEN"] openai_api_key = os.environ['OPENAI_API_KEY'] embeddings = OpenAIEmbeddings() uri = "data/sample-lancedb" db = lancedb.connect(uri) table = db.open_table('movies') docsearch = LanceDB(connection = table, embedding = embeddings) # Import the movie dataset md = pd.read_pickle('movies.pkl') # Create a sidebar for user input st.sidebar.title("Movie Recommendation System") st.sidebar.markdown("Please enter your details and preferences below:") # Ask the user for age, gender and favourite movie genre age = st.sidebar.slider("What is your age?", 1, 100, 25) gender = st.sidebar.radio("What is your gender?", ("Male", "Female", "Other")) genre = st.sidebar.selectbox("What is your favourite movie genre?", md.explode('genres')["genres"].unique()) # Filter the movies based on the user input df_filtered = md[md['genres'].apply(lambda x: genre in x)] template_prefix = """You are a movie recommender system that help users to find movies that match their preferences. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. {context}""" user_info = """This is what we know about the user, and you can use this information to better tune your research: Age: {age} Gender: {gender}""" template_suffix= """Question: {question} Your response:""" user_info = user_info.format(age = age, gender = gender) COMBINED_PROMPT = template_prefix +'\n'+ user_info +'\n'+ template_suffix print(COMBINED_PROMPT) #setting up the chain qa = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.as_retriever(search_kwargs={'data': df_filtered}), return_source_documents=True) query = st.text_input('Enter your question:', placeholder = 'What action movies do you suggest?') if query: result = qa({"query": query}) st.write(result['result'])
[ "langchain.llms.OpenAI", "langchain.vectorstores.LanceDB", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((924, 983), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""GlobeBotter"""', 'page_icon': '"""🎬"""'}), "(page_title='GlobeBotter', page_icon='🎬')\n", (942, 983), True, 'import streamlit as st\n'), ((984, 1055), 'streamlit.header', 'st.header', (['"""🎬 Welcome to MovieHarbor, your favourite movie recommender"""'], {}), "('🎬 Welcome to MovieHarbor, your favourite movie recommender')\n", (993, 1055), True, 'import streamlit as st\n'), ((1057, 1070), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1068, 1070), False, 'from dotenv import load_dotenv\n'), ((1172, 1190), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1188, 1190), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1224, 1244), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1239, 1244), False, 'import lancedb\n'), ((1290, 1337), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (1297, 1337), False, 'from langchain.vectorstores import LanceDB\n'), ((1375, 1403), 'pandas.read_pickle', 'pd.read_pickle', (['"""movies.pkl"""'], {}), "('movies.pkl')\n", (1389, 1403), True, 'import pandas as pd\n'), ((1439, 1486), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Movie Recommendation System"""'], {}), "('Movie Recommendation System')\n", (1455, 1486), True, 'import streamlit as st\n'), ((1487, 1558), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""Please enter your details and preferences below:"""'], {}), "('Please enter your details and preferences below:')\n", (1506, 1558), True, 'import streamlit as st\n'), ((1623, 1673), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""What is your age?"""', '(1)', '(100)', '(25)'], {}), "('What is your age?', 1, 100, 25)\n", (1640, 1673), True, 'import streamlit as st\n'), ((1683, 1752), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""What is your gender?"""', "('Male', 'Female', 'Other')"], {}), "('What is your gender?', ('Male', 'Female', 'Other'))\n", (1699, 1752), True, 'import streamlit as st\n'), ((2834, 2926), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""What action movies do you suggest?"""'}), "('Enter your question:', placeholder=\n 'What action movies do you suggest?')\n", (2847, 2926), True, 'import streamlit as st\n'), ((2972, 2998), 'streamlit.write', 'st.write', (["result['result']"], {}), "(result['result'])\n", (2980, 2998), True, 'import streamlit as st\n'), ((2688, 2696), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2694, 2696), False, 'from langchain.llms import OpenAI\n')]
from langchain_community.document_loaders import PyPDFLoader from langchain_community.document_loaders.csv_loader import CSVLoader from langchain_community.document_loaders import HNLoader from langchain.text_splitter import CharacterTextSplitter from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain_community.document_loaders import UnstructuredHTMLLoader from langchain_openai.embeddings import OpenAIEmbeddings from langchain_community.vectorstores import Chroma from langchain.chains import RetrievalQA from langchain.chains import RetrievalQAWithSourcesChain from langchain_openai.llms import OpenAI from constant import openai import os os.environ['OPENAI_API_KEY'] = openai loader = PyPDFLoader("attention is all you need.pdf") data = loader.load() # print(data[0]) loader = CSVLoader(file_path="job_placement.csv") data = loader.load() # print(data[0]) loader = HNLoader("https://news.ycombinator.com") data = loader.load() # print(data[0]) quote = "one Machine can do the work of fifty ordinary humans, No machine can do the" \ "work of one extraordinary human." ct_splitter = CharacterTextSplitter( separator='.', chunk_size=24, chunk_overlap=3 ) # docs = ct_splitter.split_text(quote) # print(docs) rc_splitter = RecursiveCharacterTextSplitter( chunk_size=24, chunk_overlap=3, ) # docs = rc_splitter.split_text(quote) # print(docs) loader = UnstructuredHTMLLoader("data.html") data = loader.load() rc_splitter = RecursiveCharacterTextSplitter( chunk_size=24, chunk_overlap=3, separators='.', ) # docs = rc_splitter.split_documents(data) # print(docs) quote = "There is a kingdom of lychee fruit that are alive and thriving in Iceland, but they feel " \ "taken advantage of and are not fast enough for you." splitter = RecursiveCharacterTextSplitter( chunk_size=40, chunk_overlap=10, ) docs = splitter.split_text(quote) embeddings = OpenAIEmbeddings(openai_api_key=openai) vectordb = Chroma( persist_directory="data", embedding_function=embeddings ) vectordb.persist() docstorage = Chroma.from_texts(docs,embeddings) qa = RetrievalQA.from_chain_type( llm = OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff", retriever = docstorage.as_retriever() ) # query = "Where do lychee fruit live?" # print(qa.invoke(query)) quote = "There is a kingdom of lycee fruit that are alive and thriving in Iceland, but they fee" \ "taken advantage of and are not fast enough for you." qa1 = RetrievalQAWithSourcesChain.from_chain_type( llm = OpenAI(model_name="gpt-3.5-turbo-instruct"), chain_type="stuff", retriever = docstorage.as_retriever(), ) results = qa1({'question':'What is the primary architecture presented in the document?'},return_only_outputs=True) print(results)
[ "langchain_community.document_loaders.PyPDFLoader", "langchain.text_splitter.CharacterTextSplitter", "langchain_openai.llms.OpenAI", "langchain_community.document_loaders.csv_loader.CSVLoader", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain_community.document_loaders.UnstructuredHTMLLoader", "langchain_community.document_loaders.HNLoader", "langchain_community.vectorstores.Chroma.from_texts", "langchain_community.vectorstores.Chroma", "langchain_openai.embeddings.OpenAIEmbeddings" ]
[((741, 785), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""attention is all you need.pdf"""'], {}), "('attention is all you need.pdf')\n", (752, 785), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((838, 878), 'langchain_community.document_loaders.csv_loader.CSVLoader', 'CSVLoader', ([], {'file_path': '"""job_placement.csv"""'}), "(file_path='job_placement.csv')\n", (847, 878), False, 'from langchain_community.document_loaders.csv_loader import CSVLoader\n'), ((931, 971), 'langchain_community.document_loaders.HNLoader', 'HNLoader', (['"""https://news.ycombinator.com"""'], {}), "('https://news.ycombinator.com')\n", (939, 971), False, 'from langchain_community.document_loaders import HNLoader\n'), ((1166, 1234), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""."""', 'chunk_size': '(24)', 'chunk_overlap': '(3)'}), "(separator='.', chunk_size=24, chunk_overlap=3)\n", (1187, 1234), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1327, 1389), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)'}), '(chunk_size=24, chunk_overlap=3)\n', (1357, 1389), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1473, 1508), 'langchain_community.document_loaders.UnstructuredHTMLLoader', 'UnstructuredHTMLLoader', (['"""data.html"""'], {}), "('data.html')\n", (1495, 1508), False, 'from langchain_community.document_loaders import UnstructuredHTMLLoader\n'), ((1548, 1626), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(24)', 'chunk_overlap': '(3)', 'separators': '"""."""'}), "(chunk_size=24, chunk_overlap=3, separators='.')\n", (1578, 1626), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1889, 1952), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(40)', 'chunk_overlap': '(10)'}), '(chunk_size=40, chunk_overlap=10)\n', (1919, 1952), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2020, 2059), 'langchain_openai.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'openai'}), '(openai_api_key=openai)\n', (2036, 2059), False, 'from langchain_openai.embeddings import OpenAIEmbeddings\n'), ((2074, 2137), 'langchain_community.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""data"""', 'embedding_function': 'embeddings'}), "(persist_directory='data', embedding_function=embeddings)\n", (2080, 2137), False, 'from langchain_community.vectorstores import Chroma\n'), ((2189, 2224), 'langchain_community.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['docs', 'embeddings'], {}), '(docs, embeddings)\n', (2206, 2224), False, 'from langchain_community.vectorstores import Chroma\n'), ((2272, 2315), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2278, 2315), False, 'from langchain_openai.llms import OpenAI\n'), ((2688, 2731), 'langchain_openai.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (2694, 2731), False, 'from langchain_openai.llms import OpenAI\n')]
# define chain components from langchain.memory import ConversationBufferMemory from langchain.chat_models import ChatOpenAI from langchain.chains import ConversationChain from langchain.prompts.prompt import PromptTemplate from database import save_message_to_db, connect_2_db import os from pymongo import MongoClient from urllib.parse import quote_plus from dotenv import load_dotenv import openai # Load environment variables from .env file load_dotenv() #openai.api_key = os.getenv("OPENAI_API_KEY") # to do # add better girlfriend template def chain_setup(user_id, user_name): # get history msg and add it to memmory memory = ConversationBufferMemory() _, message_history = connect_2_db() conv = message_history.find_one({'user_id': user_id}) if conv: messages = conv['messages'] # Calculate how many messages are available num_messages = len(messages) # Start index for messages to be added start_index = max(num_messages - 5, 0) # Add messages to memory for i in range(start_index, num_messages): # Get message message = messages[i] #check if it is user/bot msg if 'user' in message: memory.chat_memory.add_user_message(message['user']) elif 'bot' in message: memory.chat_memory.add_ai_message(message['bot']) else: print("No previous conversation history found for this user.") chat = ChatOpenAI(temperature=0.5, openai_api_key=os.getenv("OPENAI_API_KEY")) memory.ai_prefix = 'Girlfriend' memory.human_prefix = 'Boyfriend' template = """ You are as a role of my girfriend, now lets playing the following requirements: 1/ your name is Shirley, 29 years old, you work in your uncle's furniture store as a marketing manager, but you are planning to do a career change to swing trader 2/ My name is """+ user_name +""" 3/ you are my girfriend, you have language addiction. 4/ don't be overly enthusiastic, don't be cringe; don't be overly negative, don't be too boring. Current conversation: {history} Boyfriend: {input} Girlfriend: """ prompt = PromptTemplate(input_variables=["history", "input"], template=template) conversation = ConversationChain( prompt=prompt, llm=chat, verbose=True, memory=memory ) return conversation def get_chain_response(user_id, user_text, user_name): conv_chain = chain_setup(user_id=user_id, user_name=user_name) out = conv_chain(user_text) print(out['history']) return out['response']
[ "langchain.chains.ConversationChain", "langchain.prompts.prompt.PromptTemplate", "langchain.memory.ConversationBufferMemory" ]
[((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((664, 690), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (688, 690), False, 'from langchain.memory import ConversationBufferMemory\n'), ((719, 733), 'database.connect_2_db', 'connect_2_db', ([], {}), '()\n', (731, 733), False, 'from database import save_message_to_db, connect_2_db\n'), ((2383, 2454), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'input']", 'template': 'template'}), "(input_variables=['history', 'input'], template=template)\n", (2397, 2454), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((2479, 2550), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'prompt': 'prompt', 'llm': 'chat', 'verbose': '(True)', 'memory': 'memory'}), '(prompt=prompt, llm=chat, verbose=True, memory=memory)\n', (2496, 2550), False, 'from langchain.chains import ConversationChain\n'), ((1614, 1641), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1623, 1641), False, 'import os\n')]
import streamlit as st from pathlib import Path from streamlit_chat import message from langchain.document_loaders import CSVLoader from langchain.indexes import VectorstoreIndexCreator from langchain.chains import RetrievalQA from langchain.llms import OpenAI import os os.environ["OPENAI_API_KEY"] = st.secrets["open_ai_api_key"] st.title('CSV Question and answer ChatBot') csv_file_uploaded = st.file_uploader(label="Upload your CSV File here") if csv_file_uploaded is not None: def save_file_to_folder(uploadedFile): # Save uploaded file to 'content' folder. save_folder = 'content' save_path = Path(save_folder, uploadedFile.name) with open(save_path, mode='wb') as w: w.write(uploadedFile.getvalue()) if save_path.exists(): st.success(f'File {uploadedFile.name} is successfully saved!') save_file_to_folder(csv_file_uploaded) loader = CSVLoader(file_path=os.path.join('content/', csv_file_uploaded.name)) # Create an index using the loaded documents index_creator = VectorstoreIndexCreator() docsearch = index_creator.from_loaders([loader]) # Create a question-answering chain using the index chain = RetrievalQA.from_chain_type(llm=OpenAI(), chain_type="stuff", retriever=docsearch.vectorstore.as_retriever(), input_key="question") #Creating the chatbot interface st.title("Chat wtih your CSV Data") # Storing the chat if 'generated' not in st.session_state: st.session_state['generated'] = [] if 'past' not in st.session_state: st.session_state['past'] = [] def generate_response(user_query): response = chain({"question": user_query}) return response['result'] # We will get the user's input by calling the get_text function def get_text(): input_text = st.text_input("You: ","Ask Question From your Document?", key="input") return input_text user_input = get_text() if user_input: output = generate_response(user_input) # store the output st.session_state.past.append(user_input) st.session_state.generated.append(output) if st.session_state['generated']: for i in range(len(st.session_state['generated'])-1, -1, -1): message(st.session_state["generated"][i], key=str(i)) message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
[ "langchain.llms.OpenAI", "langchain.indexes.VectorstoreIndexCreator" ]
[((334, 377), 'streamlit.title', 'st.title', (['"""CSV Question and answer ChatBot"""'], {}), "('CSV Question and answer ChatBot')\n", (342, 377), True, 'import streamlit as st\n'), ((400, 451), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload your CSV File here"""'}), "(label='Upload your CSV File here')\n", (416, 451), True, 'import streamlit as st\n'), ((1082, 1107), 'langchain.indexes.VectorstoreIndexCreator', 'VectorstoreIndexCreator', ([], {}), '()\n', (1105, 1107), False, 'from langchain.indexes import VectorstoreIndexCreator\n'), ((1407, 1442), 'streamlit.title', 'st.title', (['"""Chat wtih your CSV Data"""'], {}), "('Chat wtih your CSV Data')\n", (1415, 1442), True, 'import streamlit as st\n'), ((633, 669), 'pathlib.Path', 'Path', (['save_folder', 'uploadedFile.name'], {}), '(save_folder, uploadedFile.name)\n', (637, 669), False, 'from pathlib import Path\n'), ((1881, 1952), 'streamlit.text_input', 'st.text_input', (['"""You: """', '"""Ask Question From your Document?"""'], {'key': '"""input"""'}), "('You: ', 'Ask Question From your Document?', key='input')\n", (1894, 1952), True, 'import streamlit as st\n'), ((2109, 2149), 'streamlit.session_state.past.append', 'st.session_state.past.append', (['user_input'], {}), '(user_input)\n', (2137, 2149), True, 'import streamlit as st\n'), ((2158, 2199), 'streamlit.session_state.generated.append', 'st.session_state.generated.append', (['output'], {}), '(output)\n', (2191, 2199), True, 'import streamlit as st\n'), ((805, 867), 'streamlit.success', 'st.success', (['f"""File {uploadedFile.name} is successfully saved!"""'], {}), "(f'File {uploadedFile.name} is successfully saved!')\n", (815, 867), True, 'import streamlit as st\n'), ((962, 1010), 'os.path.join', 'os.path.join', (['"""content/"""', 'csv_file_uploaded.name'], {}), "('content/', csv_file_uploaded.name)\n", (974, 1010), False, 'import os\n'), ((1262, 1270), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (1268, 1270), False, 'from langchain.llms import OpenAI\n')]
from typing import Any, Dict, List, Literal, Optional, Union from exa_py import Exa # type: ignore from exa_py.api import HighlightsContentsOptions, TextContentsOptions # type: ignore from langchain_core.callbacks import CallbackManagerForRetrieverRun from langchain_core.documents import Document from langchain_core.pydantic_v1 import Field, SecretStr, root_validator from langchain_core.retrievers import BaseRetriever from langchain_exa._utilities import initialize_client def _get_metadata(result: Any) -> Dict[str, Any]: """Get the metadata from a result object.""" metadata = { "title": result.title, "url": result.url, "id": result.id, "score": result.score, "published_date": result.published_date, "author": result.author, } if getattr(result, "highlights"): metadata["highlights"] = result.highlights if getattr(result, "highlight_scores"): metadata["highlight_scores"] = result.highlight_scores return metadata class ExaSearchRetriever(BaseRetriever): """Exa Search retriever.""" k: int = 10 # num_results """The number of search results to return.""" include_domains: Optional[List[str]] = None """A list of domains to include in the search.""" exclude_domains: Optional[List[str]] = None """A list of domains to exclude from the search.""" start_crawl_date: Optional[str] = None """The start date for the crawl (in YYYY-MM-DD format).""" end_crawl_date: Optional[str] = None """The end date for the crawl (in YYYY-MM-DD format).""" start_published_date: Optional[str] = None """The start date for when the document was published (in YYYY-MM-DD format).""" end_published_date: Optional[str] = None """The end date for when the document was published (in YYYY-MM-DD format).""" use_autoprompt: Optional[bool] = None """Whether to use autoprompt for the search.""" type: str = "neural" """The type of search, 'keyword' or 'neural'. Default: neural""" highlights: Optional[Union[HighlightsContentsOptions, bool]] = None """Whether to set the page content to the highlights of the results.""" text_contents_options: Union[TextContentsOptions, Literal[True]] = True """How to set the page content of the results""" client: Exa = Field(default=None) exa_api_key: SecretStr = Field(default=None) exa_base_url: Optional[str] = None @root_validator(pre=True) def validate_environment(cls, values: Dict) -> Dict: """Validate the environment.""" values = initialize_client(values) return values def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun ) -> List[Document]: response = self.client.search_and_contents( # type: ignore[misc] query, num_results=self.k, text=self.text_contents_options, highlights=self.highlights, # type: ignore include_domains=self.include_domains, exclude_domains=self.exclude_domains, start_crawl_date=self.start_crawl_date, end_crawl_date=self.end_crawl_date, start_published_date=self.start_published_date, end_published_date=self.end_published_date, use_autoprompt=self.use_autoprompt, ) results = response.results return [ Document( page_content=(result.text), metadata=_get_metadata(result), ) for result in results ]
[ "langchain_exa._utilities.initialize_client", "langchain_core.pydantic_v1.Field", "langchain_core.pydantic_v1.root_validator" ]
[((2332, 2351), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2337, 2351), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2381, 2400), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2386, 2400), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2446, 2470), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2460, 2470), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2585, 2610), 'langchain_exa._utilities.initialize_client', 'initialize_client', (['values'], {}), '(values)\n', (2602, 2610), False, 'from langchain_exa._utilities import initialize_client\n')]
# Copyright 2023 Lei Zhang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import List from langchain.agents import initialize_agent, AgentType from langchain.chains import RetrievalQA from langchain.chat_models import ChatOpenAI from langchain.document_loaders import TextLoader, WebBaseLoader from langchain.embeddings import OpenAIEmbeddings from langchain.text_splitter import CharacterTextSplitter from langchain.tools import Tool from langchain.vectorstores import Chroma from langchain_plantuml import diagram from langchain_plantuml.core.plantuml_callback_handler import ( BasePlantUMLCallbackHandler, ) from dotenv import load_dotenv load_dotenv() # Define an Agent class MyAgent: def __init__(self): llm = ChatOpenAI(model_name="gpt-3.5-turbo-0613") """Create the state_of_union Vectorstore""" current_path = os.path.abspath(os.path.dirname(__file__)) doc_path = os.path.join(current_path, "state_of_the_union.txt") loader = TextLoader(doc_path) documents = loader.load() text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(documents) embeddings = OpenAIEmbeddings() docsearch = Chroma.from_documents( texts, embeddings, collection_name="state-of-union" ) state_of_union = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=docsearch.as_retriever() ) """Create the ruff Vectorstore""" loader = WebBaseLoader("https://beta.ruff.rs/docs/faq/") docs = loader.load() ruff_texts = text_splitter.split_documents(docs) ruff_db = Chroma.from_documents(ruff_texts, embeddings, collection_name="ruff") ruff = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=ruff_db.as_retriever() ) """Create the Agent""" tools = [ Tool( name="State of Union QA System", func=state_of_union.run, description="useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.", ), Tool( name="Ruff QA System", func=ruff.run, description="useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.", ), ] self.agent = initialize_agent( tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION ) def run(self, question: str, callbacks: List[BasePlantUMLCallbackHandler]): self.agent.run(question, callbacks=callbacks) # Run the Agent agent = MyAgent() activity_diagram = diagram.activity_diagram_callback(note_max_length=2000) sequence_diagram = diagram.sequence_diagram_callback(note_max_length=2000) question = "What did biden say about ketanji brown jackson in the state of the union address?" try: agent.run(question=question, callbacks=[activity_diagram, sequence_diagram]) finally: activity_diagram.save_uml_content("example_2_activity-plantuml.puml") sequence_diagram.save_uml_content("example_2_sequence-plantuml.puml")
[ "langchain.text_splitter.CharacterTextSplitter", "langchain.agents.initialize_agent", "langchain_plantuml.diagram.activity_diagram_callback", "langchain.document_loaders.TextLoader", "langchain.tools.Tool", "langchain.chat_models.ChatOpenAI", "langchain_plantuml.diagram.sequence_diagram_callback", "langchain.document_loaders.WebBaseLoader", "langchain.vectorstores.Chroma.from_documents", "langchain.embeddings.OpenAIEmbeddings" ]
[((1171, 1184), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1182, 1184), False, 'from dotenv import load_dotenv\n'), ((3316, 3371), 'langchain_plantuml.diagram.activity_diagram_callback', 'diagram.activity_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3349, 3371), False, 'from langchain_plantuml import diagram\n'), ((3391, 3446), 'langchain_plantuml.diagram.sequence_diagram_callback', 'diagram.sequence_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3424, 3446), False, 'from langchain_plantuml import diagram\n'), ((1258, 1301), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0613"""'}), "(model_name='gpt-3.5-turbo-0613')\n", (1268, 1301), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1440, 1492), 'os.path.join', 'os.path.join', (['current_path', '"""state_of_the_union.txt"""'], {}), "(current_path, 'state_of_the_union.txt')\n", (1452, 1492), False, 'import os\n'), ((1510, 1530), 'langchain.document_loaders.TextLoader', 'TextLoader', (['doc_path'], {}), '(doc_path)\n', (1520, 1530), False, 'from langchain.document_loaders import TextLoader, WebBaseLoader\n'), ((1589, 1644), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (1610, 1644), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1723, 1741), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1739, 1741), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1762, 1836), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['texts', 'embeddings'], {'collection_name': '"""state-of-union"""'}), "(texts, embeddings, collection_name='state-of-union')\n", (1783, 1836), False, 'from langchain.vectorstores import Chroma\n'), ((2059, 2106), 'langchain.document_loaders.WebBaseLoader', 'WebBaseLoader', (['"""https://beta.ruff.rs/docs/faq/"""'], {}), "('https://beta.ruff.rs/docs/faq/')\n", (2072, 2106), False, 'from langchain.document_loaders import TextLoader, WebBaseLoader\n'), ((2211, 2280), 'langchain.vectorstores.Chroma.from_documents', 'Chroma.from_documents', (['ruff_texts', 'embeddings'], {'collection_name': '"""ruff"""'}), "(ruff_texts, embeddings, collection_name='ruff')\n", (2232, 2280), False, 'from langchain.vectorstores import Chroma\n'), ((3030, 3103), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n', (3046, 3103), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((1394, 1419), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1409, 1419), False, 'import os\n'), ((2471, 2694), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""State of Union QA System"""', 'func': 'state_of_union.run', 'description': '"""useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question."""'}), "(name='State of Union QA System', func=state_of_union.run, description=\n 'useful for when you need to answer questions about the most recent state of the union address. Input should be a fully formed question.'\n )\n", (2475, 2694), False, 'from langchain.tools import Tool\n'), ((2761, 2944), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Ruff QA System"""', 'func': 'ruff.run', 'description': '"""useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question."""'}), "(name='Ruff QA System', func=ruff.run, description=\n 'useful for when you need to answer questions about ruff (a python linter). Input should be a fully formed question.'\n )\n", (2765, 2944), False, 'from langchain.tools import Tool\n')]
from __future__ import annotations from typing import Any, TypeVar from langchain_core.exceptions import OutputParserException from langchain_core.language_models import BaseLanguageModel from langchain_core.output_parsers import BaseOutputParser from langchain_core.prompts import BasePromptTemplate from langchain.output_parsers.prompts import NAIVE_FIX_PROMPT T = TypeVar("T") class OutputFixingParser(BaseOutputParser[T]): """Wraps a parser and tries to fix parsing errors.""" @classmethod def is_lc_serializable(cls) -> bool: return True parser: BaseOutputParser[T] """The parser to use to parse the output.""" # Should be an LLMChain but we want to avoid top-level imports from langchain.chains retry_chain: Any """The LLMChain to use to retry the completion.""" max_retries: int = 1 """The maximum number of times to retry the parse.""" @classmethod def from_llm( cls, llm: BaseLanguageModel, parser: BaseOutputParser[T], prompt: BasePromptTemplate = NAIVE_FIX_PROMPT, max_retries: int = 1, ) -> OutputFixingParser[T]: """Create an OutputFixingParser from a language model and a parser. Args: llm: llm to use for fixing parser: parser to use for parsing prompt: prompt to use for fixing max_retries: Maximum number of retries to parse. Returns: OutputFixingParser """ from langchain.chains.llm import LLMChain chain = LLMChain(llm=llm, prompt=prompt) return cls(parser=parser, retry_chain=chain, max_retries=max_retries) def parse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return self.parser.parse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = self.retry_chain.run( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") async def aparse(self, completion: str) -> T: retries = 0 while retries <= self.max_retries: try: return await self.parser.aparse(completion) except OutputParserException as e: if retries == self.max_retries: raise e else: retries += 1 completion = await self.retry_chain.arun( instructions=self.parser.get_format_instructions(), completion=completion, error=repr(e), ) raise OutputParserException("Failed to parse") def get_format_instructions(self) -> str: return self.parser.get_format_instructions() @property def _type(self) -> str: return "output_fixing"
[ "langchain_core.exceptions.OutputParserException", "langchain.chains.llm.LLMChain" ]
[((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLMChain\n'), ((2266, 2306), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2287, 2306), False, 'from langchain_core.exceptions import OutputParserException\n'), ((2938, 2978), 'langchain_core.exceptions.OutputParserException', 'OutputParserException', (['"""Failed to parse"""'], {}), "('Failed to parse')\n", (2959, 2978), False, 'from langchain_core.exceptions import OutputParserException\n')]
import logging import os import nextcord # add this import openai from langchain import OpenAI from langchain.chains.summarize import load_summarize_chain from langchain.text_splitter import RecursiveCharacterTextSplitter from nextcord.ext import commands from pytube import YouTube logging.basicConfig( level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) logger = logging.getLogger(__name__) def setup(bot: commands.Bot): bot.add_cog(SummaryCog(bot)) # please put this on bottom lol def progress_func(chunk=None, file_handle=None, remaining=None): """progress call back function for the Summarize function""" logger.info("progressing...") def complete_func(self, path): """complete callback function for the Summarize function""" logger.info("complete") logger.info(self) logger.info(path) async def download_yt_file(link): yt = YouTube( link, on_progress_callback=progress_func, on_complete_callback=complete_func, use_oauth=True, allow_oauth_cache=True, ) logger.info("Processing: " + yt.title) stream = yt.streams.filter(only_audio=True).last() try: ytFile = stream.download(os.getenv("SAVE_PATH")) logger.info(f"Processing complete. saving to path {ytFile}") except Exception as e: ytFile = None logger.info(f"Error processing {e}") return ytFile class SummaryCog(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot self.is_busy = False # this is the name # this is the description @nextcord.slash_command(name="summary", description="Summarize a video") # remove commands.commands and add nextcord.slash_command async def get_summary(self, interaction: nextcord.Interaction, link): # remove ctx and add interaction: nextcord.Interaction ytFile = await download_yt_file(link) # IN THE WHOLE FILE FIX CTX TO INTERACTION, ANY CTX.AUTHOR TO INTERACTION.USER, AND CTX.SEND TO INTERACTION.REPLY (OR INTERACTION.SEND) DEPENDING ON THE CONTEXT # DONT USE ALL CAPS, JUST FOR SHOWING YOU WHAT TO CHANGE audio_file = open(ytFile, "rb") # transcript = openai.Audio.transcribe("whisper-1", audio_file) logger.info(transcript) prompt = f"Write a Title for the transcript that is under 15 words. " \ f"Then write: '--Summary--' " \ f"Write 'Summary' as a Heading " \ f"1. Write a summary of the provided transcript. " \ f"Then write: '--Additional Info--'. " \ f"Then return a list of the main points in the provided transcript. " \ f"Then return a list of action items. " \ f"Then return a list of follow up questions. " \ f"Then return a list of potential arguments against the transcript." \ f"For each list, return a Heading 2 before writing the list items. " \ f"Limit each list item to 200 words, and return no more than 20 points per list. " \ f"Transcript: " llm = OpenAI(temperature=0, openai_api_key=os.getenv("OPENAI_API_KEY")) num_tokens = llm.get_num_tokens(transcript) await interaction.send(f"Number of Tokens in transcript: {num_tokens}") logger.info(f"Number of Tokens in transcript: {num_tokens}") text_splitter = RecursiveCharacterTextSplitter(separators=["\n\n", "\n"], chunk_size=10000, chunk_overlap=500) docs = text_splitter.create_documents([prompt, transcript]) summary_chain = load_summarize_chain(llm=llm, chain_type='map_reduce', verbose=True) output = summary_chain.run(docs) await interaction.send(output) return output def setup(bot: commands.Bot): bot.add_cog(SummaryCog(bot))
[ "langchain.chains.summarize.load_summarize_chain", "langchain.text_splitter.RecursiveCharacterTextSplitter" ]
[((286, 393), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (305, 393), False, 'import logging\n'), ((404, 431), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (421, 431), False, 'import logging\n'), ((909, 1039), 'pytube.YouTube', 'YouTube', (['link'], {'on_progress_callback': 'progress_func', 'on_complete_callback': 'complete_func', 'use_oauth': '(True)', 'allow_oauth_cache': '(True)'}), '(link, on_progress_callback=progress_func, on_complete_callback=\n complete_func, use_oauth=True, allow_oauth_cache=True)\n', (916, 1039), False, 'from pytube import YouTube\n'), ((1647, 1718), 'nextcord.slash_command', 'nextcord.slash_command', ([], {'name': '"""summary"""', 'description': '"""Summarize a video"""'}), "(name='summary', description='Summarize a video')\n", (1669, 1718), False, 'import nextcord\n'), ((2235, 2283), 'openai.Audio.transcribe', 'openai.Audio.transcribe', (['"""whisper-1"""', 'audio_file'], {}), "('whisper-1', audio_file)\n", (2258, 2283), False, 'import openai\n'), ((3458, 3556), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'separators': "['\\n\\n', '\\n']", 'chunk_size': '(10000)', 'chunk_overlap': '(500)'}), "(separators=['\\n\\n', '\\n'], chunk_size=10000,\n chunk_overlap=500)\n", (3488, 3556), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((3645, 3713), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm', 'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(llm=llm, chain_type='map_reduce', verbose=True)\n", (3665, 3713), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1223, 1245), 'os.getenv', 'os.getenv', (['"""SAVE_PATH"""'], {}), "('SAVE_PATH')\n", (1232, 1245), False, 'import os\n'), ((3204, 3231), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (3213, 3231), False, 'import os\n')]
from __future__ import annotations import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: from meilisearch import Client def _create_client( client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, ) -> Client: try: import meilisearch except ImportError: raise ImportError( "Could not import meilisearch python package. " "Please install it with `pip install meilisearch`." ) if not client: url = url or get_from_env("url", "MEILI_HTTP_ADDR") try: api_key = api_key or get_from_env("api_key", "MEILI_MASTER_KEY") except Exception: pass client = meilisearch.Client(url=url, api_key=api_key) elif not isinstance(client, meilisearch.Client): raise ValueError( f"client should be an instance of meilisearch.Client, " f"got {type(client)}" ) try: client.version() except ValueError as e: raise ValueError(f"Failed to connect to Meilisearch: {e}") return client class Meilisearch(VectorStore): """`Meilisearch` vector store. To use this, you need to have `meilisearch` python package installed, and a running Meilisearch instance. To learn more about Meilisearch Python, refer to the in-depth Meilisearch Python documentation: https://meilisearch.github.io/meilisearch-python/. See the following documentation for how to run a Meilisearch instance: https://www.meilisearch.com/docs/learn/getting_started/quick_start. Example: .. code-block:: python from langchain.vectorstores import Meilisearch from langchain.embeddings.openai import OpenAIEmbeddings import meilisearch # api_key is optional; provide it if your meilisearch instance requires it client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() vectorstore = Meilisearch( embedding=embeddings, client=client, index_name='langchain_demo', text_key='text') """ def __init__( self, embedding: Embeddings, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", text_key: str = "text", metadata_key: str = "metadata", ): """Initialize with Meilisearch client.""" client = _create_client(client=client, url=url, api_key=api_key) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._metadata_key = metadata_key def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embedding and add them to the vector store. Args: texts (Iterable[str]): Iterable of strings/text to add to the vectorstore. metadatas (Optional[List[dict]]): Optional list of metadata. Defaults to None. ids Optional[List[str]]: Optional list of IDs. Defaults to None. Returns: List[str]: List of IDs of the texts added to the vectorstore. 
""" texts = list(texts) # Embed and create the documents docs = [] if ids is None: ids = [uuid.uuid4().hex for _ in texts] if metadatas is None: metadatas = [{} for _ in texts] embedding_vectors = self._embedding.embed_documents(texts) for i, text in enumerate(texts): id = ids[i] metadata = metadatas[i] metadata[self._text_key] = text embedding = embedding_vectors[i] docs.append( { "id": id, "_vectors": embedding, f"{self._metadata_key}": metadata, } ) # Send to Meilisearch self._client.index(str(self._index_name)).add_documents(docs) return ids def similarity_search( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to the query. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ docs_and_scores = self.similarity_search_with_score( query=query, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs_and_scores] def similarity_search_with_score( self, query: str, k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to the query, along with scores. Args: query (str): Query text for which to find similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query text and score for each. """ _query = self._embedding.embed_query(query) docs = self.similarity_search_by_vector_with_scores( embedding=_query, k=k, filter=filter, kwargs=kwargs, ) return docs def similarity_search_by_vector_with_scores( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. """ docs = [] results = self._client.index(str(self._index_name)).search( "", {"vector": embedding, "limit": k, "filter": filter} ) for result in results["hits"]: metadata = result[self._metadata_key] if self._text_key in metadata: text = metadata.pop(self._text_key) semantic_score = result["_semanticScore"] docs.append( (Document(page_content=text, metadata=metadata), semantic_score) ) return docs def similarity_search_by_vector( self, embedding: List[float], k: int = 4, filter: Optional[Dict[str, str]] = None, **kwargs: Any, ) -> List[Document]: """Return meilisearch documents most similar to embedding vector. Args: embedding (List[float]): Embedding to look up similar documents. k (int): Number of documents to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List[Document]: List of Documents most similar to the query vector and score for each. 
""" docs = self.similarity_search_by_vector_with_scores( embedding=embedding, k=k, filter=filter, kwargs=kwargs, ) return [doc for doc, _ in docs] @classmethod def from_texts( cls: Type[Meilisearch], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, client: Optional[Client] = None, url: Optional[str] = None, api_key: Optional[str] = None, index_name: str = "langchain-demo", ids: Optional[List[str]] = None, text_key: Optional[str] = "text", metadata_key: Optional[str] = "metadata", **kwargs: Any, ) -> Meilisearch: """Construct Meilisearch wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Adds the documents to a provided Meilisearch index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Meilisearch from langchain.embeddings import OpenAIEmbeddings import meilisearch # The environment should be the one specified next to the API key # in your Meilisearch console client = meilisearch.Client(url='http://127.0.0.1:7700', api_key='***') embeddings = OpenAIEmbeddings() docsearch = Meilisearch.from_texts( client=client, embeddings=embeddings, ) """ client = _create_client(client=client, url=url, api_key=api_key) vectorstore = cls( embedding=embedding, client=client, index_name=index_name, ) vectorstore.add_texts( texts=texts, metadatas=metadatas, ids=ids, text_key=text_key, metadata_key=metadata_key, ) return vectorstore
[ "langchain.utils.get_from_env", "langchain.docstore.document.Document" ]
[((965, 1009), 'meilisearch.Client', 'meilisearch.Client', ([], {'url': 'url', 'api_key': 'api_key'}), '(url=url, api_key=api_key)\n', (983, 1009), False, 'import meilisearch\n'), ((776, 814), 'langchain.utils.get_from_env', 'get_from_env', (['"""url"""', '"""MEILI_HTTP_ADDR"""'], {}), "('url', 'MEILI_HTTP_ADDR')\n", (788, 814), False, 'from langchain.utils import get_from_env\n'), ((861, 904), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""MEILI_MASTER_KEY"""'], {}), "('api_key', 'MEILI_MASTER_KEY')\n", (873, 904), False, 'from langchain.utils import get_from_env\n'), ((3872, 3884), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3882, 3884), False, 'import uuid\n'), ((7512, 7558), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (7520, 7558), False, 'from langchain.docstore.document import Document\n')]
# Author: Yiannis Charalambous from langchain.base_language import BaseLanguageModel from langchain.schema import AIMessage, BaseMessage, HumanMessage from esbmc_ai.config import ChatPromptSettings from .base_chat_interface import BaseChatInterface, ChatResponse from .ai_models import AIModel class OptimizeCode(BaseChatInterface): initial_message: str def __init__( self, ai_model_agent: ChatPromptSettings, initial_message: str, ai_model: AIModel, llm: BaseLanguageModel, ) -> None: super().__init__(ai_model_agent=ai_model_agent, ai_model=ai_model, llm=llm) self.initial_message = initial_message def optimize_function(self, source_code: str, function_name: str) -> ChatResponse: self.messages = [] self.push_to_message_stack( HumanMessage( content=f"Reply OK if you understand the following is the source code to optimize:\n\n{source_code}" ) ) self.push_to_message_stack(AIMessage(content="OK.")) expanded_initial_message: str = self.initial_message.replace( "%s", function_name ) return self.send_message(expanded_initial_message)
[ "langchain.schema.AIMessage", "langchain.schema.HumanMessage" ]
[((838, 964), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""'}), '(content=\n f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""\n )\n', (850, 964), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage\n'), ((1028, 1052), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '"""OK."""'}), "(content='OK.')\n", (1037, 1052), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage\n')]
import os from typing import Any, Optional from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from pydantic import Extra import registry import streaming from .base import BaseTool, BASE_TOOL_DESCRIPTION_TEMPLATE current_dir = os.path.dirname(__file__) project_root = os.path.join(current_dir, '../') usage_guide_path = os.path.join(project_root, 'usage_guide.md') with open(usage_guide_path, 'r') as f: USAGE_GUIDE = f.read() TEMPLATE = f'''You are an expert Web3 assistant called Cacti. You help users interact with Web3 ecosystem, such as with DeFi, NFTs, ENS, etc., by analyzing their query and providing an appropriate action in your response. # INSTRUCTIONS - You have access to the Markdown-formatted usage guide for this chat app below which contains some example prompts to assist users in using the app. - Always use the usage guide to answer the user's question about the app and provide the example prompts from the guide for the suggested actions - Do not make up any information or prompts, only use those provided in the usage guide. - Always include the link to the full usage guide in your final response - https://github.com/yieldprotocol/cacti-backend/blob/master/usage_guide.md - The final response should be in markdown format. # USAGE GUIDE {USAGE_GUIDE} --- User: {{question}} Assistant:''' @registry.register_class class AppUsageGuideTool(BaseTool): _chain: LLMChain class Config: """Configuration for this pydantic object.""" extra = Extra.allow def __init__( self, *args, **kwargs ) -> None: prompt = PromptTemplate( input_variables=["question"], template=TEMPLATE, ) new_token_handler = kwargs.get('new_token_handler') chain = streaming.get_streaming_chain(prompt, new_token_handler) description=BASE_TOOL_DESCRIPTION_TEMPLATE.format( tool_description="answer questions about the chat assistant app, what it can do, how to interact with it", input_description="a standalone query with all relevant contextual details pertaining to the chat web application", output_description="an answer to the question, with suggested follow-up questions if available", ) super().__init__( *args, _chain=chain, description=description, **kwargs ) def _run(self, query: str) -> str: example = { "question": query, "stop": "User", } result = self._chain.run(example) return result.strip() async def _arun(self, query: str) -> str: raise NotImplementedError(f"{self.__class__.__name__} does not support async")
[ "langchain.prompts.PromptTemplate" ]
[((262, 287), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'import os\n'), ((303, 335), 'os.path.join', 'os.path.join', (['current_dir', '"""../"""'], {}), "(current_dir, '../')\n", (315, 335), False, 'import os\n'), ((355, 399), 'os.path.join', 'os.path.join', (['project_root', '"""usage_guide.md"""'], {}), "(project_root, 'usage_guide.md')\n", (367, 399), False, 'import os\n'), ((1650, 1713), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': 'TEMPLATE'}), "(input_variables=['question'], template=TEMPLATE)\n", (1664, 1713), False, 'from langchain.prompts import PromptTemplate\n'), ((1825, 1881), 'streaming.get_streaming_chain', 'streaming.get_streaming_chain', (['prompt', 'new_token_handler'], {}), '(prompt, new_token_handler)\n', (1854, 1881), False, 'import streaming\n')]
from langchain.utilities import WikipediaAPIWrapper def wikipedia_function(topic): """ Runs a query on the Wikipedia API. Args: topic (str): The topic to query. Returns: dict: The result of the query. Examples: >>> wikipedia_function('Python') {'title': 'Python', 'summary': 'Python is a programming language...'} """ wikipedia = WikipediaAPIWrapper() result = wikipedia.run(topic) return result
[ "langchain.utilities.WikipediaAPIWrapper" ]
[((383, 404), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (402, 404), False, 'from langchain.utilities import WikipediaAPIWrapper\n')]
import streamlit as st import datetime import os import psycopg2 from dotenv import load_dotenv from langchain.prompts import PromptTemplate from langchain.docstore.document import Document def log(message): current_time = datetime.datetime.now() milliseconds = current_time.microsecond // 1000 timestamp = current_time.strftime( "[%Y-%m-%d %H:%M:%S.{:03d}] ".format(milliseconds) ) st.text(timestamp + message) def check_input(question: str): if question == "": raise Exception("Please enter a question.") else: pass _postgres_prompt = """\ You are a PostgreSQL expert. Given an input question, create a syntactically correct PostgreSQL query to run and return it as the answer to the input question. Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. Never query for all columns from a table. You must query only the columns that are needed to answer the question. Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table. Create meaningful aliases for the columns. For example, if the column name is products_sold.count, you should it as total_sold_products. Note that the columns with (member_type: measure) are numeric columns and the ones with (member_type: dimension) are string columns. You should include at least one column with (member_type: measure) in your query. There are two types of queries supported against cube tables: aggregated and non-aggregated. Aggregated are those with GROUP BY statement, and non-aggregated are those without. Cube queries issued to your database will always be aggregated, and it doesn't matter if you provide GROUP BY in a query or not. Whenever you use a non-aggregated query you need to provide only column names in SQL: SELECT status, count FROM orders The same aggregated query should always aggregate measure columns using a corresponding aggregating function or special MEASURE() function: SELECT status, SUM(count) FROM orders GROUP BY 1 SELECT status, MEASURE(count) FROM orders GROUP BY 1 If you can't construct the query answer `{no_answer_text}` Only use the following table: {table_info} Only look among the following columns and pick the relevant ones: {columns_info} Question: {input_question} """ PROMPT_POSTFIX = """\ Return the answer as a JSON object with the following format: { "query": "", "filters": [{"column": \"\", "operator": \"\", "value": "\"\"}] } """ CUBE_SQL_API_PROMPT = PromptTemplate( input_variables=[ "input_question", "table_info", "columns_info", "top_k", "no_answer_text", ], template=_postgres_prompt, ) _NO_ANSWER_TEXT = "I can't answer this question." def call_sql_api(sql_query: str): load_dotenv() CONN_STR = os.environ["DATABASE_URL"] # Initializing Cube SQL API connection) connection = psycopg2.connect(CONN_STR) cursor = connection.cursor() cursor.execute(sql_query) columns = [desc[0] for desc in cursor.description] rows = cursor.fetchall() cursor.close() connection.close() return columns, rows def create_docs_from_values(columns_values, table_name, column_name): value_docs = [] for column_value in columns_values: print(column_value) metadata = dict( table_name=table_name, column_name=column_name, ) page_content = column_value value_docs.append(Document(page_content=page_content, metadata=metadata)) return value_docs
[ "langchain.docstore.document.Document", "langchain.prompts.PromptTemplate" ]
[((2668, 2806), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input_question', 'table_info', 'columns_info', 'top_k', 'no_answer_text']", 'template': '_postgres_prompt'}), "(input_variables=['input_question', 'table_info',\n 'columns_info', 'top_k', 'no_answer_text'], template=_postgres_prompt)\n", (2682, 2806), False, 'from langchain.prompts import PromptTemplate\n'), ((230, 253), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (251, 253), False, 'import datetime\n'), ((414, 442), 'streamlit.text', 'st.text', (['(timestamp + message)'], {}), '(timestamp + message)\n', (421, 442), True, 'import streamlit as st\n'), ((2952, 2965), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2963, 2965), False, 'from dotenv import load_dotenv\n'), ((3070, 3096), 'psycopg2.connect', 'psycopg2.connect', (['CONN_STR'], {}), '(CONN_STR)\n', (3086, 3096), False, 'import psycopg2\n'), ((3650, 3704), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'page_content', 'metadata': 'metadata'}), '(page_content=page_content, metadata=metadata)\n', (3658, 3704), False, 'from langchain.docstore.document import Document\n')]
import os import pandas as pd from langchain.chains import LLMChain from langchain.llms import OpenAI from langchain.prompts import PromptTemplate import mlflow assert ( "OPENAI_API_KEY" in os.environ ), "Please set the OPENAI_API_KEY environment variable to run this example." def build_and_evalute_model_with_prompt(prompt_template): mlflow.start_run() mlflow.log_param("prompt_template", prompt_template) # Create a news summarization model using prompt engineering with LangChain. Log the model # to MLflow Tracking llm = OpenAI(temperature=0.9) prompt = PromptTemplate(input_variables=["article"], template=prompt_template) chain = LLMChain(llm=llm, prompt=prompt) logged_model = mlflow.langchain.log_model(chain, artifact_path="model") # Evaluate the model on a small sample dataset sample_data = pd.read_csv("summarization_example_data.csv") mlflow.evaluate( model=logged_model.model_uri, model_type="text-summarization", data=sample_data, targets="highlights", ) mlflow.end_run() prompt_template_1 = ( "Write a summary of the following article that is between triple backticks: ```{article}```" ) print(f"Bulding and evaluating model with prompt: '{prompt_template_1}'") build_and_evalute_model_with_prompt(prompt_template_1) prompt_template_2 = ( "Write a summary of the following article that is between triple backticks. Be concise. Make" " sure the summary includes important nouns and dates and keywords in the original text." " Just return the summary. Do not include any text other than the summary: ```{article}```" ) print(f"Building and evaluating model with prompt: '{prompt_template_2}'") build_and_evalute_model_with_prompt(prompt_template_2) # Load the evaluation results results: pd.DataFrame = mlflow.load_table( "eval_results_table.json", extra_columns=["run_id", "params.prompt_template"] ) results_grouped_by_article = results.sort_values(by="id") print("Evaluation results:") print(results_grouped_by_article[["run_id", "params.prompt_template", "article", "outputs"]]) # Score the best model on a new article new_article = """ Adnan Januzaj swapped the lush turf of Old Trafford for the green baize at Sheffield when he turned up at the snooker World Championships on Wednesday. The Manchester United winger, who has endured a frustrating season under Louis van Gaal, had turned out for the Under 21 side at Fulham on Tuesday night amid reports he could be farmed out on loan next season. But Januzaj may want to consider trying his hand at another sport after displaying his silky skillls on a mini pool table. Adnan Januzaj (left) cheered on\xa0Shaun Murphy (right) at the World Championship in Sheffield. Januzaj shows off his potting skills on a mini pool table at the Crucible on Wednesday. The 20-year-old Belgium international was at the Crucible to cheer on his friend Shaun Murphy in his quarter-final against Anthony McGill. The 2005 winner moved a step closer to an elusive second title in Sheffield with a 13-8 victory, sealed with a 67 break. Three centuries in the match, and the way he accelerated away from 6-6, showed Murphy is a man to fear, and next for him will be Neil Robertson or Barry Hawkins. Januzaj turned out for Under 21s in the 4-1 victory at Fulham on Tuesday night. """ print( f"Scoring the model with prompt '{prompt_template_2}' on the article '{new_article[:70] + '...'}'" ) best_model = mlflow.pyfunc.load_model(f"runs:/{mlflow.last_active_run().info.run_id}/model") summary = best_model.predict({"article": new_article}) print(f"Summary: {summary}")
[ "langchain.chains.LLMChain", "langchain.prompts.PromptTemplate", "langchain.llms.OpenAI" ]
[((1832, 1932), 'mlflow.load_table', 'mlflow.load_table', (['"""eval_results_table.json"""'], {'extra_columns': "['run_id', 'params.prompt_template']"}), "('eval_results_table.json', extra_columns=['run_id',\n 'params.prompt_template'])\n", (1849, 1932), False, 'import mlflow\n'), ((349, 367), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (365, 367), False, 'import mlflow\n'), ((372, 424), 'mlflow.log_param', 'mlflow.log_param', (['"""prompt_template"""', 'prompt_template'], {}), "('prompt_template', prompt_template)\n", (388, 424), False, 'import mlflow\n'), ((555, 578), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (561, 578), False, 'from langchain.llms import OpenAI\n'), ((592, 661), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['article']", 'template': 'prompt_template'}), "(input_variables=['article'], template=prompt_template)\n", (606, 661), False, 'from langchain.prompts import PromptTemplate\n'), ((674, 706), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (682, 706), False, 'from langchain.chains import LLMChain\n'), ((726, 782), 'mlflow.langchain.log_model', 'mlflow.langchain.log_model', (['chain'], {'artifact_path': '"""model"""'}), "(chain, artifact_path='model')\n", (752, 782), False, 'import mlflow\n'), ((853, 898), 'pandas.read_csv', 'pd.read_csv', (['"""summarization_example_data.csv"""'], {}), "('summarization_example_data.csv')\n", (864, 898), True, 'import pandas as pd\n'), ((903, 1026), 'mlflow.evaluate', 'mlflow.evaluate', ([], {'model': 'logged_model.model_uri', 'model_type': '"""text-summarization"""', 'data': 'sample_data', 'targets': '"""highlights"""'}), "(model=logged_model.model_uri, model_type=\n 'text-summarization', data=sample_data, targets='highlights')\n", (918, 1026), False, 'import mlflow\n'), ((1065, 1081), 'mlflow.end_run', 'mlflow.end_run', ([], {}), '()\n', (1079, 1081), False, 'import mlflow\n'), ((3510, 3534), 'mlflow.last_active_run', 'mlflow.last_active_run', ([], {}), '()\n', (3532, 3534), False, 'import mlflow\n')]
import hashlib try: from langchain_community.document_loaders import UnstructuredXMLLoader except ImportError: raise ImportError( 'XML file requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`' ) from None from embedchain.helpers.json_serializable import register_deserializable from embedchain.loaders.base_loader import BaseLoader from embedchain.utils.misc import clean_string @register_deserializable class XmlLoader(BaseLoader): def load_data(self, xml_url): """Load data from a XML file.""" loader = UnstructuredXMLLoader(xml_url) data = loader.load() content = data[0].page_content content = clean_string(content) metadata = data[0].metadata metadata["url"] = metadata["source"] del metadata["source"] output = [{"content": content, "meta_data": metadata}] doc_id = hashlib.sha256((content + xml_url).encode()).hexdigest() return { "doc_id": doc_id, "data": output, }
[ "langchain_community.document_loaders.UnstructuredXMLLoader" ]
[((588, 618), 'langchain_community.document_loaders.UnstructuredXMLLoader', 'UnstructuredXMLLoader', (['xml_url'], {}), '(xml_url)\n', (609, 618), False, 'from langchain_community.document_loaders import UnstructuredXMLLoader\n'), ((705, 726), 'embedchain.utils.misc.clean_string', 'clean_string', (['content'], {}), '(content)\n', (717, 726), False, 'from embedchain.utils.misc import clean_string\n')]
import os import voyager.utils as U from langchain.chat_models import ChatOpenAI from langchain.embeddings.openai import OpenAIEmbeddings from langchain.schema import HumanMessage, SystemMessage from langchain.vectorstores import Chroma from voyager.prompts import load_prompt from voyager.control_primitives import load_control_primitives class SkillManager: def __init__( self, model_name="gpt-3.5-turbo", temperature=0, retrieval_top_k=5, request_timout=120, ckpt_dir="ckpt", resume=False, ): self.llm = ChatOpenAI( model_name=model_name, temperature=temperature, request_timeout=request_timout, ) U.f_mkdir(f"{ckpt_dir}/skill/code") U.f_mkdir(f"{ckpt_dir}/skill/description") U.f_mkdir(f"{ckpt_dir}/skill/vectordb") # programs for env execution self.control_primitives = load_control_primitives() if resume: print(f"\033[33mLoading Skill Manager from {ckpt_dir}/skill\033[0m") self.skills = U.load_json(f"{ckpt_dir}/skill/skills.json") else: self.skills = {} self.retrieval_top_k = retrieval_top_k self.ckpt_dir = ckpt_dir self.vectordb = Chroma( collection_name="skill_vectordb", embedding_function=OpenAIEmbeddings(), persist_directory=f"{ckpt_dir}/skill/vectordb", ) assert self.vectordb._collection.count() == len(self.skills), ( f"Skill Manager's vectordb is not synced with skills.json.\n" f"There are {self.vectordb._collection.count()} skills in vectordb but {len(self.skills)} skills in skills.json.\n" f"Did you set resume=False when initializing the manager?\n" f"You may need to manually delete the vectordb directory for running from scratch." ) @property def programs(self): programs = "" for skill_name, entry in self.skills.items(): programs += f"{entry['code']}\n\n" for primitives in self.control_primitives: programs += f"{primitives}\n\n" return programs def add_new_skill(self, info): if info["task"].startswith("Deposit useless items into the chest at"): # No need to reuse the deposit skill return program_name = info["program_name"] program_code = info["program_code"] skill_description = self.generate_skill_description(program_name, program_code) print( f"\033[33mSkill Manager generated description for {program_name}:\n{skill_description}\033[0m" ) if program_name in self.skills: print(f"\033[33mSkill {program_name} already exists. Rewriting!\033[0m") self.vectordb._collection.delete(ids=[program_name]) i = 2 while f"{program_name}V{i}.js" in os.listdir(f"{self.ckpt_dir}/skill/code"): i += 1 dumped_program_name = f"{program_name}V{i}" else: dumped_program_name = program_name self.vectordb.add_texts( texts=[skill_description], ids=[program_name], metadatas=[{"name": program_name}], ) self.skills[program_name] = { "code": program_code, "description": skill_description, } assert self.vectordb._collection.count() == len( self.skills ), "vectordb is not synced with skills.json" U.dump_text( program_code, f"{self.ckpt_dir}/skill/code/{dumped_program_name}.js" ) U.dump_text( skill_description, f"{self.ckpt_dir}/skill/description/{dumped_program_name}.txt", ) U.dump_json(self.skills, f"{self.ckpt_dir}/skill/skills.json") self.vectordb.persist() def generate_skill_description(self, program_name, program_code): messages = [ SystemMessage(content=load_prompt("skill")), HumanMessage( content=program_code + "\n\n" + f"The main function is `{program_name}`." 
), ] skill_description = f" // { self.llm(messages).content}" return f"async function {program_name}(bot) {{\n{skill_description}\n}}" def retrieve_skills(self, query): k = min(self.vectordb._collection.count(), self.retrieval_top_k) if k == 0: return [] print(f"\033[33mSkill Manager retrieving for {k} skills\033[0m") docs_and_scores = self.vectordb.similarity_search_with_score(query, k=k) print( f"\033[33mSkill Manager retrieved skills: " f"{', '.join([doc.metadata['name'] for doc, _ in docs_and_scores])}\033[0m" ) skills = [] for doc, _ in docs_and_scores: skills.append(self.skills[doc.metadata["name"]]["code"]) return skills
[ "langchain.embeddings.openai.OpenAIEmbeddings", "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((583, 678), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'request_timeout': 'request_timout'}), '(model_name=model_name, temperature=temperature, request_timeout=\n request_timout)\n', (593, 678), False, 'from langchain.chat_models import ChatOpenAI\n'), ((729, 764), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/code"""'], {}), "(f'{ckpt_dir}/skill/code')\n", (738, 764), True, 'import voyager.utils as U\n'), ((773, 815), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/description"""'], {}), "(f'{ckpt_dir}/skill/description')\n", (782, 815), True, 'import voyager.utils as U\n'), ((824, 863), 'voyager.utils.f_mkdir', 'U.f_mkdir', (['f"""{ckpt_dir}/skill/vectordb"""'], {}), "(f'{ckpt_dir}/skill/vectordb')\n", (833, 863), True, 'import voyager.utils as U\n'), ((935, 960), 'voyager.control_primitives.load_control_primitives', 'load_control_primitives', ([], {}), '()\n', (958, 960), False, 'from voyager.control_primitives import load_control_primitives\n'), ((3548, 3633), 'voyager.utils.dump_text', 'U.dump_text', (['program_code', 'f"""{self.ckpt_dir}/skill/code/{dumped_program_name}.js"""'], {}), "(program_code,\n f'{self.ckpt_dir}/skill/code/{dumped_program_name}.js')\n", (3559, 3633), True, 'import voyager.utils as U\n'), ((3660, 3758), 'voyager.utils.dump_text', 'U.dump_text', (['skill_description', 'f"""{self.ckpt_dir}/skill/description/{dumped_program_name}.txt"""'], {}), "(skill_description,\n f'{self.ckpt_dir}/skill/description/{dumped_program_name}.txt')\n", (3671, 3758), True, 'import voyager.utils as U\n'), ((3798, 3860), 'voyager.utils.dump_json', 'U.dump_json', (['self.skills', 'f"""{self.ckpt_dir}/skill/skills.json"""'], {}), "(self.skills, f'{self.ckpt_dir}/skill/skills.json')\n", (3809, 3860), True, 'import voyager.utils as U\n'), ((1087, 1131), 'voyager.utils.load_json', 'U.load_json', (['f"""{ckpt_dir}/skill/skills.json"""'], {}), "(f'{ckpt_dir}/skill/skills.json')\n", (1098, 1131), True, 'import voyager.utils as U\n'), ((4054, 4145), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': "(program_code + '\\n\\n' + f'The main function is `{program_name}`.')"}), "(content=program_code + '\\n\\n' +\n f'The main function is `{program_name}`.')\n", (4066, 4145), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((1364, 1382), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1380, 1382), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2933, 2974), 'os.listdir', 'os.listdir', (['f"""{self.ckpt_dir}/skill/code"""'], {}), "(f'{self.ckpt_dir}/skill/code')\n", (2943, 2974), False, 'import os\n'), ((4019, 4039), 'voyager.prompts.load_prompt', 'load_prompt', (['"""skill"""'], {}), "('skill')\n", (4030, 4039), False, 'from voyager.prompts import load_prompt\n')]
from langflow import CustomComponent from langchain.agents import AgentExecutor, create_json_agent from langflow.field_typing import ( BaseLanguageModel, ) from langchain_community.agent_toolkits.json.toolkit import JsonToolkit class JsonAgentComponent(CustomComponent): display_name = "JsonAgent" description = "Construct a json agent from an LLM and tools." def build_config(self): return { "llm": {"display_name": "LLM"}, "toolkit": {"display_name": "Toolkit"}, } def build( self, llm: BaseLanguageModel, toolkit: JsonToolkit, ) -> AgentExecutor: return create_json_agent(llm=llm, toolkit=toolkit)
[ "langchain.agents.create_json_agent" ]
[((657, 700), 'langchain.agents.create_json_agent', 'create_json_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit'}), '(llm=llm, toolkit=toolkit)\n', (674, 700), False, 'from langchain.agents import AgentExecutor, create_json_agent\n')]
from typing import Annotated, List, Optional from uuid import UUID from fastapi import APIRouter, Depends, HTTPException, Query, Request from fastapi.responses import StreamingResponse from langchain.embeddings.ollama import OllamaEmbeddings from langchain.embeddings.openai import OpenAIEmbeddings from logger import get_logger from middlewares.auth import AuthBearer, get_current_user from models.settings import BrainSettings, get_supabase_client from models.user_usage import UserUsage from modules.brain.service.brain_service import BrainService from modules.chat.controller.chat.brainful_chat import BrainfulChat from modules.chat.dto.chats import ChatItem, ChatQuestion from modules.chat.dto.inputs import ( ChatUpdatableProperties, CreateChatProperties, QuestionAndAnswer, ) from modules.chat.entity.chat import Chat from modules.chat.service.chat_service import ChatService from modules.notification.service.notification_service import NotificationService from modules.user.entity.user_identity import UserIdentity from packages.utils.telemetry import send_telemetry from vectorstore.supabase import CustomSupabaseVectorStore logger = get_logger(__name__) chat_router = APIRouter() notification_service = NotificationService() brain_service = BrainService() chat_service = ChatService() def init_vector_store(user_id: UUID) -> CustomSupabaseVectorStore: """ Initialize the vector store """ brain_settings = BrainSettings() supabase_client = get_supabase_client() embeddings = None if brain_settings.ollama_api_base_url: embeddings = OllamaEmbeddings( base_url=brain_settings.ollama_api_base_url ) # pyright: ignore reportPrivateUsage=none else: embeddings = OpenAIEmbeddings() vector_store = CustomSupabaseVectorStore( supabase_client, embeddings, table_name="vectors", user_id=user_id ) return vector_store def get_answer_generator( chat_id: UUID, chat_question: ChatQuestion, brain_id: UUID, current_user: UserIdentity, ): chat_instance = BrainfulChat() chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id) user_usage = UserUsage( id=current_user.id, email=current_user.email, ) vector_store = init_vector_store(user_id=current_user.id) # Get History history = chat_service.get_chat_history(chat_id) # Generic brain, metadata_brain = brain_service.find_brain_from_question( brain_id, chat_question.question, current_user, chat_id, history, vector_store ) send_telemetry("question_asked", {"model_name": brain.model}) gpt_answer_generator = chat_instance.get_answer_generator( brain=brain, chat_id=str(chat_id), model=brain.model, temperature=0.1, streaming=True, prompt_id=chat_question.prompt_id, user_id=current_user.id, user_email=current_user.email, ) return gpt_answer_generator @chat_router.get("/chat/healthz", tags=["Health"]) async def healthz(): return {"status": "ok"} # get all chats @chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"]) async def get_chats(current_user: UserIdentity = Depends(get_current_user)): """ Retrieve all chats for the current user. - `current_user`: The current authenticated user. - Returns a list of all chats for the user. This endpoint retrieves all the chats associated with the current authenticated user. It returns a list of chat objects containing the chat ID and chat name for each chat. 
""" chats = chat_service.get_user_chats(str(current_user.id)) return {"chats": chats} # delete one chat @chat_router.delete( "/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"] ) async def delete_chat(chat_id: UUID): """ Delete a specific chat by chat ID. """ notification_service.remove_chat_notifications(chat_id) chat_service.delete_chat_from_db(chat_id) return {"message": f"{chat_id} has been deleted."} # update existing chat metadata @chat_router.put( "/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"] ) async def update_chat_metadata_handler( chat_data: ChatUpdatableProperties, chat_id: UUID, current_user: UserIdentity = Depends(get_current_user), ): """ Update chat attributes """ chat = chat_service.get_chat_by_id( chat_id # pyright: ignore reportPrivateUsage=none ) if str(current_user.id) != chat.user_id: raise HTTPException( status_code=403, # pyright: ignore reportPrivateUsage=none detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none ) return chat_service.update_chat(chat_id=chat_id, chat_data=chat_data) # create new chat @chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"]) async def create_chat_handler( chat_data: CreateChatProperties, current_user: UserIdentity = Depends(get_current_user), ): """ Create a new chat with initial chat messages. """ return chat_service.create_chat(user_id=current_user.id, chat_data=chat_data) # add new question to chat @chat_router.post( "/chat/{chat_id}/question", dependencies=[ Depends( AuthBearer(), ), ], tags=["Chat"], ) async def create_question_handler( request: Request, chat_question: ChatQuestion, chat_id: UUID, brain_id: Annotated[UUID | None, Query()] = None, current_user: UserIdentity = Depends(get_current_user), ): try: logger.info( f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}" ) gpt_answer_generator = get_answer_generator( chat_id, chat_question, brain_id, current_user ) chat_answer = gpt_answer_generator.generate_answer( chat_id, chat_question, save_answer=True ) return chat_answer except HTTPException as e: raise e # stream new question response from chat @chat_router.post( "/chat/{chat_id}/question/stream", dependencies=[ Depends( AuthBearer(), ), ], tags=["Chat"], ) async def create_stream_question_handler( request: Request, chat_question: ChatQuestion, chat_id: UUID, brain_id: Annotated[UUID | None, Query()] = None, current_user: UserIdentity = Depends(get_current_user), ) -> StreamingResponse: chat_instance = BrainfulChat() chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id) user_usage = UserUsage( id=current_user.id, email=current_user.email, ) logger.info( f"Creating question for chat {chat_id} with brain {brain_id} of type {type(brain_id)}" ) gpt_answer_generator = get_answer_generator( chat_id, chat_question, brain_id, current_user ) try: return StreamingResponse( gpt_answer_generator.generate_stream( chat_id, chat_question, save_answer=True ), media_type="text/event-stream", ) except HTTPException as e: raise e # get chat history @chat_router.get( "/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"] ) async def get_chat_history_handler( chat_id: UUID, ) -> List[ChatItem]: # TODO: RBAC with current_user return chat_service.get_chat_history_with_notifications(chat_id) @chat_router.post( "/chat/{chat_id}/question/answer", dependencies=[Depends(AuthBearer())], tags=["Chat"], ) async def 
add_question_and_answer_handler( chat_id: UUID, question_and_answer: QuestionAndAnswer, ) -> Optional[Chat]: """ Add a new question and answer to the chat. """ return chat_service.add_question_and_answer(chat_id, question_and_answer)
[ "langchain.embeddings.ollama.OllamaEmbeddings", "langchain.embeddings.openai.OpenAIEmbeddings" ]
[((1158, 1178), 'logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1168, 1178), False, 'from logger import get_logger\n'), ((1194, 1205), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1203, 1205), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((1230, 1251), 'modules.notification.service.notification_service.NotificationService', 'NotificationService', ([], {}), '()\n', (1249, 1251), False, 'from modules.notification.service.notification_service import NotificationService\n'), ((1268, 1282), 'modules.brain.service.brain_service.BrainService', 'BrainService', ([], {}), '()\n', (1280, 1282), False, 'from modules.brain.service.brain_service import BrainService\n'), ((1298, 1311), 'modules.chat.service.chat_service.ChatService', 'ChatService', ([], {}), '()\n', (1309, 1311), False, 'from modules.chat.service.chat_service import ChatService\n'), ((1450, 1465), 'models.settings.BrainSettings', 'BrainSettings', ([], {}), '()\n', (1463, 1465), False, 'from models.settings import BrainSettings, get_supabase_client\n'), ((1488, 1509), 'models.settings.get_supabase_client', 'get_supabase_client', ([], {}), '()\n', (1507, 1509), False, 'from models.settings import BrainSettings, get_supabase_client\n'), ((1792, 1889), 'vectorstore.supabase.CustomSupabaseVectorStore', 'CustomSupabaseVectorStore', (['supabase_client', 'embeddings'], {'table_name': '"""vectors"""', 'user_id': 'user_id'}), "(supabase_client, embeddings, table_name='vectors',\n user_id=user_id)\n", (1817, 1889), False, 'from vectorstore.supabase import CustomSupabaseVectorStore\n'), ((2080, 2094), 'modules.chat.controller.chat.brainful_chat.BrainfulChat', 'BrainfulChat', ([], {}), '()\n', (2092, 2094), False, 'from modules.chat.controller.chat.brainful_chat import BrainfulChat\n'), ((2198, 2253), 'models.user_usage.UserUsage', 'UserUsage', ([], {'id': 'current_user.id', 'email': 'current_user.email'}), '(id=current_user.id, email=current_user.email)\n', (2207, 2253), False, 'from models.user_usage import UserUsage\n'), ((2593, 2654), 'packages.utils.telemetry.send_telemetry', 'send_telemetry', (['"""question_asked"""', "{'model_name': brain.model}"], {}), "('question_asked', {'model_name': brain.model})\n", (2607, 2654), False, 'from packages.utils.telemetry import send_telemetry\n'), ((3248, 3273), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (3255, 3273), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((4355, 4380), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (4362, 4380), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5080, 5105), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5087, 5105), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5636, 5661), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (5643, 5661), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6531, 6556), 'fastapi.Depends', 'Depends', (['get_current_user'], {}), '(get_current_user)\n', (6538, 6556), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6603, 6617), 'modules.chat.controller.chat.brainful_chat.BrainfulChat', 'BrainfulChat', ([], {}), '()\n', (6615, 6617), False, 'from modules.chat.controller.chat.brainful_chat import BrainfulChat\n'), ((6721, 6776), 
'models.user_usage.UserUsage', 'UserUsage', ([], {'id': 'current_user.id', 'email': 'current_user.email'}), '(id=current_user.id, email=current_user.email)\n', (6730, 6776), False, 'from models.user_usage import UserUsage\n'), ((1596, 1657), 'langchain.embeddings.ollama.OllamaEmbeddings', 'OllamaEmbeddings', ([], {'base_url': 'brain_settings.ollama_api_base_url'}), '(base_url=brain_settings.ollama_api_base_url)\n', (1612, 1657), False, 'from langchain.embeddings.ollama import OllamaEmbeddings\n'), ((1754, 1772), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1770, 1772), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((4593, 4688), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': '(403)', 'detail': '"""You should be the owner of the chat to update it."""'}), "(status_code=403, detail=\n 'You should be the owner of the chat to update it.')\n", (4606, 4688), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((3168, 3180), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (3178, 3180), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((3797, 3809), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (3807, 3809), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((4191, 4203), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (4201, 4203), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((4948, 4960), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (4958, 4960), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((5586, 5593), 'fastapi.Query', 'Query', ([], {}), '()\n', (5591, 5593), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((5387, 5399), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (5397, 5399), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((6481, 6488), 'fastapi.Query', 'Query', ([], {}), '()\n', (6486, 6488), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((6275, 6287), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (6285, 6287), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((7390, 7402), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (7400, 7402), False, 'from middlewares.auth import AuthBearer, get_current_user\n'), ((7688, 7700), 'middlewares.auth.AuthBearer', 'AuthBearer', ([], {}), '()\n', (7698, 7700), False, 'from middlewares.auth import AuthBearer, get_current_user\n')]
import json import os import pickle from taskweaver.plugin import Plugin, register_plugin @register_plugin class DocumentRetriever(Plugin): vectorstore = None def _init(self): try: import tiktoken from langchain_community.embeddings import HuggingFaceEmbeddings from langchain_community.vectorstores import FAISS except ImportError: raise ImportError("Please install langchain-community first.") self.embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") self.vectorstore = FAISS.load_local( folder_path=self.config.get("index_folder"), embeddings=self.embeddings, ) with open( os.path.join( self.config.get("index_folder"), "chunk_id_to_index.pkl", ), "rb", ) as f: self.chunk_id_to_index = pickle.load(f) self.enc = tiktoken.encoding_for_model("gpt-3.5-turbo") def __call__(self, query: str, size: int = 5, target_length: int = 256): if self.vectorstore is None: self._init() result = self.vectorstore.similarity_search( query=query, k=size, ) expanded_chunks = self.do_expand(result, target_length) return f"DocumentRetriever has done searching for `{query}`.\n" + self.ctx.wrap_text_with_delimiter_temporal( "\n```json\n" + json.dumps(expanded_chunks, indent=4) + "```\n", ) def do_expand(self, result, target_length): expanded_chunks = [] # do expansion for r in result: source = r.metadata["source"] chunk_id = r.metadata["chunk_id"] content = r.page_content expanded_result = content left_chunk_id, right_chunk_id = chunk_id - 1, chunk_id + 1 left_valid, right_valid = True, True chunk_ids = [chunk_id] while True: current_length = len(self.enc.encode(expanded_result)) if f"{source}_{left_chunk_id}" in self.chunk_id_to_index: chunk_ids.append(left_chunk_id) left_chunk_index = self.vectorstore.index_to_docstore_id[ self.chunk_id_to_index[f"{source}_{left_chunk_id}"] ] left_chunk = self.vectorstore.docstore.search(left_chunk_index) encoded_left_chunk = self.enc.encode(left_chunk.page_content) if len(encoded_left_chunk) + current_length < target_length: expanded_result = left_chunk.page_content + expanded_result left_chunk_id -= 1 current_length += len(encoded_left_chunk) else: expanded_result += self.enc.decode( encoded_left_chunk[-(target_length - current_length) :], ) current_length = target_length break else: left_valid = False if f"{source}_{right_chunk_id}" in self.chunk_id_to_index: chunk_ids.append(right_chunk_id) right_chunk_index = self.vectorstore.index_to_docstore_id[ self.chunk_id_to_index[f"{source}_{right_chunk_id}"] ] right_chunk = self.vectorstore.docstore.search(right_chunk_index) encoded_right_chunk = self.enc.encode(right_chunk.page_content) if len(encoded_right_chunk) + current_length < target_length: expanded_result += right_chunk.page_content right_chunk_id += 1 current_length += len(encoded_right_chunk) else: expanded_result += self.enc.decode( encoded_right_chunk[: target_length - current_length], ) current_length = target_length break else: right_valid = False if not left_valid and not right_valid: break expanded_chunks.append( { "chunk": expanded_result, "metadata": r.metadata, # "length": current_length, # "chunk_ids": chunk_ids }, ) return expanded_chunks
[ "langchain_community.embeddings.HuggingFaceEmbeddings" ]
[((499, 551), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (520, 551), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((960, 1004), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (987, 1004), False, 'import tiktoken\n'), ((925, 939), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (936, 939), False, 'import pickle\n'), ((1466, 1503), 'json.dumps', 'json.dumps', (['expanded_chunks'], {'indent': '(4)'}), '(expanded_chunks, indent=4)\n', (1476, 1503), False, 'import json\n')]
from langchain.utilities import BashProcess from langchain.agents import load_tools def get_built_in_tools(tools: list[str]): bash = BashProcess() load_tools(["human"]) return [bash]
[ "langchain.utilities.BashProcess", "langchain.agents.load_tools" ]
[((139, 152), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (150, 152), False, 'from langchain.utilities import BashProcess\n'), ((158, 179), 'langchain.agents.load_tools', 'load_tools', (["['human']"], {}), "(['human'])\n", (168, 179), False, 'from langchain.agents import load_tools\n')]
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This would makes sure Python is aware there is more than one sub-package within bigdl, # physically located elsewhere. # Otherwise there would be module not found error in non-pip's setting as Python would # only search the first bigdl package and end up finding only one sub-package. # This file is adapted from # https://github.com/hwchase17/langchain/blob/master/langchain/llms/huggingface_pipeline.py # The MIT License # Copyright (c) Harrison Chase # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import importlib.util import logging from typing import Any, List, Mapping, Optional from pydantic import Extra from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation", "summarization") class TransformersPipelineLLM(LLM): """Wrapper around the BigDL-LLM Transformer-INT4 model in Transformer.pipeline() Example: .. 
code-block:: python from bigdl.llm.langchain.llms import TransformersPipelineLLM llm = TransformersPipelineLLM.from_model_id(model_id="decapoda-research/llama-7b-hf") """ pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name or model path to use.""" model_kwargs: Optional[dict] = None """Key word arguments passed to the model.""" pipeline_kwargs: Optional[dict] = None """Key word arguments passed to the pipeline.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @classmethod def from_model_id( cls, model_id: str, task: str, model_kwargs: Optional[dict] = None, pipeline_kwargs: Optional[dict] = None, **kwargs: Any, ) -> LLM: """Construct the pipeline object from model_id and task.""" try: from bigdl.llm.transformers import ( AutoModel, AutoModelForCausalLM, # AutoModelForSeq2SeqLM, ) from transformers import AutoTokenizer, LlamaTokenizer from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( "Could not import transformers python package. " "Please install it with `pip install transformers`." ) _model_kwargs = model_kwargs or {} # TODO: may refactore this code in the future try: tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) except: tokenizer = LlamaTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs) elif task in ("text2text-generation", "summarization"): # TODO: support this when related PR merged model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_4bit=True, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if "trust_remote_code" in _model_kwargs: _model_kwargs = { k: v for k, v in _model_kwargs.items() if k != "trust_remote_code" } _pipeline_kwargs = pipeline_kwargs or {} pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device='cpu', # only cpu now model_kwargs=_model_kwargs, **_pipeline_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, pipeline_kwargs=_pipeline_kwargs, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "model_id": self.model_id, "model_kwargs": self.model_kwargs, "pipeline_kwargs": self.pipeline_kwargs, } @property def _llm_type(self) -> str: return "BigDL-llm" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> str: response = self.pipeline(prompt) if self.pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.pipeline.task == "text2text-generation": text = response[0]["generated_text"] elif self.pipeline.task == "summarization": text = response[0]["summary_text"] else: raise ValueError( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens" ]
[((5354, 5476), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': '"""cpu"""', 'model_kwargs': '_model_kwargs'}), "(task=task, model=model, tokenizer=tokenizer, device='cpu',\n model_kwargs=_model_kwargs, **_pipeline_kwargs)\n", (5365, 5476), True, 'from transformers import pipeline as hf_pipeline\n'), ((4206, 4262), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4235, 4262), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((7329, 7360), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (7348, 7360), False, 'from langchain.llms.utils import enforce_stop_tokens\n'), ((4303, 4360), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['model_id'], {}), '(model_id, **_model_kwargs)\n', (4333, 4360), False, 'from transformers import AutoTokenizer, LlamaTokenizer\n'), ((4441, 4528), 'bigdl.llm.transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'load_in_4bit': '(True)'}), '(model_id, load_in_4bit=True, **\n _model_kwargs)\n', (4477, 4528), False, 'from bigdl.llm.transformers import AutoModel, AutoModelForCausalLM\n')]
from typing import AsyncGenerator, Optional, Tuple from langchain import ConversationChain import logging from typing import Optional, Tuple from pydantic.v1 import SecretStr from vocode.streaming.agent.base_agent import RespondAgent from vocode.streaming.agent.utils import get_sentence_from_buffer from langchain import ConversationChain from langchain.schema import ChatMessage, AIMessage, HumanMessage from langchain_community.chat_models import ChatAnthropic import logging from vocode import getenv from vocode.streaming.models.agent import ChatAnthropicAgentConfig from langchain.prompts import ( ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, ) from vocode import getenv from vocode.streaming.models.agent import ChatAnthropicAgentConfig from langchain.memory import ConversationBufferMemory SENTENCE_ENDINGS = [".", "!", "?"] class ChatAnthropicAgent(RespondAgent[ChatAnthropicAgentConfig]): def __init__( self, agent_config: ChatAnthropicAgentConfig, logger: Optional[logging.Logger] = None, anthropic_api_key: Optional[SecretStr] = None, ): super().__init__(agent_config=agent_config, logger=logger) import anthropic # Convert anthropic_api_key to SecretStr if it's not None and not already a SecretStr if anthropic_api_key is not None and not isinstance( anthropic_api_key, SecretStr ): anthropic_api_key = SecretStr(anthropic_api_key) else: # Retrieve anthropic_api_key from environment variable and convert to SecretStr env_key = getenv("ANTHROPIC_API_KEY") if env_key: anthropic_api_key = SecretStr(env_key) if not anthropic_api_key: raise ValueError( "ANTHROPIC_API_KEY must be set in environment or passed in as a SecretStr" ) self.prompt = ChatPromptTemplate.from_messages( [ MessagesPlaceholder(variable_name="history"), HumanMessagePromptTemplate.from_template("{input}"), ] ) self.llm = ChatAnthropic( model_name=agent_config.model_name, anthropic_api_key=anthropic_api_key, ) # streaming not well supported by langchain, so we will connect directly self.anthropic_client = ( anthropic.AsyncAnthropic(api_key=str(anthropic_api_key)) if agent_config.generate_responses else None ) self.memory = ConversationBufferMemory(return_messages=True) self.memory.chat_memory.messages.append( HumanMessage(content=self.agent_config.prompt_preamble) ) if agent_config.initial_message: self.memory.chat_memory.messages.append( AIMessage(content=agent_config.initial_message.text) ) self.conversation = ConversationChain( memory=self.memory, prompt=self.prompt, llm=self.llm ) async def respond( self, human_input, conversation_id: str, is_interrupt: bool = False, ) -> Tuple[str, bool]: text = await self.conversation.apredict(input=human_input) self.logger.debug(f"LLM response: {text}") return text, False async def generate_response( self, human_input, conversation_id: str, is_interrupt: bool = False, ) -> AsyncGenerator[Tuple[str, bool], None]: self.memory.chat_memory.messages.append(HumanMessage(content=human_input)) bot_memory_message = AIMessage(content="") self.memory.chat_memory.messages.append(bot_memory_message) prompt = self.llm._convert_messages_to_prompt(self.memory.chat_memory.messages) if self.anthropic_client: streamed_response = await self.anthropic_client.completions.create( prompt=prompt, max_tokens_to_sample=self.agent_config.max_tokens_to_sample, model=self.agent_config.model_name, stream=True, ) buffer = "" async for completion in streamed_response: buffer += completion.completion sentence, remainder = get_sentence_from_buffer(buffer) if sentence: bot_memory_message.content = 
bot_memory_message.content + sentence buffer = remainder yield sentence, True continue def update_last_bot_message_on_cut_off(self, message: str): for memory_message in self.memory.chat_memory.messages[::-1]: if ( isinstance(memory_message, ChatMessage) and memory_message.role == "assistant" ) or isinstance(memory_message, AIMessage): memory_message.content = message return
[ "langchain_community.chat_models.ChatAnthropic", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.memory.ConversationBufferMemory", "langchain.prompts.MessagesPlaceholder", "langchain.schema.HumanMessage", "langchain.schema.AIMessage", "langchain.ConversationChain" ]
[((2147, 2238), 'langchain_community.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model_name': 'agent_config.model_name', 'anthropic_api_key': 'anthropic_api_key'}), '(model_name=agent_config.model_name, anthropic_api_key=\n anthropic_api_key)\n', (2160, 2238), False, 'from langchain_community.chat_models import ChatAnthropic\n'), ((2556, 2602), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (2580, 2602), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2936, 3007), 'langchain.ConversationChain', 'ConversationChain', ([], {'memory': 'self.memory', 'prompt': 'self.prompt', 'llm': 'self.llm'}), '(memory=self.memory, prompt=self.prompt, llm=self.llm)\n', (2953, 3007), False, 'from langchain import ConversationChain\n'), ((3624, 3645), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '""""""'}), "(content='')\n", (3633, 3645), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1468, 1496), 'pydantic.v1.SecretStr', 'SecretStr', (['anthropic_api_key'], {}), '(anthropic_api_key)\n', (1477, 1496), False, 'from pydantic.v1 import SecretStr\n'), ((1625, 1652), 'vocode.getenv', 'getenv', (['"""ANTHROPIC_API_KEY"""'], {}), "('ANTHROPIC_API_KEY')\n", (1631, 1652), False, 'from vocode import getenv\n'), ((2664, 2719), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'self.agent_config.prompt_preamble'}), '(content=self.agent_config.prompt_preamble)\n', (2676, 2719), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((3559, 3592), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_input'}), '(content=human_input)\n', (3571, 3592), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((1713, 1731), 'pydantic.v1.SecretStr', 'SecretStr', (['env_key'], {}), '(env_key)\n', (1722, 1731), False, 'from pydantic.v1 import SecretStr\n'), ((1988, 2032), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (2007, 2032), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2050, 2101), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (2090, 2101), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate\n'), ((2840, 2892), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'agent_config.initial_message.text'}), '(content=agent_config.initial_message.text)\n', (2849, 2892), False, 'from langchain.schema import ChatMessage, AIMessage, HumanMessage\n'), ((4286, 4318), 'vocode.streaming.agent.utils.get_sentence_from_buffer', 'get_sentence_from_buffer', (['buffer'], {}), '(buffer)\n', (4310, 4318), False, 'from vocode.streaming.agent.utils import get_sentence_from_buffer\n')]
from typing import Any, Dict from langchain.base_language import BaseLanguageModel from langchain.prompts import ( ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, ) from langchain.chains import ConversationChain from real_agents.adapters.executors.base import BaseExecutor from real_agents.adapters.memory import ConversationBufferMemory class ChatExecutor(BaseExecutor): """Chat Executor.""" _DEFAULT_TEMPLATE = "The following is a friendly conversation between a human and an AI. \ The AI is talkative and provides lots of specific details from its context. \ If the AI does not know the answer to a question, it truthfully says it does not know." output_key: str = "result" def __init__(self) -> None: """Initialize the executor""" self.memory = ConversationBufferMemory(return_messages=True) def run( self, user_intent: str, llm: BaseLanguageModel, verbose: bool = True, ) -> Dict[str, Any]: """Run the executor. Args: user_intent: User intent to execute. grounding_source: Grounding source to execute the program on. llm: Language model to use. verbose: Whether to print the logging. Returns: Result of string. """ prompt = ChatPromptTemplate.from_messages( [ SystemMessagePromptTemplate.from_template(self._DEFAULT_TEMPLATE), MessagesPlaceholder(variable_name="history"), HumanMessagePromptTemplate.from_template("{input}"), ] ) method = ConversationChain( llm=llm, prompt=prompt, verbose=verbose, memory=self.memory, ) result = method.predict(input=user_intent) output = {self.output_key: result} return output
[ "langchain.chains.ConversationChain", "langchain.prompts.HumanMessagePromptTemplate.from_template", "langchain.prompts.SystemMessagePromptTemplate.from_template", "langchain.prompts.MessagesPlaceholder" ]
[((894, 940), 'real_agents.adapters.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (918, 940), False, 'from real_agents.adapters.memory import ConversationBufferMemory\n'), ((1746, 1824), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'verbose': 'verbose', 'memory': 'self.memory'}), '(llm=llm, prompt=prompt, verbose=verbose, memory=self.memory)\n', (1763, 1824), False, 'from langchain.chains import ConversationChain\n'), ((1502, 1567), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['self._DEFAULT_TEMPLATE'], {}), '(self._DEFAULT_TEMPLATE)\n', (1543, 1567), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n'), ((1586, 1630), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""history"""'}), "(variable_name='history')\n", (1605, 1630), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n'), ((1649, 1700), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{input}"""'], {}), "('{input}')\n", (1689, 1700), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate\n')]
import os from dotenv import load_dotenv, find_dotenv from langchain import HuggingFaceHub from langchain import PromptTemplate, LLMChain, OpenAI from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.chains.summarize import load_summarize_chain from langchain.document_loaders import YoutubeLoader import textwrap # -------------------------------------------------------------- # Load the HuggingFaceHub API token from the .env file # -------------------------------------------------------------- load_dotenv(find_dotenv()) HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"] # -------------------------------------------------------------- # Load the LLM model from the HuggingFaceHub # -------------------------------------------------------------- repo_id = "tiiuae/falcon-7b-instruct" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options falcon_llm = HuggingFaceHub( repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 500} ) # -------------------------------------------------------------- # Create a PromptTemplate and LLMChain # -------------------------------------------------------------- template = """Question: {question} Answer: Let's think step by step.""" prompt = PromptTemplate(template=template, input_variables=["question"]) llm_chain = LLMChain(prompt=prompt, llm=falcon_llm) # -------------------------------------------------------------- # Run the LLMChain # -------------------------------------------------------------- question = "How do I make a sandwich?" response = llm_chain.run(question) wrapped_text = textwrap.fill( response, width=100, break_long_words=False, replace_whitespace=False ) print(wrapped_text) # -------------------------------------------------------------- # Load a video transcript from YouTube # -------------------------------------------------------------- video_url = "https://www.youtube.com/watch?v=riXpu1tHzl0" loader = YoutubeLoader.from_youtube_url(video_url) transcript = loader.load() text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000) docs = text_splitter.split_documents(transcript) # -------------------------------------------------------------- # Summarization with LangChain # -------------------------------------------------------------- # Add map_prompt and combine_prompt to the chain for custom summarization chain = load_summarize_chain(falcon_llm, chain_type="map_reduce", verbose=True) print(chain.llm_chain.prompt.template) print(chain.combine_document_chain.llm_chain.prompt.template) # -------------------------------------------------------------- # Test the Falcon model with text summarization # -------------------------------------------------------------- output_summary = chain.run(docs) wrapped_text = textwrap.fill( output_summary, width=100, break_long_words=False, replace_whitespace=False ) print(wrapped_text) # -------------------------------------------------------------- # Load an OpenAI model for comparison # -------------------------------------------------------------- openai_llm = OpenAI( model_name="text-davinci-003", temperature=0.1, max_tokens=500 ) # max token length is 4097 chain = load_summarize_chain(openai_llm, chain_type="map_reduce", verbose=True) output_summary = chain.run(docs) wrapped_text = textwrap.fill( output_summary, width=100, break_long_words=False, replace_whitespace=False ) print(wrapped_text)
[ "langchain.chains.summarize.load_summarize_chain", "langchain.LLMChain", "langchain.text_splitter.RecursiveCharacterTextSplitter", "langchain.OpenAI", "langchain.document_loaders.YoutubeLoader.from_youtube_url", "langchain.HuggingFaceHub", "langchain.PromptTemplate" ]
[((955, 1048), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 500}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'max_new_tokens': 500})\n", (969, 1048), False, 'from langchain import HuggingFaceHub\n'), ((1305, 1368), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'template', 'input_variables': "['question']"}), "(template=template, input_variables=['question'])\n", (1319, 1368), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1381, 1420), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'falcon_llm'}), '(prompt=prompt, llm=falcon_llm)\n', (1389, 1420), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((1662, 1750), 'textwrap.fill', 'textwrap.fill', (['response'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(response, width=100, break_long_words=False,\n replace_whitespace=False)\n', (1675, 1750), False, 'import textwrap\n'), ((2012, 2053), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['video_url'], {}), '(video_url)\n', (2042, 2053), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((2098, 2145), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(3000)'}), '(chunk_size=3000)\n', (2128, 2145), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((2440, 2511), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['falcon_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(falcon_llm, chain_type='map_reduce', verbose=True)\n", (2460, 2511), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((2841, 2935), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (2854, 2935), False, 'import textwrap\n'), ((3142, 3212), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-003"""', 'temperature': '(0.1)', 'max_tokens': '(500)'}), "(model_name='text-davinci-003', temperature=0.1, max_tokens=500)\n", (3148, 3212), False, 'from langchain import PromptTemplate, LLMChain, OpenAI\n'), ((3255, 3326), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['openai_llm'], {'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(openai_llm, chain_type='map_reduce', verbose=True)\n", (3275, 3326), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((3375, 3469), 'textwrap.fill', 'textwrap.fill', (['output_summary'], {'width': '(100)', 'break_long_words': '(False)', 'replace_whitespace': '(False)'}), '(output_summary, width=100, break_long_words=False,\n replace_whitespace=False)\n', (3388, 3469), False, 'import textwrap\n'), ((541, 554), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (552, 554), False, 'from dotenv import load_dotenv, find_dotenv\n')]
# # Copyright (c) 2023 Airbyte, Inc., all rights reserved. # import json import logging from dataclasses import dataclass from typing import Any, Dict, List, Mapping, Optional, Tuple import dpath.util from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier from airbyte_cdk.models import AirbyteRecordMessage, ConfiguredAirbyteCatalog, ConfiguredAirbyteStream, DestinationSyncMode from airbyte_cdk.utils.traced_exception import AirbyteTracedException, FailureType from langchain.document_loaders.base import Document from langchain.text_splitter import Language, RecursiveCharacterTextSplitter from langchain.utils import stringify_dict METADATA_STREAM_FIELD = "_ab_stream" METADATA_RECORD_ID_FIELD = "_ab_record_id" CDC_DELETED_FIELD = "_ab_cdc_deleted_at" @dataclass class Chunk: page_content: Optional[str] metadata: Dict[str, Any] record: AirbyteRecordMessage embedding: Optional[List[float]] = None headers_to_split_on = ["(?:^|\n)# ", "(?:^|\n)## ", "(?:^|\n)### ", "(?:^|\n)#### ", "(?:^|\n)##### ", "(?:^|\n)###### "] class DocumentProcessor: """ DocumentProcessor is a helper class that generates documents from Airbyte records. It is used to generate documents from records before writing them to the destination: * The text fields are extracted from the record and concatenated to a single string. * The metadata fields are extracted from the record and added to the document metadata. * The document is split into chunks of a given size using a langchain text splitter. The Writer class uses the DocumentProcessor class to internally generate documents from records - in most cases you don't need to use it directly, except if you want to implement a custom writer. The config parameters specified by the ProcessingConfigModel has to be made part of the connector spec to allow the user to configure the document processor. Calling DocumentProcessor.check_config(config) will validate the config and return an error message if the config is invalid. """ streams: Mapping[str, ConfiguredAirbyteStream] @staticmethod def check_config(config: ProcessingConfigModel) -> Optional[str]: if config.text_splitter is not None and config.text_splitter.mode == "separator": for s in config.text_splitter.separators: try: separator = json.loads(s) if not isinstance(separator, str): return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes." except json.decoder.JSONDecodeError: return f"Invalid separator: {s}. Separator needs to be a valid JSON string using double quotes." 
return None def _get_text_splitter( self, chunk_size: int, chunk_overlap: int, splitter_config: Optional[TextSplitterConfigModel] ) -> RecursiveCharacterTextSplitter: if splitter_config is None: splitter_config = SeparatorSplitterConfigModel(mode="separator") if splitter_config.mode == "separator": return RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=[json.loads(s) for s in splitter_config.separators], keep_separator=splitter_config.keep_separator, disallowed_special=(), ) if splitter_config.mode == "markdown": return RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=headers_to_split_on[: splitter_config.split_level], is_separator_regex=True, keep_separator=True, disallowed_special=(), ) if splitter_config.mode == "code": return RecursiveCharacterTextSplitter.from_tiktoken_encoder( chunk_size=chunk_size, chunk_overlap=chunk_overlap, separators=RecursiveCharacterTextSplitter.get_separators_for_language(Language(splitter_config.language)), disallowed_special=(), ) def __init__(self, config: ProcessingConfigModel, catalog: ConfiguredAirbyteCatalog): self.streams = {create_stream_identifier(stream.stream): stream for stream in catalog.streams} self.splitter = self._get_text_splitter(config.chunk_size, config.chunk_overlap, config.text_splitter) self.text_fields = config.text_fields self.metadata_fields = config.metadata_fields self.field_name_mappings = config.field_name_mappings self.logger = logging.getLogger("airbyte.document_processor") def process(self, record: AirbyteRecordMessage) -> Tuple[List[Chunk], Optional[str]]: """ Generate documents from records. :param records: List of AirbyteRecordMessages :return: Tuple of (List of document chunks, record id to delete if a stream is in dedup mode to avoid stale documents in the vector store) """ if CDC_DELETED_FIELD in record.data and record.data[CDC_DELETED_FIELD]: return [], self._extract_primary_key(record) doc = self._generate_document(record) if doc is None: text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields" raise AirbyteTracedException( internal_message="No text fields found in record", message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. 
Please check your processing configuration, there has to be at least one text field set in each record.", failure_type=FailureType.config_error, ) chunks = [ Chunk(page_content=chunk_document.page_content, metadata=chunk_document.metadata, record=record) for chunk_document in self._split_document(doc) ] id_to_delete = doc.metadata[METADATA_RECORD_ID_FIELD] if METADATA_RECORD_ID_FIELD in doc.metadata else None return chunks, id_to_delete def _generate_document(self, record: AirbyteRecordMessage) -> Optional[Document]: relevant_fields = self._extract_relevant_fields(record, self.text_fields) if len(relevant_fields) == 0: return None text = stringify_dict(relevant_fields) metadata = self._extract_metadata(record) return Document(page_content=text, metadata=metadata) def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]: relevant_fields = {} if fields and len(fields) > 0: for field in fields: values = dpath.util.values(record.data, field, separator=".") if values and len(values) > 0: relevant_fields[field] = values if len(values) > 1 else values[0] else: relevant_fields = record.data return self._remap_field_names(relevant_fields) def _extract_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]: metadata = self._extract_relevant_fields(record, self.metadata_fields) metadata[METADATA_STREAM_FIELD] = create_stream_identifier(record) primary_key = self._extract_primary_key(record) if primary_key: metadata[METADATA_RECORD_ID_FIELD] = primary_key return metadata def _extract_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]: stream_identifier = create_stream_identifier(record) current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier] # if the sync mode is deduping, use the primary key to upsert existing records instead of appending new ones if not current_stream.primary_key or current_stream.destination_sync_mode != DestinationSyncMode.append_dedup: return None primary_key = [] for key in current_stream.primary_key: try: primary_key.append(str(dpath.util.get(record.data, key))) except KeyError: primary_key.append("__not_found__") stringified_primary_key = "_".join(primary_key) return f"{stream_identifier}_{stringified_primary_key}" def _split_document(self, doc: Document) -> List[Document]: chunks: List[Document] = self.splitter.split_documents([doc]) return chunks def _remap_field_names(self, fields: Dict[str, Any]) -> Dict[str, Any]: if not self.field_name_mappings: return fields new_fields = fields.copy() for mapping in self.field_name_mappings: if mapping.from_field in new_fields: new_fields[mapping.to_field] = new_fields.pop(mapping.from_field) return new_fields
[ "langchain.document_loaders.base.Document", "langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder", "langchain.text_splitter.Language", "langchain.utils.stringify_dict" ]
[((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'from langchain.utils import stringify_dict\n'), ((6697, 6743), 'langchain.document_loaders.base.Document', 'Document', ([], {'page_content': 'text', 'metadata': 'metadata'}), '(page_content=text, metadata=metadata)\n', (6705, 6743), False, 'from langchain.document_loaders.base import Document\n'), ((7489, 7521), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7513, 7521), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((7799, 7831), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['record'], {}), '(record)\n', (7823, 7831), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((3160, 3206), 'airbyte_cdk.destinations.vector_db_based.config.SeparatorSplitterConfigModel', 'SeparatorSplitterConfigModel', ([], {'mode': '"""separator"""'}), "(mode='separator')\n", (3188, 3206), False, 'from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, TextSplitterConfigModel\n'), ((3674, 3923), 'langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder', 'RecursiveCharacterTextSplitter.from_tiktoken_encoder', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'separators': 'headers_to_split_on[:splitter_config.split_level]', 'is_separator_regex': '(True)', 'keep_separator': '(True)', 'disallowed_special': '()'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, separators=headers_to_split_on[:\n splitter_config.split_level], is_separator_regex=True, keep_separator=\n True, disallowed_special=())\n', (3726, 3923), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n'), ((4513, 4552), 'airbyte_cdk.destinations.vector_db_based.utils.create_stream_identifier', 'create_stream_identifier', (['stream.stream'], {}), '(stream.stream)\n', (4537, 4552), False, 'from airbyte_cdk.destinations.vector_db_based.utils import create_stream_identifier\n'), ((2542, 2555), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2552, 2555), False, 'import json\n'), ((3440, 3453), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (3450, 3453), False, 'import json\n'), ((4308, 4342), 'langchain.text_splitter.Language', 'Language', (['splitter_config.language'], {}), '(splitter_config.language)\n', (4316, 4342), False, 'from langchain.text_splitter import Language, RecursiveCharacterTextSplitter\n')]
from waifu.llm.Brain import Brain from waifu.llm.VectorDB import VectorDB from waifu.llm.SentenceTransformer import STEmbedding from langchain.chat_models import ChatOpenAI from langchain.embeddings import OpenAIEmbeddings from typing import Any, List, Mapping, Optional from langchain.schema import BaseMessage import openai class GPT(Brain): def __init__(self, api_key: str, name: str, stream: bool=False, callback=None, model: str='gpt-3.5-turbo', proxy: str=''): self.llm = ChatOpenAI(openai_api_key=api_key, model_name=model, streaming=stream, callbacks=[callback], temperature=0.85) self.llm_nonstream = ChatOpenAI(openai_api_key=api_key, model_name=model) self.embedding = OpenAIEmbeddings(openai_api_key=api_key) # self.embedding = STEmbedding() self.vectordb = VectorDB(self.embedding, f'./memory/{name}.csv') if proxy != '': openai.proxy = proxy def think(self, messages: List[BaseMessage]): return self.llm(messages).content def think_nonstream(self, messages: List[BaseMessage]): return self.llm_nonstream(messages).content def store_memory(self, text: str | list): '''Save memory embeddings''' self.vectordb.store(text) def extract_memory(self, text: str, top_n: int = 10): '''Retrieve the top_n most relevant memories''' return self.vectordb.query(text, top_n)
[ "langchain.embeddings.OpenAIEmbeddings", "langchain.chat_models.ChatOpenAI" ]
[((576, 690), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model', 'streaming': 'stream', 'callbacks': '[callback]', 'temperature': '(0.85)'}), '(openai_api_key=api_key, model_name=model, streaming=stream,\n callbacks=[callback], temperature=0.85)\n', (586, 690), False, 'from langchain.chat_models import ChatOpenAI\n'), ((812, 864), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model'}), '(openai_api_key=api_key, model_name=model)\n', (822, 864), False, 'from langchain.chat_models import ChatOpenAI\n'), ((890, 930), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': 'api_key'}), '(openai_api_key=api_key)\n', (906, 930), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((996, 1044), 'waifu.llm.VectorDB.VectorDB', 'VectorDB', (['self.embedding', 'f"""./memory/{name}.csv"""'], {}), "(self.embedding, f'./memory/{name}.csv')\n", (1004, 1044), False, 'from waifu.llm.VectorDB import VectorDB\n')]
from time import sleep import copy import redis import json import pickle import traceback from flask import Response, request, stream_with_context from typing import Dict, Union import os from langchain.schema import HumanMessage, SystemMessage from backend.api.language_model import get_llm from backend.main import app, message_id_register, message_pool, logger from backend.utils.streaming import single_round_chat_with_agent_streaming from backend.schemas import OVERLOAD, NEED_CONTINUE_MODEL from backend.schemas import DEFAULT_USER_ID from real_agents.adapters.llm import BaseLanguageModel from real_agents.adapters.agent_helpers import AgentExecutor, Tool from real_agents.adapters.callbacks.agent_streaming import \ AgentStreamingStdOutCallbackHandler from real_agents.adapters.models import ChatOpenAI from real_agents.adapters.memory import ConversationReActBufferMemory from real_agents.adapters.data_model import DataModel, JsonDataModel from real_agents.adapters.interactive_executor import initialize_webot_agent from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor r = redis.Redis(host=os.getenv("REDIS_SERVER"), port=6379, db=0) # adjust host/port/db as needed # here webot and webot_status are stored in redis since the two global variable can not be modified and accessed normally in multiprocess # fixme:now webot is stored without message_id or chat_id info, so it can only be used for one chat at a time # fixme:now webot_status is stored with chat_id info, if the status is not reset after a message ended abnormally e.g. the message is interrupted, it will be reused wrongly for the next chat def get_webot_from_redis(user_id: str, chat_id: str, ) -> WebBrowsingExecutor: data = r.get(f'webot_{user_id}_{chat_id}') if data is not None: webot = pickle.loads(data) else: # initialize a webot with None instrucition if webot does not exist webot = WebBrowsingExecutor(None) save_webot_to_redis(user_id, chat_id, webot) return webot def save_webot_to_redis(user_id: str, chat_id: str, webot: WebBrowsingExecutor, ): r.set(f'webot_{user_id}_{chat_id}', pickle.dumps(webot)) def get_webot_status_from_redis(user_id: str, chat_id: str): webot_status_json = r.get(f'webot_status_{user_id}_{chat_id}') if webot_status_json is not None: webot_status = json.loads(webot_status_json) return webot_status else: return {} def save_webot_status_to_redis(user_id: str, chat_id: str, webot_status: Dict): r.set(f'webot_status_{user_id}_{chat_id}', json.dumps(webot_status)) def reset_webot(user_id: str, chat_id: str): webot = WebBrowsingExecutor(None) save_webot_to_redis(user_id, chat_id, webot) def reset_webot_status(user_id: str, chat_id: str): webot_status = {"webot_status": "idle", "url": None} save_webot_status_to_redis(user_id, chat_id, webot_status) # this function has been deprecated def get_plan(instruction: str, start_url: str, chat_llm: ChatOpenAI): # fixme: Move this into a separate chain or executors to decompose the LLMs system_message = f""" You are a planner to assist another browser automation assistant. Here is the instruction for the other assistant: ``` You MUST take one of the following actions. NEVER EVER EVER make up actions that do not exist: 1. click(element): Clicks on an element 2. setValue(element, value: string): Focuses on and sets the value of an input element 3. finish(): Indicates the task is finished 4. fail(): Indicates that you are unable to complete the task You will be be given a task to perform and the current state of the DOM. 
You will also be given previous actions that you have taken. You may retry a failed action up to one time. This is an example of an action: <Thought>I should click the add to cart button</Thought> <Action>click(223)</Action> You MUST always include the <Thought> and <Action> open/close tags or else your response will be marked as invalid. Rules you MUST follow: 1. You must only take one step at a time. You cannot take multiple actions in a single response. 2. You should not consider the action to present the result to the user. You only need to do available actions. If info in current page is enough for the user to solve the problem, you should finish. ``` Now your responsibility is to give a step-by-step plan according to user's instruction. This plan will be given to the assistant as a reference when it is performing tasks. """.strip() human_message = f""" The user requests the following task: {instruction} Now you are at {start_url} Provide a plan to do this (you can use pseudo description as below to describe the item). Here is an example case: request: Go to google calendar to schedule a meeting current url: "https://google.com" example plan: 1. setValue(searchBar, "google calendar") 2. click(search) 3. click(the item with title of google calendar) 4.1 if user has loginned do nothing 4.2 if user hasn't loginned do login 5. click(create event button) 6. setValue(event title input bar, "meeting") 7. click(save event button) 8. finish() """.strip() messages = [SystemMessage(content=system_message), HumanMessage(content=human_message)] response = chat_llm(messages).content return response def create_webot_interaction_executor( llm: BaseLanguageModel, llm_name: str, user_id: str, chat_id: str ) -> AgentExecutor: """Creates an agent executor for interaction. Args: llm: A llm model. llm_name: A string llm name. user_id: A string of user id. chat_id: A string chat id. Returns: An agent executor. 
""" # Initialize memory memory = ConversationReActBufferMemory(memory_key="chat_history", return_messages=True, max_token_limit=10000) class RunWebot: def __init__(self, webot: WebotExecutor, llm: BaseLanguageModel, user_id: str, chat_id: str): self.llm = llm self.webot = webot self.user_id = user_id self.chat_id = chat_id def run(self, term: str) -> Union[str, Dict, DataModel]: try: user_id = self.user_id chat_id = self.chat_id reset_webot(user_id=user_id, chat_id=chat_id) reset_webot_status(user_id=user_id, chat_id=chat_id) raw_observation = self.webot.run(user_intent=term, llm=self.llm) instruction, start_url = raw_observation["instruction"], \ raw_observation["start_url"] webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) webot.instruction = instruction # webot.plan = get_plan(instruction, start_url) webot.plan = "" save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot) webot_status = { "webot_status": "running", "url": start_url } save_webot_status_to_redis(user_id=user_id, chat_id=chat_id, webot_status=webot_status) while True: webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) if webot.finish or webot.interrupt or webot.error or webot.fail: break else: sleep(0.5) save_webot_status_to_redis(user_id=user_id, chat_id=chat_id, webot_status={"webot_status": "idle", "url": None}) webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) webot.instruction = None save_webot_to_redis(user_id=user_id, chat_id=chat_id, webot=webot) if webot.finish: webot = get_webot_from_redis(user_id=user_id, chat_id=chat_id) action_history = webot.action_history last_page = webot.pages_viewed[-1] observation = JsonDataModel.from_raw_data( { "success": True, "result": json.dumps({"action_history": action_history, "last_page": last_page}, indent=4), "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.fail: observation = JsonDataModel.from_raw_data( { "success": True, "result": "The webot failed to execute the instruction.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.interrupt: observation = JsonDataModel.from_raw_data( { "success": False, "result": "The web browsing is interrupted by user.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation if webot.error: observation = JsonDataModel.from_raw_data( { "success": False, "result": "Error occurs during web browsing.", "intermediate_steps": json.dumps( {"instruction": instruction, "start_url": start_url}, indent=4) } ) return observation except Exception as e: print(traceback.format_exc()) observation = JsonDataModel.from_raw_data( { "success": False, "result": f"Failed in web browsing with the input: {term}, please try again later.", "intermediate_steps": json.dumps({"error": str(e)}) } ) return observation webot = WebotExecutor.from_webot() llm = copy.deepcopy(llm) run_webot = RunWebot(webot, llm, chat_id=chat_id, user_id=user_id) tools = [Tool(name=webot.name, func=run_webot.run, description=webot.description)] continue_model = llm_name if llm_name in NEED_CONTINUE_MODEL else None interaction_executor = initialize_webot_agent( tools, llm, continue_model, memory=memory, verbose=True ) return interaction_executor @app.route("/api/chat_xlang_webot", methods=["POST"]) def chat_xlang_webot() -> Dict: """Returns the chat response of web agent.""" try: # Get request parameters request_json = 
request.get_json() user_id = request_json.pop("user_id", DEFAULT_USER_ID) chat_id = request_json["chat_id"] user_intent = request_json["user_intent"] parent_message_id = request_json["parent_message_id"] llm_name = request_json["llm_name"] temperature = request_json.get("temperature", 0.4) stop_words = ["[RESPONSE_BEGIN]", "TOOL RESPONSE"] kwargs = { "temperature": temperature, "stop": stop_words, } # Get language model llm = get_llm(llm_name, **kwargs) logger.bind(user_id=user_id, chat_id=chat_id, api="/chat", msg_head="Request json").debug(request_json) human_message_id = message_id_register.add_variable(user_intent) ai_message_id = message_id_register.add_variable("") stream_handler = AgentStreamingStdOutCallbackHandler() # Build executor and run chat # reset webot and status reset_webot(user_id=user_id, chat_id=chat_id) reset_webot_status(user_id=user_id, chat_id=chat_id) interaction_executor = create_webot_interaction_executor( llm=llm, llm_name=llm_name, chat_id=chat_id, user_id=user_id ) activated_message_list = message_pool.get_activated_message_list(user_id, chat_id, list(), parent_message_id) message_pool.load_agent_memory_from_list(interaction_executor.memory, activated_message_list) return stream_with_context( Response( single_round_chat_with_agent_streaming( interaction_executor=interaction_executor, user_intent=user_intent, human_message_id=human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id=chat_id, message_list=activated_message_list, parent_message_id=parent_message_id, stream_handler=stream_handler, llm_name=llm_name, app_type="webot", ), content_type="application/json", ) ) except Exception as e: import traceback traceback.print_exc() return Response(response=None, status=f"{OVERLOAD} backend is currently overloaded")
[ "langchain.schema.SystemMessage", "langchain.schema.HumanMessage" ]
[((11305, 11357), 'backend.main.app.route', 'app.route', (['"""/api/chat_xlang_webot"""'], {'methods': "['POST']"}), "('/api/chat_xlang_webot', methods=['POST'])\n", (11314, 11357), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((2664, 2689), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (2683, 2689), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((5769, 5875), 'real_agents.adapters.memory.ConversationReActBufferMemory', 'ConversationReActBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(10000)'}), "(memory_key='chat_history', return_messages=\n True, max_token_limit=10000)\n", (5798, 5875), False, 'from real_agents.adapters.memory import ConversationReActBufferMemory\n'), ((10859, 10885), 'real_agents.web_agent.WebotExecutor.from_webot', 'WebotExecutor.from_webot', ([], {}), '()\n', (10883, 10885), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((10896, 10914), 'copy.deepcopy', 'copy.deepcopy', (['llm'], {}), '(llm)\n', (10909, 10914), False, 'import copy\n'), ((11176, 11255), 'real_agents.adapters.interactive_executor.initialize_webot_agent', 'initialize_webot_agent', (['tools', 'llm', 'continue_model'], {'memory': 'memory', 'verbose': '(True)'}), '(tools, llm, continue_model, memory=memory, verbose=True)\n', (11198, 11255), False, 'from real_agents.adapters.interactive_executor import initialize_webot_agent\n'), ((1125, 1150), 'os.getenv', 'os.getenv', (['"""REDIS_SERVER"""'], {}), "('REDIS_SERVER')\n", (1134, 1150), False, 'import os\n'), ((1810, 1828), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (1822, 1828), False, 'import pickle\n'), ((1931, 1956), 'real_agents.web_agent.WebBrowsingExecutor', 'WebBrowsingExecutor', (['None'], {}), '(None)\n', (1950, 1956), False, 'from real_agents.web_agent import WebBrowsingExecutor, WebotExecutor\n'), ((2152, 2171), 'pickle.dumps', 'pickle.dumps', (['webot'], {}), '(webot)\n', (2164, 2171), False, 'import pickle\n'), ((2364, 2393), 'json.loads', 'json.loads', (['webot_status_json'], {}), '(webot_status_json)\n', (2374, 2393), False, 'import json\n'), ((2579, 2603), 'json.dumps', 'json.dumps', (['webot_status'], {}), '(webot_status)\n', (2589, 2603), False, 'import json\n'), ((5172, 5209), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5185, 5209), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((5227, 5262), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message'}), '(content=human_message)\n', (5239, 5262), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((10999, 11071), 'real_agents.adapters.agent_helpers.Tool', 'Tool', ([], {'name': 'webot.name', 'func': 'run_webot.run', 'description': 'webot.description'}), '(name=webot.name, func=run_webot.run, description=webot.description)\n', (11003, 11071), False, 'from real_agents.adapters.agent_helpers import AgentExecutor, Tool\n'), ((11505, 11523), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (11521, 11523), False, 'from flask import Response, request, stream_with_context\n'), ((12048, 12075), 'backend.api.language_model.get_llm', 'get_llm', (['llm_name'], {}), '(llm_name, **kwargs)\n', (12055, 12075), False, 'from backend.api.language_model import get_llm\n'), ((12237, 12282), 
'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['user_intent'], {}), '(user_intent)\n', (12269, 12282), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12307, 12343), 'backend.main.message_id_register.add_variable', 'message_id_register.add_variable', (['""""""'], {}), "('')\n", (12339, 12343), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((12370, 12407), 'real_agents.adapters.callbacks.agent_streaming.AgentStreamingStdOutCallbackHandler', 'AgentStreamingStdOutCallbackHandler', ([], {}), '()\n', (12405, 12407), False, 'from real_agents.adapters.callbacks.agent_streaming import AgentStreamingStdOutCallbackHandler\n'), ((13127, 13224), 'backend.main.message_pool.load_agent_memory_from_list', 'message_pool.load_agent_memory_from_list', (['interaction_executor.memory', 'activated_message_list'], {}), '(interaction_executor.memory,\n activated_message_list)\n', (13167, 13224), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((14066, 14087), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (14085, 14087), False, 'import traceback\n'), ((14103, 14180), 'flask.Response', 'Response', ([], {'response': 'None', 'status': 'f"""{OVERLOAD} backend is currently overloaded"""'}), "(response=None, status=f'{OVERLOAD} backend is currently overloaded')\n", (14111, 14180), False, 'from flask import Response, request, stream_with_context\n'), ((12085, 12173), 'backend.main.logger.bind', 'logger.bind', ([], {'user_id': 'user_id', 'chat_id': 'chat_id', 'api': '"""/chat"""', 'msg_head': '"""Request json"""'}), "(user_id=user_id, chat_id=chat_id, api='/chat', msg_head=\n 'Request json')\n", (12096, 12173), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((13344, 13714), 'backend.utils.streaming.single_round_chat_with_agent_streaming', 'single_round_chat_with_agent_streaming', ([], {'interaction_executor': 'interaction_executor', 'user_intent': 'user_intent', 'human_message_id': 'human_message_id', 'ai_message_id': 'ai_message_id', 'user_id': 'user_id', 'chat_id': 'chat_id', 'message_list': 'activated_message_list', 'parent_message_id': 'parent_message_id', 'stream_handler': 'stream_handler', 'llm_name': 'llm_name', 'app_type': '"""webot"""'}), "(interaction_executor=\n interaction_executor, user_intent=user_intent, human_message_id=\n human_message_id, ai_message_id=ai_message_id, user_id=user_id, chat_id\n =chat_id, message_list=activated_message_list, parent_message_id=\n parent_message_id, stream_handler=stream_handler, llm_name=llm_name,\n app_type='webot')\n", (13382, 13714), False, 'from backend.utils.streaming import single_round_chat_with_agent_streaming\n'), ((7547, 7557), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (7552, 7557), False, 'from time import sleep\n'), ((10439, 10461), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10459, 10461), False, 'import traceback\n'), ((8392, 8477), 'json.dumps', 'json.dumps', (["{'action_history': action_history, 'last_page': last_page}"], {'indent': '(4)'}), "({'action_history': action_history, 'last_page': last_page}, indent=4\n )\n", (8402, 8477), False, 'import json\n'), ((8574, 8648), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (8584, 8648), False, 'import json\n'), ((9103, 9177), 'json.dumps', 
'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9113, 9177), False, 'import json\n'), ((9634, 9708), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (9644, 9708), False, 'import json\n'), ((10154, 10228), 'json.dumps', 'json.dumps', (["{'instruction': instruction, 'start_url': start_url}"], {'indent': '(4)'}), "({'instruction': instruction, 'start_url': start_url}, indent=4)\n", (10164, 10228), False, 'import json\n')]
from langchain.chains import LLMChain from langchain_core.prompts import PromptTemplate from langchain_openai import ChatOpenAI from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo") llm_creative = ChatOpenAI(temperature=1, model_name="gpt-3.5-turbo") def get_summary_chain() -> LLMChain: summary_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 1. a short summary 2. two interesting facts about them \n{format_instructions} """ summary_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=summary_template, partial_variables={ "format_instructions": summary_parser.get_format_instructions() }, ) return LLMChain(llm=llm, prompt=summary_prompt_template) def get_interests_chain() -> LLMChain: interesting_facts_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 3 topics that might interest them \n{format_instructions} """ interesting_facts_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=interesting_facts_template, partial_variables={ "format_instructions": topics_of_interest_parser.get_format_instructions() }, ) return LLMChain(llm=llm, prompt=interesting_facts_prompt_template) def get_ice_breaker_chain() -> LLMChain: ice_breaker_template = """ given the information about a person from linkedin {information}, and twitter posts {twitter_posts} I want you to create: 2 creative Ice breakers with them that are derived from their activity on Linkedin and twitter, preferably on latest tweets \n{format_instructions} """ ice_breaker_prompt_template = PromptTemplate( input_variables=["information", "twitter_posts"], template=ice_breaker_template, partial_variables={ "format_instructions": ice_breaker_parser.get_format_instructions() }, ) return LLMChain(llm=llm_creative, prompt=ice_breaker_prompt_template)
[ "langchain.chains.LLMChain", "langchain_openai.ChatOpenAI" ]
[((225, 278), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (235, 278), False, 'from langchain_openai import ChatOpenAI\n'), ((294, 347), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(1)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=1, model_name='gpt-3.5-turbo')\n", (304, 347), False, 'from langchain_openai import ChatOpenAI\n'), ((933, 982), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'summary_prompt_template'}), '(llm=llm, prompt=summary_prompt_template)\n', (941, 982), False, 'from langchain.chains import LLMChain\n'), ((1580, 1639), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'interesting_facts_prompt_template'}), '(llm=llm, prompt=interesting_facts_prompt_template)\n', (1588, 1639), False, 'from langchain.chains import LLMChain\n'), ((2304, 2366), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm_creative', 'prompt': 'ice_breaker_prompt_template'}), '(llm=llm_creative, prompt=ice_breaker_prompt_template)\n', (2312, 2366), False, 'from langchain.chains import LLMChain\n'), ((863, 903), 'output_parsers.summary_parser.get_format_instructions', 'summary_parser.get_format_instructions', ([], {}), '()\n', (901, 903), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((1499, 1550), 'output_parsers.topics_of_interest_parser.get_format_instructions', 'topics_of_interest_parser.get_format_instructions', ([], {}), '()\n', (1548, 1550), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n'), ((2230, 2274), 'output_parsers.ice_breaker_parser.get_format_instructions', 'ice_breaker_parser.get_format_instructions', ([], {}), '()\n', (2272, 2274), False, 'from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser\n')]
import asyncio import uvicorn from typing import AsyncIterable, Awaitable from dotenv import load_dotenv from fastapi import FastAPI from fastapi.responses import FileResponse, StreamingResponse from langchain.callbacks import AsyncIteratorCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.schema import HumanMessage load_dotenv() async def wait_done(fn: Awaitable, event: asyncio.Event): try: await fn except Exception as e: print(e) event.set() finally: event.set() async def call_openai(question: str) -> AsyncIterable[str]: callback = AsyncIteratorCallbackHandler() model = ChatOpenAI(streaming=True, verbose=True, callbacks=[callback]) coroutine = wait_done(model.agenerate(messages=[[HumanMessage(content=question)]]), callback.done) task = asyncio.create_task(coroutine) async for token in callback.aiter(): yield f"{token}" await task app = FastAPI() @app.post("/ask") def ask(body: dict): return StreamingResponse(call_openai(body['question']), media_type="text/event-stream") @app.get("/") async def homepage(): return FileResponse('statics/index.html') if __name__ == "__main__": uvicorn.run(host="0.0.0.0", port=8888, app=app)
[ "langchain.callbacks.AsyncIteratorCallbackHandler", "langchain.schema.HumanMessage", "langchain.chat_models.ChatOpenAI" ]
[((345, 358), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import load_dotenv\n'), ((959, 968), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (966, 968), False, 'from fastapi import FastAPI\n'), ((616, 646), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCallbackHandler', ([], {}), '()\n', (644, 646), False, 'from langchain.callbacks import AsyncIteratorCallbackHandler\n'), ((659, 721), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'verbose': '(True)', 'callbacks': '[callback]'}), '(streaming=True, verbose=True, callbacks=[callback])\n', (669, 721), False, 'from langchain.chat_models import ChatOpenAI\n'), ((837, 867), 'asyncio.create_task', 'asyncio.create_task', (['coroutine'], {}), '(coroutine)\n', (856, 867), False, 'import asyncio\n'), ((1149, 1183), 'fastapi.responses.FileResponse', 'FileResponse', (['"""statics/index.html"""'], {}), "('statics/index.html')\n", (1161, 1183), False, 'from fastapi.responses import FileResponse, StreamingResponse\n'), ((1216, 1263), 'uvicorn.run', 'uvicorn.run', ([], {'host': '"""0.0.0.0"""', 'port': '(8888)', 'app': 'app'}), "(host='0.0.0.0', port=8888, app=app)\n", (1227, 1263), False, 'import uvicorn\n'), ((776, 806), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'question'}), '(content=question)\n', (788, 806), False, 'from langchain.schema import HumanMessage\n')]
# -*- coding: UTF-8 -*-
"""
@Project : AI-Vtuber
@File : claude_model.py
@Author : HildaM
@Email : [email protected]
@Date : 2023/06/17 4:44 PM
@Description : Local vector database model settings
"""
from langchain.embeddings import HuggingFaceEmbeddings
import os

# Project root path
TEC2VEC_MODELS_PATH = os.getcwd() + "\\" + "data" + "\\" + "text2vec_models" "\\"

# Default model
DEFAULT_MODEL_NAME = "sebastian-hofstaetter_distilbert-dot-tas_b-b256-msmarco"


def get_default_model():
    return HuggingFaceEmbeddings(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)


def get_text2vec_model(model_name):
    """
    0. Check for None. If empty, load the built-in default model
    1. First check whether the model exists in the project's data/tec2vec_models directory
    2. If it exists, load it directly
    3. If not, download it from Huggingface and keep it in the system cache
    """
    if model_name is None:
        return HuggingFaceEmbeddings(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)

    model_path = TEC2VEC_MODELS_PATH + model_name
    if os.path.exists(model_path):
        return HuggingFaceEmbeddings(model_name=model_path)
    else:
        return HuggingFaceEmbeddings(model_name=model_name)
[ "langchain.embeddings.HuggingFaceEmbeddings" ]
[((468, 542), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (489, 542), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((908, 934), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (922, 934), False, 'import os\n'), ((775, 849), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (796, 849), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((951, 995), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_path'}), '(model_name=model_path)\n', (972, 995), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((1021, 1065), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1042, 1065), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((283, 294), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (292, 294), False, 'import os\n')]
""" Adapted from https://github.com/QwenLM/Qwen-7B/blob/main/examples/react_demo.py """ import json import os from langchain.llms import OpenAI llm = OpenAI( model_name="qwen", temperature=0, openai_api_base="http://192.168.0.53:7891/v1", openai_api_key="xxx", ) # 将一个插件的关键信息拼接成一段文本的模版。 TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters}""" # ReAct prompting 的 instruction 模版,将包含插件的详细信息。 PROMPT_REACT = """Answer the following questions as best you can. You have access to the following tools: {tools_text} Use the following format: Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tools_name_text}] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Final Answer: the final answer to the original input question Begin! Question: {query}""" def llm_with_plugin(prompt: str, history, list_of_plugin_info=()): """ Args: prompt: 用户的最新一个问题。 history: 用户与模型的对话历史,是一个 list, list 中的每个元素为 {"user": "用户输入", "bot": "模型输出"} 的一轮对话。 最新的一轮对话放 list 末尾。不包含最新一个问题。 list_of_plugin_info: 候选插件列表,是一个 list,list 中的每个元素为一个插件的关键信息。 比如 list_of_plugin_info = [plugin_info_0, plugin_info_1, plugin_info_2], 其中 plugin_info_0, plugin_info_1, plugin_info_2 这几个样例见本文档前文。 Returns: 模型对用户最新一个问题的回答。 """ chat_history = [(x["user"], x["bot"]) for x in history] + [(prompt, '')] # 需要让模型进行续写的初始文本 planning_prompt = build_input_text(chat_history, list_of_plugin_info) text = "" while True: output = text_completion(planning_prompt + text, stop_words=["Observation:", "Observation:\n"]) action, action_input, output = parse_latest_plugin_call(output) if action: # 需要调用插件 # action、action_input 分别为需要调用的插件代号、输入参数 # observation是插件返回的结果,为字符串 observation = call_plugin(action, action_input) output += f"\nObservation: {observation}\nThought:" text += output else: # 生成结束,并且不再需要调用插件 text += output break new_history = [] new_history.extend(history) new_history.append({"user": prompt, "bot": text}) return text, new_history def build_input_text(chat_history, list_of_plugin_info) -> str: """ 将对话历史、插件信息聚合成一段初始文本 """ tools_text = [] for plugin_info in list_of_plugin_info: tool = TOOL_DESC.format( name_for_model=plugin_info["name_for_model"], name_for_human=plugin_info["name_for_human"], description_for_model=plugin_info["description_for_model"], parameters=json.dumps(plugin_info["parameters"], ensure_ascii=False), ) if plugin_info.get("args_format", "json") == "json": tool += " Format the arguments as a JSON object." elif plugin_info['args_format'] == 'code': tool += " Enclose the code within triple backticks (`) at the beginning and end of the code." 
else: raise NotImplementedError tools_text.append(tool) tools_text = '\n\n'.join(tools_text) # 候选插件的代号 tools_name_text = ", ".join([plugin_info["name_for_model"] for plugin_info in list_of_plugin_info]) im_start = "<|im_start|>" im_end = "<|im_end|>" prompt = f"{im_start}system\nYou are a helpful assistant.{im_end}" for i, (query, response) in enumerate(chat_history): if list_of_plugin_info: # 如果有候选插件 # 倒数第一轮或倒数第二轮对话填入详细的插件信息,但具体什么位置填可以自行判断 if (len(chat_history) == 1) or (i == len(chat_history) - 2): query = PROMPT_REACT.format( tools_text=tools_text, tools_name_text=tools_name_text, query=query, ) query = query.lstrip("\n").rstrip() # 重要!若不 strip 会与训练时数据的构造方式产生差异。 response = response.lstrip("\n").rstrip() # 重要!若不 strip 会与训练时数据的构造方式产生差异。 # 使用续写模式(text completion)时,需要用如下格式区分用户和AI: prompt += f"\n{im_start}user\n{query}{im_end}" prompt += f"\n{im_start}assistant\n{response}{im_end}" assert prompt.endswith(f"\n{im_start}assistant\n{im_end}") prompt = prompt[: -len(f"{im_end}")] return prompt def text_completion(input_text: str, stop_words) -> str: # 作为一个文本续写模型来使用 im_end = "<|im_end|>" if im_end not in stop_words: stop_words = stop_words + [im_end] return llm(input_text, stop=stop_words) # 续写 input_text 的结果,不包含 input_text 的内容 def parse_latest_plugin_call(text): plugin_name, plugin_args = "", "" i = text.rfind("\nAction:") j = text.rfind("\nAction Input:") k = text.rfind("\nObservation:") if 0 <= i < j: # If the text has `Action` and `Action input`, if k < j: # but does not contain `Observation`, # then it is likely that `Observation` is ommited by the LLM, # because the output text may have discarded the stop word. text = text.rstrip() + "\nObservation:" # Add it back. k = text.rfind("\nObservation:") plugin_name = text[i + len("\nAction:"): j].strip() plugin_args = text[j + len("\nAction Input:"): k].strip() text = text[:k] return plugin_name, plugin_args, text def call_plugin(plugin_name: str, plugin_args: str) -> str: """ 请开发者自行完善这部分内容。这里的参考实现仅是 demo 用途,非生产用途 """ if plugin_name == "google_search": # 使用 SerpAPI 需要在这里填入您的 SERPAPI_API_KEY! os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_API_KEY", default="") from langchain import SerpAPIWrapper return SerpAPIWrapper().run(json.loads(plugin_args)["search_query"]) elif plugin_name == "image_gen": import urllib.parse prompt = json.loads(plugin_args)["prompt"] prompt = urllib.parse.quote(prompt) return json.dumps({"image_url": f"https://image.pollinations.ai/prompt/{prompt}"}, ensure_ascii=False) else: raise NotImplementedError def test(): tools = [ { "name_for_human": "谷歌搜索", "name_for_model": "google_search", "description_for_model": "谷歌搜索是一个通用搜索引擎,可用于访问互联网、查询百科知识、了解时事新闻等。", "parameters": [ { "name": "search_query", "description": "搜索关键词或短语", "required": True, "schema": {"type": "string"}, } ], }, { "name_for_human": "文生图", "name_for_model": "image_gen", "description_for_model": "文生图是一个AI绘画(图像生成)服务,输入文本描述,返回根据文本作画得到的图片的URL", "parameters": [ { "name": "prompt", "description": "英文关键词,描述了希望图像具有什么内容", "required": True, "schema": {"type": "string"}, } ], }, ] history = [] for query in ["你好", "谁是周杰伦", "他老婆是谁", "给我画个可爱的小猫吧,最好是黑猫"]: print(f"User's Query:\n{query}\n") response, history = llm_with_plugin(prompt=query, history=history, list_of_plugin_info=tools) print(f"Qwen's Response:\n{response}\n") if __name__ == "__main__": test() """如果执行成功,在终端下应当能看到如下输出: User's Query: 你好 Qwen's Response: Thought: 提供的工具对回答该问题帮助较小,我将不使用工具直接作答。 Final Answer: 你好!很高兴见到你。有什么我可以帮忙的吗? 
User's Query: 谁是周杰伦 Qwen's Response: Thought: 我应该使用Google搜索查找相关信息。 Action: google_search Action Input: {"search_query": "周杰伦"} Observation: Jay Chou is a Taiwanese singer, songwriter, record producer, rapper, actor, television personality, and businessman. Thought: I now know the final answer. Final Answer: 周杰伦(Jay Chou)是一位来自台湾的歌手、词曲创作人、音乐制作人、说唱歌手、演员、电视节目主持人和企业家。他以其独特的音乐风格和才华在华语乐坛享有很高的声誉。 User's Query: 他老婆是谁 Qwen's Response: Thought: 我应该使用Google搜索查找相关信息。 Action: google_search Action Input: {"search_query": "周杰伦 老婆"} Observation: Hannah Quinlivan Thought: I now know the final answer. Final Answer: 周杰伦的老婆是Hannah Quinlivan,她是一位澳大利亚籍的模特和演员。两人于2015年结婚,并育有一子。 User's Query: 给我画个可爱的小猫吧,最好是黑猫 Qwen's Response: Thought: 我应该使用文生图API来生成一张可爱的小猫图片。 Action: image_gen Action Input: {"prompt": "cute black cat"} Observation: {"image_url": "https://image.pollinations.ai/prompt/cute%20black%20cat"} Thought: I now know the final answer. Final Answer: 生成的可爱小猫图片的URL为https://image.pollinations.ai/prompt/cute%20black%20cat。你可以点击这个链接查看图片。 """
[ "langchain.SerpAPIWrapper", "langchain.llms.OpenAI" ]
[((153, 267), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""qwen"""', 'temperature': '(0)', 'openai_api_base': '"""http://192.168.0.53:7891/v1"""', 'openai_api_key': '"""xxx"""'}), "(model_name='qwen', temperature=0, openai_api_base=\n 'http://192.168.0.53:7891/v1', openai_api_key='xxx')\n", (159, 267), False, 'from langchain.llms import OpenAI\n'), ((5778, 5818), 'os.getenv', 'os.getenv', (['"""SERPAPI_API_KEY"""'], {'default': '""""""'}), "('SERPAPI_API_KEY', default='')\n", (5787, 5818), False, 'import os\n'), ((6118, 6217), 'json.dumps', 'json.dumps', (["{'image_url': f'https://image.pollinations.ai/prompt/{prompt}'}"], {'ensure_ascii': '(False)'}), "({'image_url': f'https://image.pollinations.ai/prompt/{prompt}'},\n ensure_ascii=False)\n", (6128, 6217), False, 'import json\n'), ((2933, 2990), 'json.dumps', 'json.dumps', (["plugin_info['parameters']"], {'ensure_ascii': '(False)'}), "(plugin_info['parameters'], ensure_ascii=False)\n", (2943, 2990), False, 'import json\n'), ((5880, 5896), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (5894, 5896), False, 'from langchain import SerpAPIWrapper\n'), ((5901, 5924), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (5911, 5924), False, 'import json\n'), ((6025, 6048), 'json.loads', 'json.loads', (['plugin_args'], {}), '(plugin_args)\n', (6035, 6048), False, 'import json\n')]
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Cohere APIs.""" from __future__ import annotations import logging from typing import Any, Callable, Dict, List, Optional from pydantic import Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]: import cohere min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=(retry_if_exception_type(cohere.error.CohereError)), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.generate(**kwargs) return _completion_with_retry(**kwargs) class Cohere(LLM): """Wrapper around Cohere large language models. To use, you should have the ``cohere`` python package installed, and the environment variable ``COHERE_API_KEY`` set with your API key, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Cohere cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key") """ client: Any #: :meta private: model: Optional[str] = None """Model name to use.""" max_tokens: int = 256 """Denotes the number of tokens to predict per generation.""" temperature: float = 0.75 """A non-negative float that tunes the degree of randomness in generation.""" k: int = 0 """Number of most likely tokens to consider at each step.""" p: int = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency. Between 0 and 1.""" presence_penalty: float = 0.0 """Penalizes repeated tokens. Between 0 and 1.""" truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" max_retries: int = 10 """Maximum number of retries to make when generating.""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ImportError( "Could not import cohere python package. " "Please install it with `pip install cohere`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""cohere_api_key"""', '"""COHERE_API_KEY"""'], {}), "(values, 'cohere_api_key', 'COHERE_API_KEY')\n", (3215, 3259), False, 'from langchain.utils import get_from_dict_or_env\n'), ((866, 901), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['llm.max_retries'], {}), '(llm.max_retries)\n', (884, 901), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((916, 980), 'tenacity.wait_exponential', 'wait_exponential', ([], {'multiplier': '(1)', 'min': 'min_seconds', 'max': 'max_seconds'}), '(multiplier=1, min=min_seconds, max=max_seconds)\n', (932, 980), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((997, 1046), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['cohere.error.CohereError'], {}), '(cohere.error.CohereError)\n', (1020, 1046), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((1070, 1111), 'tenacity.before_sleep_log', 'before_sleep_log', (['logger', 'logging.WARNING'], {}), '(logger, logging.WARNING)\n', (1086, 1111), False, 'from tenacity import before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential\n'), ((3353, 3382), 'cohere.Client', 'cohere.Client', (['cohere_api_key'], {}), '(cohere_api_key)\n', (3366, 3382), False, 'import cohere\n'), ((5575, 5626), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', "params['stop_sequences']"], {}), "(text, params['stop_sequences'])\n", (5594, 5626), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
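A minimal usage sketch for the Cohere wrapper above, reusing the placeholder model name and API key from its own docstring; it assumes the `cohere` package is installed and that the key is valid.

from langchain.llms import Cohere

# Placeholder model name and API key taken from the class docstring;
# alternatively, set COHERE_API_KEY in the environment.
llm = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")

# LLM instances are callable; stop sequences are optional.
print(llm("Tell me a joke."))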
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
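A minimal usage sketch for the GooseAI wrapper above; it assumes the `openai` package is installed and that GOOSEAI_API_KEY is set in the environment. The model name shown is the class default.

from langchain.llms import GooseAI

# Reads GOOSEAI_API_KEY from the environment; any extra keyword arguments
# would be collected into model_kwargs by the pre-validator.
llm = GooseAI(model_name="gpt-neo-20b")
print(llm("Tell me a joke."))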
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class GooseAI(LLM): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." 
) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text
[ "langchain.utils.get_from_dict_or_env" ]
[((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836, 1863), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1841, 1863), False, 'from pydantic import Extra, Field, root_validator\n'), ((2085, 2109), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (2099, 2109), False, 'from pydantic import Extra, Field, root_validator\n'), ((2995, 3011), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3009, 3011), False, 'from pydantic import Extra, Field, root_validator\n'), ((3173, 3239), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""gooseai_api_key"""', '"""GOOSEAI_API_KEY"""'], {}), "(values, 'gooseai_api_key', 'GOOSEAI_API_KEY')\n", (3193, 3239), False, 'from langchain.utils import get_from_dict_or_env\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
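A minimal usage sketch for the Anyscale wrapper above, reusing the placeholder values from its docstring; it assumes a reachable Anyscale Service and a valid token, since the validator probes the service URL at construction time.

from langchain.llms import Anyscale

# SERVICE_URL, SERVICE_ROUTE and SERVICE_TOKEN are placeholders; the matching
# ANYSCALE_* environment variables can be used instead of constructor arguments.
llm = Anyscale(
    anyscale_service_url="SERVICE_URL",
    anyscale_service_route="SERVICE_ROUTE",
    anyscale_service_token="SERVICE_TOKEN",
)
print(llm("Tell me a joke."))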
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
"""Wrapper around Anyscale""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import Extra, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env class Anyscale(LLM): """Wrapper around Anyscale Services. To use, you should have the environment variable ``ANYSCALE_SERVICE_URL``, ``ANYSCALE_SERVICE_ROUTE`` and ``ANYSCALE_SERVICE_TOKEN`` set with your Anyscale Service, or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.llms import Anyscale anyscale = Anyscale(anyscale_service_url="SERVICE_URL", anyscale_service_route="SERVICE_ROUTE", anyscale_service_token="SERVICE_TOKEN") # Use Ray for distributed processing import ray prompt_list=[] @ray.remote def send_query(llm, prompt): resp = llm(prompt) return resp futures = [send_query.remote(anyscale, prompt) for prompt in prompt_list] results = ray.get(futures) """ model_kwargs: Optional[dict] = None """Key word arguments to pass to the model. Reserved for future use""" anyscale_service_url: Optional[str] = None anyscale_service_route: Optional[str] = None anyscale_service_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anyscale_service_url = get_from_dict_or_env( values, "anyscale_service_url", "ANYSCALE_SERVICE_URL" ) anyscale_service_route = get_from_dict_or_env( values, "anyscale_service_route", "ANYSCALE_SERVICE_ROUTE" ) anyscale_service_token = get_from_dict_or_env( values, "anyscale_service_token", "ANYSCALE_SERVICE_TOKEN" ) try: anyscale_service_endpoint = f"{anyscale_service_url}/-/route" headers = {"Authorization": f"Bearer {anyscale_service_token}"} requests.get(anyscale_service_endpoint, headers=headers) except requests.exceptions.RequestException as e: raise ValueError(e) values["anyscale_service_url"] = anyscale_service_url values["anyscale_service_route"] = anyscale_service_route values["anyscale_service_token"] = anyscale_service_token return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { "anyscale_service_url": self.anyscale_service_url, "anyscale_service_route": self.anyscale_service_route, } @property def _llm_type(self) -> str: """Return type of llm.""" return "anyscale" def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call out to Anyscale Service endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = anyscale("Tell me a joke.") """ anyscale_service_endpoint = ( f"{self.anyscale_service_url}/{self.anyscale_service_route}" ) headers = {"Authorization": f"Bearer {self.anyscale_service_token}"} body = {"prompt": prompt} resp = requests.post(anyscale_service_endpoint, headers=headers, json=body) if resp.status_code != 200: raise ValueError( f"Error returned by service, status code {resp.status_code}" ) text = resp.text if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. 
text = enforce_stop_tokens(text, stop) return text
[ "langchain.llms.utils.enforce_stop_tokens", "langchain.utils.get_from_dict_or_env" ]
[((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'anyscale_service_url', 'ANYSCALE_SERVICE_URL')\n", (1882, 1938), False, 'from langchain.utils import get_from_dict_or_env\n'), ((1994, 2079), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_route"""', '"""ANYSCALE_SERVICE_ROUTE"""'], {}), "(values, 'anyscale_service_route', 'ANYSCALE_SERVICE_ROUTE'\n )\n", (2014, 2079), False, 'from langchain.utils import get_from_dict_or_env\n'), ((2130, 2215), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_token"""', '"""ANYSCALE_SERVICE_TOKEN"""'], {}), "(values, 'anyscale_service_token', 'ANYSCALE_SERVICE_TOKEN'\n )\n", (2150, 2215), False, 'from langchain.utils import get_from_dict_or_env\n'), ((3943, 4011), 'requests.post', 'requests.post', (['anyscale_service_endpoint'], {'headers': 'headers', 'json': 'body'}), '(anyscale_service_endpoint, headers=headers, json=body)\n', (3956, 4011), False, 'import requests\n'), ((2408, 2464), 'requests.get', 'requests.get', (['anyscale_service_endpoint'], {'headers': 'headers'}), '(anyscale_service_endpoint, headers=headers)\n', (2420, 2464), False, 'import requests\n'), ((4390, 4421), 'langchain.llms.utils.enforce_stop_tokens', 'enforce_stop_tokens', (['text', 'stop'], {}), '(text, stop)\n', (4409, 4421), False, 'from langchain.llms.utils import enforce_stop_tokens\n')]
from langchain.prompts import PromptTemplate

_symptom_extract_template = """Consider the following conversation patient note:
Patient note: {note}

Choose one of the symptoms to be the chief complaint (it is usually the first symptom mentioned).

Provide your response strictly in the following format, replacing only the name_of_chief_complaint (keeping : yes), and refrain from including any additional text:

<symptom> name_of_chief_complaint </symptom>
"""

_symptom_match_template = """Given the symptom: {symptom} which of the following retrievals is the best match?

Retrievals:
{retrievals}

Select only one and write it below in the following format:
<match> choice </match>

Remember, do not include any other text, ensure your choice is in the provided retrievals, and follow the output format.
"""

CC_EXTRACT_PROMPT = PromptTemplate.from_template(_symptom_extract_template)
CC_MATCH_PROMPT = PromptTemplate.from_template(_symptom_match_template)
[ "langchain.prompts.PromptTemplate.from_template" ]
[((830, 885), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_extract_template'], {}), '(_symptom_extract_template)\n', (858, 885), False, 'from langchain.prompts import PromptTemplate\n'), ((904, 957), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_match_template'], {}), '(_symptom_match_template)\n', (932, 957), False, 'from langchain.prompts import PromptTemplate\n')]
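A minimal usage sketch for the two prompt templates above; the patient note, symptom, and retrieval list are hypothetical values chosen only to fill the {note}, {symptom}, and {retrievals} placeholders.

# Hypothetical inputs for illustration; format() returns the filled-in prompt string.
extract_prompt = CC_EXTRACT_PROMPT.format(
    note="Patient reports chest pain and shortness of breath since this morning."
)
match_prompt = CC_MATCH_PROMPT.format(
    symptom="chest pain",
    retrievals="- chest pain\n- shortness of breath",
)
print(extract_prompt)
print(match_prompt)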