date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~javelin_ai_gateway.py | from __future__ import annotations
from typing import Any, Iterator, List, Optional
from langchain.pydantic_v1 import BaseModel
from langchain.schema.embeddings import Embeddings
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
"""
Wrapper around embeddings LLMs in the Javelin AI Gateway.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain.embeddings import JavelinAIGatewayEmbeddings
embeddings = JavelinAIGatewayEmbeddings(
gateway_uri="<javelin-ai-gateway-uri>",
route="<your-javelin-gateway-embeddings-route>"
)
"""
client: Any
"""javelin client."""
route: str
"""The route to use for the Javelin AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the Javelin AI Gateway API."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
def _query(self, texts: List[str]) -> List[List[float]]:
embeddings = []
for txt in _chunk(texts, 20):
try:
resp = self.client.query_route(self.route, query_body={"input": txt})
resp_dict = resp.dict()
embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
for item in embeddings_chunk:
if "embedding" in item:
embeddings.append(item["embedding"])
except ValueError as e:
print("Failed to query route: " + str(e))
return embeddings
async def _aquery(self, texts: List[str]) -> List[List[float]]:
embeddings = []
for txt in _chunk(texts, 20):
try:
resp = await self.client.aquery_route(
self.route, query_body={"input": txt}
)
resp_dict = resp.dict()
embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
for item in embeddings_chunk:
if "embedding" in item:
embeddings.append(item["embedding"])
except ValueError as e:
print("Failed to query route: " + str(e))
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self._query(texts)
def embed_query(self, text: str) -> List[float]:
return self._query([text])[0]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
return await self._aquery(texts)
async def aembed_query(self, text: str) -> List[float]:
result = await self._aquery([text])
return result[0]
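# --- Editor's illustration: hypothetical usage sketch, not part of the original module.
# --- It assumes the `javelin_sdk` package is installed and a Javelin AI Gateway is
# --- reachable at the placeholder URI, with an embeddings route of the placeholder name.
if __name__ == "__main__":
    embedder = JavelinAIGatewayEmbeddings(
        gateway_uri="http://localhost:8000",  # placeholder gateway URI
        route="embeddings",  # placeholder route name
    )
    doc_vectors = embedder.embed_documents(["first document", "second document"])
    query_vector = embedder.embed_query("a single query")
    print(len(doc_vectors), len(query_vector))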
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~document_loaders~test_news.py | import random
import pytest
import requests
from langchain.document_loaders import NewsURLLoader
def get_random_news_url() -> str:
from bs4 import BeautifulSoup
response = requests.get("https://news.google.com")
soup = BeautifulSoup(response.text, "html.parser")
article_links = [
a["href"] for a in soup.find_all("a", href=True) if "/articles/" in a["href"]
]
random_article_link = random.choice(article_links)
return "https://news.google.com" + random_article_link
def test_news_loader() -> None:
loader = NewsURLLoader([get_random_news_url()])
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], "page_content")
assert hasattr(docs[0], "metadata")
metadata = docs[0].metadata
assert "title" in metadata
assert "link" in metadata
assert "authors" in metadata
assert "language" in metadata
assert "description" in metadata
assert "publish_date" in metadata
def test_news_loader_with_nlp() -> None:
loader = NewsURLLoader([get_random_news_url()], nlp=True)
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], "page_content")
assert hasattr(docs[0], "metadata")
metadata = docs[0].metadata
assert "title" in metadata
assert "link" in metadata
assert "authors" in metadata
assert "language" in metadata
assert "description" in metadata
assert "publish_date" in metadata
assert "keywords" in metadata
assert "summary" in metadata
def test_continue_on_failure_true() -> None:
"""Test exception is not raised when continue_on_failure=True."""
loader = NewsURLLoader(["badurl.foobar"])
loader.load()
def test_continue_on_failure_false() -> None:
"""Test exception is raised when continue_on_failure=False."""
loader = NewsURLLoader(["badurl.foobar"], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~llm_checker~prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
_CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
input_variables=["question"], template=_CREATE_DRAFT_ANSWER_TEMPLATE
)
_LIST_ASSERTIONS_TEMPLATE = """Вот утверждение:
{statement}
Составь список предположений, которые ты сделал, формулируя вышеуказанное утверждение.\n\n"""
LIST_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE
)
_CHECK_ASSERTIONS_TEMPLATE = """Вот список утверждений:
{assertions}
Для каждого утверждения определи, верно оно или нет. Если оно неверно, объясни почему.\n\n"""
CHECK_ASSERTIONS_PROMPT = PromptTemplate(
input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE
)
_REVISED_ANSWER_TEMPLATE = """{checked_assertions}
Question: Учитывая вышеуказанные утверждения и проверки, как бы ты ответил на вопрос '{question}'?
Ответ:"""
REVISED_ANSWER_PROMPT = PromptTemplate(
input_variables=["checked_assertions", "question"],
template=_REVISED_ANSWER_TEMPLATE,
)
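# --- Editor's illustration: hypothetical formatting sketch showing how these templates
# --- are filled in; the question and assertion strings below are invented placeholders.
if __name__ == "__main__":
    draft = CREATE_DRAFT_ANSWER_PROMPT.format(question="How many planets are in the Solar System?")
    assertions = LIST_ASSERTIONS_PROMPT.format(statement="There are eight planets in the Solar System.")
    revised = REVISED_ANSWER_PROMPT.format(
        checked_assertions="The assertion is true.",
        question="How many planets are in the Solar System?",
    )
    print(draft, assertions, revised, sep="\n---\n")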
| [
"{checked_assertions}\n\nQuestion: Учитывая вышеуказанные утверждения и проверки, как бы ты ответил на вопрос '{question}'?\n\nОтвет:",
"question",
"statement",
"Вот список утверждений:\n{assertions}\nДля каждого утверждения определи, верно оно или нет. Если оно неверно, объясни почему.\n\n",
"{question}\n\n",
"checked_assertions",
"assertions",
"Вот утверждение:\n{statement}\nСоставь список предположений, которые ты сделал, формулируя вышеуказанное утверждение.\n\n",
"{question}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~javelin_ai_gateway.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.pydantic_v1 import BaseModel, Extra
from langchain.schema import (
ChatGeneration,
ChatResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
logger = logging.getLogger(__name__)
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg]
class ChatParams(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""Parameters for the `Javelin AI Gateway` LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class ChatJavelinAIGateway(BaseChatModel):
"""`Javelin AI Gateway` chat models API.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain.chat_models import ChatJavelinAIGateway
chat = ChatJavelinAIGateway(
gateway_uri="<javelin-ai-gateway-uri>",
route="<javelin-ai-gateway-chat-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the Javelin AI Gateway API."""
params: Optional[ChatParams] = None
"""Parameters for the Javelin AI Gateway LLM."""
client: Any
"""javelin client."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"javelin_api_key": self.javelin_api_key,
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = self.client.query_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = await self.client.aquery_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "javelin-ai-gateway-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatJavelinAIGateway._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by Javelin AI Gateway "
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for candidate in response["llm_response"]["choices"]:
message = ChatJavelinAIGateway._convert_dict_to_message(
candidate["message"]
)
message_metadata = candidate.get("metadata", {})
gen = ChatGeneration(
message=message,
generation_info=dict(message_metadata),
)
generations.append(gen)
response_metadata = response.get("metadata", {})
return ChatResult(generations=generations, llm_output=response_metadata)
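# --- Editor's illustration: hypothetical usage sketch, not part of the original module.
# --- It assumes `javelin_sdk` is installed and a gateway with a chat route is running;
# --- the URI and route name below are placeholders.
if __name__ == "__main__":
    chat = ChatJavelinAIGateway(
        gateway_uri="http://localhost:8000",  # placeholder
        route="chat",  # placeholder
        params=ChatParams(temperature=0.1),
    )
    reply = chat(
        [
            SystemMessage(content="You are a helpful assistant."),
            HumanMessage(content="Say hello in one sentence."),
        ]
    )
    print(reply.content)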
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~agent_toolkits~vectorstore~toolkit.py | """Toolkit for interacting with a vector store."""
from typing import List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.llms.openai import OpenAI
from langchain.pydantic_v1 import BaseModel, Field
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.vectorstore import VectorStore
from langchain.tools import BaseTool
from langchain.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: List[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
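# --- Editor's illustration: hypothetical wiring sketch, not part of the original module.
# --- It assumes `faiss` and an OpenAI API key are available; the store contents and
# --- names are placeholders.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    store = FAISS.from_texts(["LangChain is a framework for LLM apps."], OpenAIEmbeddings())
    info = VectorStoreInfo(vectorstore=store, name="docs", description="toy documentation store")
    toolkit = VectorStoreToolkit(vectorstore_info=info)
    for tool in toolkit.get_tools():
        print(tool.name, "-", tool.description)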
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~edenai.py | from typing import Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
class EdenAiEmbeddings(BaseModel, Embeddings):
"""EdenAI embedding.
To use, you should have the environment variable ``EDENAI_API_KEY``
set with your API key, or pass
it as a named parameter.
"""
edenai_api_key: Optional[str] = Field(None, description="EdenAI API Token")
provider: Optional[str] = "openai"
"""embedding provider to use (eg: openai,google etc.)"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Compute embeddings using EdenAi api."""
url = "https://api.edenai.run/v2/text/embeddings"
headers = {
"accept": "application/json",
"content-type": "application/json",
"authorization": f"Bearer {self.edenai_api_key}",
}
payload = {"texts": texts, "providers": self.provider}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
temp = response.json()
embeddings = []
for embed_item in temp[self.provider]["items"]:
embedding = embed_item["embedding"]
embeddings.append(embedding)
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._generate_embeddings(texts)
def embed_query(self, text: str) -> List[float]:
"""Embed a query using EdenAI.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._generate_embeddings([text])[0]
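# --- Editor's illustration: hypothetical usage sketch. It requires a real
# --- EDENAI_API_KEY in the environment; the texts are placeholders and the
# --- provider defaults to "openai".
if __name__ == "__main__":
    embedder = EdenAiEmbeddings()
    vectors = embedder.embed_documents(["hello", "world"])
    query_vector = embedder.embed_query("hello")
    print(len(vectors), len(query_vector))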
| [
"application/json"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~ai21.py | from typing import Any, Dict, List, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class AI21PenaltyData(BaseModel):
"""Parameters for AI21 penalty data."""
scale: int = 0
applyToWhitespaces: bool = True
applyToPunctuations: bool = True
applyToNumbers: bool = True
applyToStopwords: bool = True
applyToEmojis: bool = True
class AI21(LLM):
"""AI21 large language models.
To use, you should have the environment variable ``AI21_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain.llms import AI21
ai21 = AI21(model="j2-jumbo-instruct")
"""
model: str = "j2-jumbo-instruct"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
maxTokens: int = 256
"""The maximum number of tokens to generate in the completion."""
minTokens: int = 0
"""The minimum number of tokens to generate in the completion."""
topP: float = 1.0
"""Total probability mass of tokens to consider at each step."""
presencePenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens."""
countPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to count."""
frequencyPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to frequency."""
numResults: int = 1
"""How many completions to generate for each prompt."""
logitBias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
ai21_api_key: Optional[str] = None
stop: Optional[List[str]] = None
base_url: Optional[str] = None
"""Base url to use, if None decides based on model name."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY")
values["ai21_api_key"] = ai21_api_key
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling AI21 API."""
return {
"temperature": self.temperature,
"maxTokens": self.maxTokens,
"minTokens": self.minTokens,
"topP": self.topP,
"presencePenalty": self.presencePenalty.dict(),
"countPenalty": self.countPenalty.dict(),
"frequencyPenalty": self.frequencyPenalty.dict(),
"numResults": self.numResults,
"logitBias": self.logitBias,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ai21"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to AI21's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ai21("Tell me a joke.")
"""
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
if self.base_url is not None:
base_url = self.base_url
else:
if self.model in ("j1-grande-instruct",):
base_url = "https://api.ai21.com/studio/v1/experimental"
else:
base_url = "https://api.ai21.com/studio/v1"
params = {**self._default_params, **kwargs}
response = requests.post(
url=f"{base_url}/{self.model}/complete",
headers={"Authorization": f"Bearer {self.ai21_api_key}"},
json={"prompt": prompt, "stopSequences": stop, **params},
)
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"AI21 /complete call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
response_json = response.json()
return response_json["completions"][0]["data"]["text"]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~koboldai.py | import logging
from typing import Any, Dict, List, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
def clean_url(url: str) -> str:
"""Remove trailing slash and /api from url if present."""
if url.endswith("/api"):
return url[:-4]
elif url.endswith("/"):
return url[:-1]
else:
return url
class KoboldApiLLM(LLM):
"""Kobold API language model.
It includes several fields that can be used to control the text generation process.
To use this class, instantiate it with the required parameters and call it with a
prompt to generate text. For example:
kobold = KoboldApiLLM(endpoint="http://localhost:5000")
result = kobold("Write a story about a dragon.")
This will send a POST request to the Kobold API with the provided prompt and
generate text.
"""
endpoint: str
"""The API endpoint to use for generating text."""
use_story: Optional[bool] = False
""" Whether or not to use the story from the KoboldAI GUI when generating text. """
use_authors_note: Optional[bool] = False
"""Whether to use the author's note from the KoboldAI GUI when generating text.
This has no effect unless use_story is also enabled.
"""
use_world_info: Optional[bool] = False
"""Whether to use the world info from the KoboldAI GUI when generating text."""
use_memory: Optional[bool] = False
"""Whether to use the memory from the KoboldAI GUI when generating text."""
max_context_length: Optional[int] = 1600
"""Maximum number of tokens to send to the model.
minimum: 1
"""
max_length: Optional[int] = 80
"""Number of tokens to generate.
maximum: 512
minimum: 1
"""
rep_pen: Optional[float] = 1.12
"""Base repetition penalty value.
minimum: 1
"""
rep_pen_range: Optional[int] = 1024
"""Repetition penalty range.
minimum: 0
"""
rep_pen_slope: Optional[float] = 0.9
"""Repetition penalty slope.
minimum: 0
"""
temperature: Optional[float] = 0.6
"""Temperature value.
exclusiveMinimum: 0
"""
tfs: Optional[float] = 0.9
"""Tail free sampling value.
maximum: 1
minimum: 0
"""
top_a: Optional[float] = 0.9
"""Top-a sampling value.
minimum: 0
"""
top_p: Optional[float] = 0.95
"""Top-p sampling value.
maximum: 1
minimum: 0
"""
top_k: Optional[int] = 0
"""Top-k sampling value.
minimum: 0
"""
typical: Optional[float] = 0.5
"""Typical sampling value.
maximum: 1
minimum: 0
"""
@property
def _llm_type(self) -> str:
return "koboldai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the API and return the output.
Args:
prompt: The prompt to use for generation.
stop: A list of strings to stop generation when encountered.
Returns:
The generated text.
Example:
.. code-block:: python
from langchain.llms import KoboldApiLLM
llm = KoboldApiLLM(endpoint="http://localhost:5000")
llm("Write a story about dragons.")
"""
data: Dict[str, Any] = {
"prompt": prompt,
"use_story": self.use_story,
"use_authors_note": self.use_authors_note,
"use_world_info": self.use_world_info,
"use_memory": self.use_memory,
"max_context_length": self.max_context_length,
"max_length": self.max_length,
"rep_pen": self.rep_pen,
"rep_pen_range": self.rep_pen_range,
"rep_pen_slope": self.rep_pen_slope,
"temperature": self.temperature,
"tfs": self.tfs,
"top_a": self.top_a,
"top_p": self.top_p,
"top_k": self.top_k,
"typical": self.typical,
}
if stop is not None:
data["stop_sequence"] = stop
response = requests.post(
f"{clean_url(self.endpoint)}/api/v1/generate", json=data
)
response.raise_for_status()
json_response = response.json()
if (
"results" in json_response
and len(json_response["results"]) > 0
and "text" in json_response["results"][0]
):
text = json_response["results"][0]["text"].strip()
if stop is not None:
for sequence in stop:
if text.endswith(sequence):
text = text[: -len(sequence)].rstrip()
return text
else:
raise ValueError(
f"Unexpected response format from Kobold API: {json_response}"
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_loaders~telegram.py | import json
import logging
import os
import tempfile
import zipfile
from pathlib import Path
from typing import Iterator, List, Union
from langchain.chat_loaders.base import BaseChatLoader
from langchain.schema import AIMessage, BaseMessage, HumanMessage
from langchain.schema.chat import ChatSession
logger = logging.getLogger(__name__)
class TelegramChatLoader(BaseChatLoader):
"""Load `telegram` conversations to LangChain chat messages.
To export, use the Telegram Desktop app from
https://desktop.telegram.org/, select a conversation, click the three dots
in the top right corner, and select "Export chat history". Then select
"Machine-readable JSON" (preferred) to export. Note: the 'lite' versions of
the desktop app (like "Telegram for MacOS") do not support exporting chat
history.
"""
def __init__(
self,
path: Union[str, Path],
):
"""Initialize the TelegramChatLoader.
Args:
path (Union[str, Path]): Path to the exported Telegram chat zip,
directory, json, or HTML file.
"""
self.path = path if isinstance(path, str) else str(path)
def _load_single_chat_session_html(self, file_path: str) -> ChatSession:
"""Load a single chat session from an HTML file.
Args:
file_path (str): Path to the HTML file.
Returns:
ChatSession: The loaded chat session.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Please install the 'beautifulsoup4' package to load"
" Telegram HTML files. You can do this by running"
"'pip install beautifulsoup4' in your terminal."
)
with open(file_path, "r", encoding="utf-8") as file:
soup = BeautifulSoup(file, "html.parser")
results: List[Union[HumanMessage, AIMessage]] = []
previous_sender = None
for message in soup.select(".message.default"):
timestamp = message.select_one(".pull_right.date.details")["title"]
from_name_element = message.select_one(".from_name")
if from_name_element is None and previous_sender is None:
logger.debug("from_name not found in message")
continue
elif from_name_element is None:
from_name = previous_sender
else:
from_name = from_name_element.text.strip()
text = message.select_one(".text").text.strip()
results.append(
HumanMessage(
content=text,
additional_kwargs={
"sender": from_name,
"events": [{"message_time": timestamp}],
},
)
)
previous_sender = from_name
return ChatSession(messages=results)
def _load_single_chat_session_json(self, file_path: str) -> ChatSession:
"""Load a single chat session from a JSON file.
Args:
file_path (str): Path to the JSON file.
Returns:
ChatSession: The loaded chat session.
"""
with open(file_path, "r", encoding="utf-8") as file:
data = json.load(file)
messages = data.get("messages", [])
results: List[BaseMessage] = []
for message in messages:
text = message.get("text", "")
timestamp = message.get("date", "")
from_name = message.get("from", "")
results.append(
HumanMessage(
content=text,
additional_kwargs={
"sender": from_name,
"events": [{"message_time": timestamp}],
},
)
)
return ChatSession(messages=results)
def _iterate_files(self, path: str) -> Iterator[str]:
"""Iterate over files in a directory or zip file.
Args:
path (str): Path to the directory or zip file.
Yields:
str: Path to each file.
"""
if os.path.isfile(path) and path.endswith((".html", ".json")):
yield path
elif os.path.isdir(path):
for root, _, files in os.walk(path):
for file in files:
if file.endswith((".html", ".json")):
yield os.path.join(root, file)
elif zipfile.is_zipfile(path):
with zipfile.ZipFile(path) as zip_file:
for file in zip_file.namelist():
if file.endswith((".html", ".json")):
with tempfile.TemporaryDirectory() as temp_dir:
yield zip_file.extract(file, path=temp_dir)
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy load the messages from the chat file and yield them
as chat sessions.
Yields:
ChatSession: The loaded chat session.
"""
for file_path in self._iterate_files(self.path):
if file_path.endswith(".html"):
yield self._load_single_chat_session_html(file_path)
elif file_path.endswith(".json"):
yield self._load_single_chat_session_json(file_path)
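# --- Editor's illustration: hypothetical usage sketch; the export path is a placeholder
# --- pointing at a Telegram "Export chat history" JSON file (a zip or directory also works).
if __name__ == "__main__":
    loader = TelegramChatLoader(path="./telegram_export/result.json")  # placeholder path
    for session in loader.lazy_load():
        for message in session["messages"]:
            print(message.additional_kwargs.get("sender"), ":", message.content[:60])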
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~open_city_data.py | from typing import Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class OpenCityDataLoader(BaseLoader):
"""Load from `Open City`."""
def __init__(self, city_id: str, dataset_id: str, limit: int):
"""Initialize with dataset_id.
Example: https://dev.socrata.com/foundry/data.sfgov.org/vw6y-z8j6
e.g., city_id = data.sfgov.org
e.g., dataset_id = vw6y-z8j6
Args:
city_id: The Open City city identifier.
dataset_id: The Open City dataset identifier.
limit: The maximum number of documents to load.
"""
self.city_id = city_id
self.dataset_id = dataset_id
self.limit = limit
def lazy_load(self) -> Iterator[Document]:
"""Lazy load records."""
from sodapy import Socrata
client = Socrata(self.city_id, None)
results = client.get(self.dataset_id, limit=self.limit)
for record in results:
yield Document(
page_content=str(record),
metadata={
"source": self.city_id + "_" + self.dataset_id,
},
)
def load(self) -> List[Document]:
"""Load records."""
return list(self.lazy_load())
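# --- Editor's illustration: hypothetical usage sketch using the SF OpenData identifiers
# --- quoted in the docstring above; requires the `sodapy` package and network access.
if __name__ == "__main__":
    loader = OpenCityDataLoader(city_id="data.sfgov.org", dataset_id="vw6y-z8j6", limit=5)
    for doc in loader.lazy_load():
        print(doc.metadata["source"], doc.page_content[:80])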
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~dingo.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class Dingo(VectorStore):
"""`Dingo` vector store.
To use, you should have the ``dingodb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Dingo
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
dingo = Dingo(embeddings, "text")
"""
def __init__(
self,
embedding: Embeddings,
text_key: str,
*,
client: Any = None,
index_name: Optional[str] = None,
dimension: int = 1024,
host: Optional[List[str]] = None,
user: str = "root",
password: str = "123123",
self_id: bool = False,
):
"""Initialize with Dingo client."""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb."
)
host = host if host is not None else ["172.20.31.10:13000"]
# collection
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
self._text_key = text_key
self._client = dingo_client
if index_name is not None and index_name not in dingo_client.get_index():
if self_id is True:
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
dingo_client.create_index(index_name, dimension=dimension)
self._index_name = index_name
self._embedding = embedding
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
batch_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = self._client.vector_add(
self._index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return ids
def similarity_search(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, search_params=search_params
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs = []
query_obj = self._embedding.embed_query(query)
results = self._client.vector_search(
self._index_name, xq=query_obj, top_k=k, search_params=search_params
)
if not results:
return []
for res in results[0]["vectorWithDistances"]:
metadatas = res["scalarData"]
id = res["id"]
score = res["distance"]
text = metadatas[self._text_key]["fields"][0]["data"]
metadata = {"id": id, "text": text, "score": score}
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self._client.vector_search(
self._index_name, [embedding], search_params=search_params, top_k=k
)
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
[
item["vector"]["floatValues"]
for item in results[0]["vectorWithDistances"]
],
k=k,
lambda_mult=lambda_mult,
)
selected = []
for i in mmr_selected:
meta_data = {}
for k, v in results[0]["vectorWithDistances"][i]["scalarData"].items():
meta_data.update({str(k): v["fields"][0]["data"]})
selected.append(meta_data)
return [
Document(page_content=metadata.pop(self._text_key), metadata=metadata)
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, search_params
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
index_name: Optional[str] = None,
dimension: int = 1024,
client: Any = None,
host: List[str] = ["172.20.31.10:13000"],
user: str = "root",
password: str = "123123",
batch_size: int = 500,
**kwargs: Any,
) -> Dingo:
"""Construct Dingo wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Dingo index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Dingo
from langchain.embeddings import OpenAIEmbeddings
import dingodb
embeddings = OpenAIEmbeddings()
dingo = Dingo.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb`."
)
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
if kwargs is not None and kwargs.get("self_id") is True:
if index_name not in dingo_client.get_index():
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
if index_name not in dingo_client.get_index():
dingo_client.create_index(index_name, dimension=dimension)
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = dingo_client.vector_add(
index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return cls(embedding, text_key, client=dingo_client, index_name=index_name)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
return self._client.vector_delete(self._index_name, ids=ids)
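# --- Editor's illustration: hypothetical usage sketch, not part of the original module.
# --- It assumes a reachable DingoDB instance and an OpenAI API key for the embeddings;
# --- the host, index name and sample text are placeholders.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings

    store = Dingo.from_texts(
        texts=["Dingo is a distributed vector database."],
        embedding=OpenAIEmbeddings(),
        index_name="langchain-demo",  # placeholder index name
        host=["127.0.0.1:13000"],  # placeholder host
    )
    for doc, score in store.similarity_search_with_score("vector database", k=1):
        print(score, doc.page_content)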
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~cube_semantic.py | import json
import logging
import time
from typing import List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class CubeSemanticLoader(BaseLoader):
"""Load `Cube semantic layer` metadata.
Args:
cube_api_url: REST API endpoint.
Use the REST API of your Cube's deployment.
Please find out more information here:
https://cube.dev/docs/http-api/rest#configuration-base-path
cube_api_token: Cube API token.
Authentication tokens are generated based on your Cube's API secret.
Please find out more information here:
https://cube.dev/docs/security#generating-json-web-tokens-jwt
load_dimension_values: Whether to load dimension values for every string
dimension or not.
dimension_values_limit: Maximum number of dimension values to load.
dimension_values_max_retries: Maximum number of retries to load dimension
values.
dimension_values_retry_delay: Delay between retries to load dimension values.
"""
def __init__(
self,
cube_api_url: str,
cube_api_token: str,
load_dimension_values: bool = True,
dimension_values_limit: int = 10_000,
dimension_values_max_retries: int = 10,
dimension_values_retry_delay: int = 3,
):
self.cube_api_url = cube_api_url
self.cube_api_token = cube_api_token
self.load_dimension_values = load_dimension_values
self.dimension_values_limit = dimension_values_limit
self.dimension_values_max_retries = dimension_values_max_retries
self.dimension_values_retry_delay = dimension_values_retry_delay
def _get_dimension_values(self, dimension_name: str) -> List[str]:
"""Makes a call to Cube's REST API load endpoint to retrieve
values for dimensions.
These values can be used to achieve a more accurate filtering.
"""
logger.info("Loading dimension values for: {dimension_name}...")
headers = {
"Content-Type": "application/json",
"Authorization": self.cube_api_token,
}
query = {
"query": {
"dimensions": [dimension_name],
"limit": self.dimension_values_limit,
}
}
retries = 0
while retries < self.dimension_values_max_retries:
response = requests.request(
"POST",
f"{self.cube_api_url}/load",
headers=headers,
data=json.dumps(query),
)
if response.status_code == 200:
response_data = response.json()
if (
"error" in response_data
and response_data["error"] == "Continue wait"
):
logger.info("Retrying...")
retries += 1
time.sleep(self.dimension_values_retry_delay)
continue
else:
dimension_values = [
item[dimension_name] for item in response_data["data"]
]
return dimension_values
else:
logger.error("Request failed with status code:", response.status_code)
break
if retries == self.dimension_values_max_retries:
logger.info("Maximum retries reached.")
return []
def load(self) -> List[Document]:
"""Makes a call to Cube's REST API metadata endpoint.
Returns:
A list of documents with attributes:
- page_content=column_title + column_description
- metadata
- table_name
- column_name
- column_data_type
- column_member_type
- column_title
- column_description
- column_values
- cube_data_obj_type
"""
headers = {
"Content-Type": "application/json",
"Authorization": self.cube_api_token,
}
logger.info(f"Loading metadata from {self.cube_api_url}...")
response = requests.get(f"{self.cube_api_url}/meta", headers=headers)
response.raise_for_status()
raw_meta_json = response.json()
cube_data_objects = raw_meta_json.get("cubes", [])
logger.info(f"Found {len(cube_data_objects)} cube data objects in metadata.")
if not cube_data_objects:
raise ValueError("No cubes found in metadata.")
docs = []
for cube_data_obj in cube_data_objects:
cube_data_obj_name = cube_data_obj.get("name")
cube_data_obj_type = cube_data_obj.get("type")
cube_data_obj_is_public = cube_data_obj.get("public")
measures = cube_data_obj.get("measures", [])
dimensions = cube_data_obj.get("dimensions", [])
logger.info(f"Processing {cube_data_obj_name}...")
if not cube_data_obj_is_public:
logger.info(f"Skipping {cube_data_obj_name} because it is not public.")
continue
for item in measures + dimensions:
column_member_type = "measure" if item in measures else "dimension"
dimension_values = []
item_name = str(item.get("name"))
item_type = str(item.get("type"))
if (
self.load_dimension_values
and column_member_type == "dimension"
and item_type == "string"
):
dimension_values = self._get_dimension_values(item_name)
metadata = dict(
table_name=str(cube_data_obj_name),
column_name=item_name,
column_data_type=item_type,
column_title=str(item.get("title")),
column_description=str(item.get("description")),
column_member_type=column_member_type,
column_values=dimension_values,
cube_data_obj_type=cube_data_obj_type,
)
page_content = f"{str(item.get('title'))}, "
page_content += f"{str(item.get('description'))}"
docs.append(Document(page_content=page_content, metadata=metadata))
return docs
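# --- Editor's illustration: hypothetical usage sketch; the URL and token are placeholders
# --- and a real Cube deployment plus JWT are needed for the request to succeed.
if __name__ == "__main__":
    loader = CubeSemanticLoader(
        cube_api_url="https://example.cubecloud.dev/cubejs-api/v1",  # placeholder
        cube_api_token="<jwt-token>",  # placeholder
        load_dimension_values=False,
    )
    for doc in loader.load():
        print(doc.metadata["table_name"], doc.metadata["column_name"])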
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~chaindesk.py | from typing import Any, List, Optional
import aiohttp
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.schema import BaseRetriever, Document
class ChaindeskRetriever(BaseRetriever):
"""`Chaindesk API` retriever."""
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def __init__(
self,
datastore_url: str,
top_k: Optional[int] = None,
api_key: Optional[str] = None,
):
self.datastore_url = datastore_url
self.api_key = api_key
self.top_k = top_k
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
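# --- Editor's illustration: hypothetical usage sketch; the datastore URL and API key
# --- are placeholders for a real Chaindesk datastore query endpoint.
if __name__ == "__main__":
    retriever = ChaindeskRetriever(
        datastore_url="https://app.chaindesk.ai/api/datastores/<id>/query",  # placeholder
        api_key="<api-key>",  # placeholder
        top_k=3,
    )
    for doc in retriever.get_relevant_documents("What is LangChain?"):
        print(doc.metadata["score"], doc.page_content[:80])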
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~amadeus~closest_airport.py | from typing import Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.amadeus.base import AmadeusBaseTool
class ClosestAirportSchema(BaseModel):
"""Schema for the AmadeusClosestAirport tool."""
location: str = Field(
description=(
" The location for which you would like to find the nearest airport "
" along with optional details such as country, state, region, or "
" province, allowing for easy processing and identification of "
" the closest airport. Examples of the format are the following:\n"
" Cali, Colombia\n "
" Lincoln, Nebraska, United States\n"
" New York, United States\n"
" Sydney, New South Wales, Australia\n"
" Rome, Lazio, Italy\n"
" Toronto, Ontario, Canada\n"
)
)
class AmadeusClosestAirport(AmadeusBaseTool):
"""Tool for finding the closest airport to a particular location."""
name: str = "closest_airport"
description: str = (
"Use this tool to find the closest airport to a particular location."
)
args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema
def _run(
self,
location: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
template = (
" Какой аэропорт ближе всего к {location}? Ответь, пожалуйста, с "
" кодом аэропорта по международной ассоциации воздушного транспорта (IATA) "
' в следующем формате JSON. JSON: "iataCode": "Код местоположения IATA" '
)
llm = ChatOpenAI(temperature=0)
llm_chain = LLMChain.from_string(llm=llm, template=template)
output = llm_chain.run(location=location)
return output
| [
" Какой аэропорт ближе всего к {location}? Ответь, пожалуйста, с кодом аэропорта по международной ассоциации воздушного транспорта (IATA) в следующем формате JSON. JSON: \"iataCode\": \"Код местоположения IATA\" "
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~apify.py | from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.document import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from langchain.document_loaders import ApifyDatasetLoader
class ApifyWrapper(BaseModel):
"""Wrapper around Apify.
To use, you should have the ``apify-client`` python package installed,
and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass
`apify_api_token` as a named parameter to the constructor.
"""
apify_client: Any
apify_client_async: Any
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment.
"""
apify_api_token = get_from_dict_or_env(
values, "apify_api_token", "APIFY_API_TOKEN"
)
try:
from apify_client import ApifyClient, ApifyClientAsync
values["apify_client"] = ApifyClient(apify_api_token)
values["apify_client_async"] = ApifyClientAsync(apify_api_token)
except ImportError:
raise ImportError(
"Could not import apify-client Python package. "
"Please install it with `pip install apify-client`."
)
return values
def call_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
actor_call = self.apify_client.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
async def acall_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to
an instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
actor_call = await self.apify_client_async.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
def call_actor_task(
self,
task_id: str,
task_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
task_call = self.apify_client.task(task_id).call(
task_input=task_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=task_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
async def acall_actor_task(
self,
task_id: str,
task_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
task_call = await self.apify_client_async.task(task_id).call(
task_input=task_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=task_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
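# --- Editor's illustration: hypothetical usage sketch. It requires APIFY_API_TOKEN in
# --- the environment; the actor id and run input mirror Apify's public
# --- website-content-crawler example and are used here purely for illustration.
if __name__ == "__main__":
    apify = ApifyWrapper()
    loader = apify.call_actor(
        actor_id="apify/website-content-crawler",
        run_input={"startUrls": [{"url": "https://docs.apify.com/"}]},
        dataset_mapping_function=lambda item: Document(
            page_content=item.get("text", ""), metadata={"source": item.get("url", "")}
        ),
    )
    print(len(loader.load()))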
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~prompts~pipeline.py | from typing import Any, Dict, List, Tuple
from langchain.prompts.chat import BaseChatPromptTemplate
from langchain.pydantic_v1 import root_validator
from langchain.schema import BasePromptTemplate, PromptValue
def _get_inputs(inputs: dict, input_variables: List[str]) -> dict:
return {k: inputs[k] for k in input_variables}
class PipelinePromptTemplate(BasePromptTemplate):
"""A prompt template for composing multiple prompt templates together.
This can be useful when you want to reuse parts of prompts.
A PipelinePrompt consists of two main parts:
- final_prompt: This is the final prompt that is returned
- pipeline_prompts: This is a list of tuples, consisting
of a string (`name`) and a Prompt Template.
Each PromptTemplate will be formatted and then passed
to future prompt templates as a variable with
the same name as `name`
"""
final_prompt: BasePromptTemplate
"""The final prompt that is returned."""
pipeline_prompts: List[Tuple[str, BasePromptTemplate]]
"""A list of tuples, consisting of a string (`name`) and a Prompt Template."""
@root_validator(pre=True)
def get_input_variables(cls, values: Dict) -> Dict:
"""Get input variables."""
created_variables = set()
all_variables = set()
for k, prompt in values["pipeline_prompts"]:
created_variables.add(k)
all_variables.update(prompt.input_variables)
values["input_variables"] = list(all_variables.difference(created_variables))
return values
def format_prompt(self, **kwargs: Any) -> PromptValue:
for k, prompt in self.pipeline_prompts:
_inputs = _get_inputs(kwargs, prompt.input_variables)
if isinstance(prompt, BaseChatPromptTemplate):
kwargs[k] = prompt.format_messages(**_inputs)
else:
kwargs[k] = prompt.format(**_inputs)
_inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
return self.final_prompt.format_prompt(**_inputs)
def format(self, **kwargs: Any) -> str:
return self.format_prompt(**kwargs).to_string()
@property
def _prompt_type(self) -> str:
raise ValueError
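# Illustrative usage sketch (editorial addition, not part of the original module).
# The prompt texts below are made-up examples of composing a pipeline prompt.
#
# from langchain.prompts import PromptTemplate
#
# full_prompt = PromptTemplate.from_template("{introduction}\n\n{example}")
# introduction = PromptTemplate.from_template("You are impersonating {person}.")
# example = PromptTemplate.from_template("Q: {question}\nA:")
# pipeline_prompt = PipelinePromptTemplate(
#     final_prompt=full_prompt,
#     pipeline_prompts=[("introduction", introduction), ("example", example)],
# )
# # Only {person} and {question} remain as input variables; {introduction} and
# # {example} are produced by the pipeline prompts themselves.
# pipeline_prompt.format(person="a pirate", question="What is your favourite ship?")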
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~prompts~example_selector~ngram_overlap.py | """Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Dict, List
import numpy as np
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
from langchain.pydantic_v1 import BaseModel, root_validator
def ngram_overlap_score(source: List[str], example: List[str]) -> float:
"""Compute ngram overlap score of source and example as sentence_bleu score.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import (
SmoothingFunction, # type: ignore
sentence_bleu,
)
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(
sentence_bleu(
references,
hypotheses,
smoothing_function=SmoothingFunction().method1,
auto_reweigh=True,
)
)
class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel):
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
threshold: float = -1.0
"""Threshold at which algorithm stops. Set to -1.0 by default.
For negative threshold:
select_examples sorts examples by ngram_overlap_score, but excludes none.
For threshold greater than 1.0:
select_examples excludes all examples, and returns an empty list.
For threshold equal to 0.0:
select_examples sorts examples by ngram_overlap_score,
and excludes examples with no ngram overlap with input.
"""
@root_validator(pre=True)
def check_dependencies(cls, values: Dict) -> Dict:
"""Check that valid dependencies exist."""
try:
from nltk.translate.bleu_score import ( # noqa: F401
SmoothingFunction,
sentence_bleu,
)
except ImportError as e:
raise ImportError(
"Not all the correct dependencies for this ExampleSelect exist."
"Please install nltk with `pip install nltk`."
) from e
return values
def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Return list of examples sorted by ngram_overlap_score with input.
Descending order.
Excludes any examples with ngram_overlap_score less than or equal to threshold.
"""
inputs = list(input_variables.values())
examples = []
k = len(self.examples)
score = [0.0] * k
first_prompt_template_key = self.example_prompt.input_variables[0]
for i in range(k):
score[i] = ngram_overlap_score(
inputs, [self.examples[i][first_prompt_template_key]]
)
while True:
arg_max = np.argmax(score)
if (score[arg_max] < self.threshold) or abs(
score[arg_max] - self.threshold
) < 1e-9:
break
examples.append(self.examples[arg_max])
score[arg_max] = self.threshold - 1.0
return examples
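# Illustrative usage sketch (editorial addition, not part of the original module).
# The sentences below are made-up; scoring compares the query values against the
# first input variable of `example_prompt` ("input" here), as implemented above.
#
# example_prompt = PromptTemplate(
#     input_variables=["input", "output"],
#     template="Input: {input}\nOutput: {output}",
# )
# selector = NGramOverlapExampleSelector(
#     examples=[
#         {"input": "See Spot run.", "output": "Ver correr a Spot."},
#         {"input": "My dog barks.", "output": "Mi perro ladra."},
#     ],
#     example_prompt=example_prompt,
#     threshold=0.0,  # drop examples with no ngram overlap with the input
# )
# selector.select_examples({"sentence": "Spot can run."})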
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~question_answering~stuff_prompt.py | # flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain.prompts import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
prompt_template = """Используй следующие части контекста, чтобы ответить на вопрос в конце. Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.
{context}
Question: {question}
Полезный ответ:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
system_template = """Используй следующие части контекста, чтобы ответить на вопрос пользователя.
Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.
----------------
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
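# Illustrative usage sketch (editorial addition, not part of the original module).
# `llm` is a placeholder for any LLM or chat model instance.
#
# prompt = PROMPT_SELECTOR.get_prompt(llm)
# # Chat models receive CHAT_PROMPT, plain completion models receive PROMPT.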
| [
"question",
"Используй следующие части контекста, чтобы ответить на вопрос пользователя. \nЕсли ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.\n----------------\n{context}",
"context",
"Используй следующие части контекста, чтобы ответить на вопрос в конце. Если ты не знаешь ответа, просто скажи, что не знаешь, не пытайся придумать ответ.\n\n{context}\n\nQuestion: {question}\nПолезный ответ:",
"{question}"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~callbacks~test_wandb_tracer.py | """Integration tests for the langchain tracer module."""
import asyncio
import os
import pytest
from aiohttp import ClientSession
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks.manager import wandb_tracing_enabled
from langchain.llms import OpenAI
questions = [
(
"Who won the US Open men's final in 2019? "
"What is his age raised to the 0.334 power?"
),
(
"Who is Olivia Wilde's boyfriend? "
"What is his current age raised to the 0.23 power?"
),
(
"Who won the most recent formula 1 grand prix? "
"What is their age raised to the 0.23 power?"
),
(
"Who won the US Open women's final in 2019? "
"What is her age raised to the 0.34 power?"
),
("Who is Beyonce's husband? " "What is his age raised to the 0.19 power?"),
]
def test_tracing_sequential() -> None:
os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
os.environ["WANDB_PROJECT"] = "langchain-tracing"
for q in questions[:3]:
llm = OpenAI(temperature=0)
tools = load_tools(
["llm-math", "serpapi"],
llm=llm,
)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run(q)
def test_tracing_session_env_var() -> None:
os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
llm = OpenAI(temperature=0)
tools = load_tools(
["llm-math", "serpapi"],
llm=llm,
)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run(questions[0])
@pytest.mark.asyncio
async def test_tracing_concurrent() -> None:
os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
aiosession = ClientSession()
llm = OpenAI(temperature=0)
async_tools = load_tools(
["llm-math", "serpapi"],
llm=llm,
aiosession=aiosession,
)
agent = initialize_agent(
async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
tasks = [agent.arun(q) for q in questions[:3]]
await asyncio.gather(*tasks)
await aiosession.close()
def test_tracing_context_manager() -> None:
llm = OpenAI(temperature=0)
tools = load_tools(
["llm-math", "serpapi"],
llm=llm,
)
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
if "LANGCHAIN_WANDB_TRACING" in os.environ:
del os.environ["LANGCHAIN_WANDB_TRACING"]
with wandb_tracing_enabled():
agent.run(questions[0]) # this should be traced
agent.run(questions[0]) # this should not be traced
@pytest.mark.asyncio
async def test_tracing_context_manager_async() -> None:
llm = OpenAI(temperature=0)
async_tools = load_tools(
["llm-math", "serpapi"],
llm=llm,
)
agent = initialize_agent(
async_tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
if "LANGCHAIN_WANDB_TRACING" in os.environ:
del os.environ["LANGCHAIN_TRACING"]
# start a background task
task = asyncio.create_task(agent.arun(questions[0])) # this should not be traced
with wandb_tracing_enabled():
tasks = [agent.arun(q) for q in questions[1:4]] # these should be traced
await asyncio.gather(*tasks)
await task
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~evaluation~agents~trajectory_eval_chain.py | """A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes. It uses a language model
chain (LLMChain) to generate the reasoning and scores.
"""
import re
from typing import (
Any,
Dict,
List,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
cast,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.evaluation.agents.trajectory_eval_prompt import (
EVAL_CHAT_PROMPT,
TOOL_FREE_EVAL_CHAT_PROMPT,
)
from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain
from langchain.pydantic_v1 import Extra, Field
from langchain.schema import AgentAction, BaseOutputParser, OutputParserException
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool
class TrajectoryEval(TypedDict):
"""A named tuple containing the score and reasoning for a trajectory."""
score: float
"""The score for the trajectory, normalized from 0 to 1."""
reasoning: str
"""The reasoning for the score."""
class TrajectoryOutputParser(BaseOutputParser):
"""Trajectory output parser."""
@property
def _type(self) -> str:
return "agent_trajectory"
def parse(self, text: str) -> TrajectoryEval:
"""Parse the output text and extract the score and reasoning.
Args:
text (str): The output text to parse.
Returns:
TrajectoryEval: A dictionary containing the normalized score and reasoning.
Raises:
OutputParserException: If the score is not found in the output text or
if the LLM's score is not a digit in the range 1-5.
"""
if "Score:" not in text:
raise OutputParserException(
f"Could not find score in model eval output: {text}"
)
reasoning, score_str = text.split("Score: ", maxsplit=1)
reasoning, score_str = reasoning.strip(), score_str.strip()
# Use regex to extract the numeric score from the string.
# The regex capture itself is permissive: "Score: 1" captures 1,
# "Score: 3.5" captures 3.5 and "Score: 10" captures 10, but the checks
# below only accept an integer score in the range 1-5.
_score = re.search(r"(\d+(\.\d+)?)", score_str)
# If the score is not found or is a float, raise an exception.
if _score is None or "." in _score.group(1):
raise OutputParserException(
f"Score is not an integer digit in the range 1-5: {text}"
)
score = int(_score.group(1))
# If the score is not in the range 1-5, raise an exception.
if not 1 <= score <= 5:
raise OutputParserException(
f"Score is not a digit in the range 1-5: {text}"
)
normalized_score = (score - 1) / 4
return TrajectoryEval(score=normalized_score, reasoning=reasoning)
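# Illustrative example (editorial addition, not part of the original module):
# a model output with a "Score:" line parses to a normalized score in [0, 1].
#
# parser = TrajectoryOutputParser()
# result = parser.parse("The agent chose an appropriate tool.\nScore: 4")
# # result == {"score": 0.75, "reasoning": "The agent chose an appropriate tool."}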
class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
"""A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes.
Example:
.. code-block:: python
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.evaluation import TrajectoryEvalChain
from langchain.tools import tool
@tool
def geography_answers(country: str, question: str) -> str:
\"\"\"Very helpful answers to geography questions.\"\"\"
return f"{country}? IDK - We may never know {question}."
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
agent = initialize_agent(
tools=[geography_answers],
llm=llm,
agent=AgentType.OPENAI_FUNCTIONS,
return_intermediate_steps=True,
)
question = "How many dwell in the largest minor region in Argentina?"
response = agent(question)
eval_chain = TrajectoryEvalChain.from_llm(
llm=llm, agent_tools=[geography_answers], return_reasoning=True
)
result = eval_chain.evaluate_agent_trajectory(
input=question,
agent_trajectory=response["intermediate_steps"],
prediction=response["output"],
reference="Paris",
)
print(result["score"])
# 0
""" # noqa: E501
agent_tools: Optional[List[BaseTool]] = None
"""A list of tools available to the agent."""
eval_chain: LLMChain
"""The language model chain used for evaluation."""
output_parser: TrajectoryOutputParser = Field(
default_factory=TrajectoryOutputParser
)
"""The output parser used to parse the output."""
return_reasoning: bool = False # :meta private:
"""DEPRECATED. Reasoning always returned."""
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Whether this evaluator requires a reference label."""
return False
@property
def _tools_description(self) -> str:
"""Get the description of the agent tools.
Returns:
str: The description of the agent tools.
"""
if self.agent_tools is None:
return ""
return "\n\n".join(
[
f"""Tool {i}: {tool.name}
Description: {tool.description}"""
for i, tool in enumerate(self.agent_tools, 1)
]
)
@staticmethod
def get_agent_trajectory(
steps: Union[str, Sequence[Tuple[AgentAction, str]]]
) -> str:
"""Get the agent trajectory as a formatted string.
Args:
steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory.
Returns:
str: The formatted agent trajectory.
"""
if isinstance(steps, str):
return steps
return "\n\n".join(
[
f"""Step {i}:
Tool used: {action.tool}
Tool input: {action.tool_input}
Tool output: {output}"""
for i, (action, output) in enumerate(steps, 1)
]
)
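# Illustrative example (editorial addition, not part of the original module):
# a single intermediate step is rendered as a numbered block.
#
# from langchain.schema import AgentAction
# steps = [(AgentAction(tool="search", tool_input="weather in SF", log=""), "sunny")]
# TrajectoryEvalChain.get_agent_trajectory(steps)
# # -> "Step 1:\nTool used: search\nTool input: weather in SF\nTool output: sunny"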
@staticmethod
def _format_reference(reference: Optional[str]) -> str:
"""Format the reference text.
Args:
reference (str): The reference text.
Returns:
str: The formatted reference text.
"""
if not reference:
return ""
return f"""
The following is the expected answer. Use this to measure correctness:
[GROUND_TRUTH]
{reference}
[END_GROUND_TRUTH]
"""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
agent_tools: Optional[Sequence[BaseTool]] = None,
output_parser: Optional[TrajectoryOutputParser] = None,
**kwargs: Any,
) -> "TrajectoryEvalChain":
"""Create a TrajectoryEvalChain object from a language model chain.
Args:
llm (BaseLanguageModel): The language model to use; currently only chat models are supported.
agent_tools (Optional[Sequence[BaseTool]]): A list of tools
available to the agent.
output_parser (Optional[TrajectoryOutputParser]): The output parser
used to parse the chain output into a score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
"""
if not isinstance(llm, BaseChatModel):
raise NotImplementedError(
"Only chat models supported by the current trajectory eval"
)
if agent_tools:
prompt = EVAL_CHAT_PROMPT
else:
prompt = TOOL_FREE_EVAL_CHAT_PROMPT
eval_chain = LLMChain(llm=llm, prompt=prompt)
return cls(
agent_tools=agent_tools,
eval_chain=eval_chain,
output_parser=output_parser or TrajectoryOutputParser(),
**kwargs,
)
@property
def input_keys(self) -> List[str]:
"""Get the input keys for the chain.
Returns:
List[str]: The input keys.
"""
return ["question", "agent_trajectory", "answer", "reference"]
@property
def output_keys(self) -> List[str]:
"""Get the output keys for the chain.
Returns:
List[str]: The output keys.
"""
return ["score", "reasoning"]
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if "reference" not in inputs:
inputs["reference"] = self._format_reference(inputs.get("reference"))
return super().prep_inputs(inputs)
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input["tool_descriptions"] = self._tools_description
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
raw_output = self.eval_chain.run(
chain_input, callbacks=_run_manager.get_child()
)
return cast(dict, self.output_parser.parse(raw_output))
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[AsyncCallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input["tool_descriptions"] = self._tools_description
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
raw_output = await self.eval_chain.arun(
chain_input, callbacks=_run_manager.get_child()
)
return cast(dict, self.output_parser.parse(raw_output))
def _evaluate_agent_trajectory(
self,
*,
prediction: str,
input: str,
agent_trajectory: Sequence[Tuple[AgentAction, str]],
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate a trajectory.
Args:
prediction (str): The final predicted response.
input (str): The input to the agent.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
reference (Optional[str]): The reference answer.
callbacks (Callbacks): Callbacks to use for this chain run.
Returns:
dict: The evaluation result, which includes the score and optionally
the reasoning for reaching that.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": reference,
}
return self.__call__(
inputs=inputs,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
return_only_outputs=True,
)
async def _aevaluate_agent_trajectory(
self,
*,
prediction: str,
input: str,
agent_trajectory: Sequence[Tuple[AgentAction, str]],
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate a trajectory.
Args:
prediction (str): The final predicted response.
input (str): The input to the agent.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
reference (Optional[str]): The reference answer.
callbacks (Callbacks): Callbacks to use for this chain run.
Returns:
dict: The evaluation result, which includes the score and optionally
the reasoning for reaching that.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": reference,
}
return await self.acall(
inputs=inputs,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
return_only_outputs=True,
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~hunyuan.py | import base64
import hashlib
import hmac
import json
import logging
import time
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type, Union
from urllib.parse import urlparse
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
from langchain.pydantic_v1 import Field, SecretStr, root_validator
from langchain.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
)
from langchain.schema.messages import (
AIMessageChunk,
BaseMessageChunk,
ChatMessageChunk,
HumanMessageChunk,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env, get_pydantic_field_names
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://hunyuan.cloud.tencent.com"
DEFAULT_PATH = "/hyllm/v1/chat/completions"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
# signature generation
# https://cloud.tencent.com/document/product/1729/97732#532252ce-e960-48a7-8821-940a9ce2ccf3
def _signature(secret_key: SecretStr, url: str, payload: Dict[str, Any]) -> str:
sorted_keys = sorted(payload.keys())
url_info = urlparse(url)
sign_str = url_info.netloc + url_info.path + "?"
for key in sorted_keys:
value = payload[key]
if isinstance(value, list) or isinstance(value, dict):
value = json.dumps(value, separators=(",", ":"))
elif isinstance(value, float):
value = "%g" % value
sign_str = sign_str + key + "=" + str(value) + "&"
sign_str = sign_str[:-1]
hmacstr = hmac.new(
key=secret_key.get_secret_value().encode("utf-8"),
msg=sign_str.encode("utf-8"),
digestmod=hashlib.sha1,
).digest()
return base64.b64encode(hmacstr).decode("utf-8")
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for choice in response["choices"]:
message = _convert_dict_to_message(choice["messages"])
generations.append(ChatGeneration(message=message))
token_usage = response["usage"]
llm_output = {"token_usage": token_usage}
return ChatResult(generations=generations, llm_output=llm_output)
def _to_secret(value: Union[SecretStr, str]) -> SecretStr:
"""Convert a string to a SecretStr if needed."""
if isinstance(value, SecretStr):
return value
return SecretStr(value)
class ChatHunyuan(BaseChatModel):
"""Tencent Hunyuan chat models API by Tencent.
For more information, see https://cloud.tencent.com/document/product/1729
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"hunyuan_app_id": "HUNYUAN_APP_ID",
"hunyuan_secret_id": "HUNYUAN_SECRET_ID",
"hunyuan_secret_key": "HUNYUAN_SECRET_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
hunyuan_api_base: str = Field(default=DEFAULT_API_BASE)
"""Hunyuan custom endpoints"""
hunyuan_app_id: Optional[str] = None
"""Hunyuan App ID"""
hunyuan_secret_id: Optional[str] = None
"""Hunyuan Secret ID"""
hunyuan_secret_key: Optional[SecretStr] = None
"""Hunyuan Secret Key"""
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = 60
"""Timeout for requests to Hunyuan API. Default is 60 seconds."""
query_id: Optional[str] = None
"""Query id for troubleshooting"""
temperature: float = 1.0
"""What sampling temperature to use."""
top_p: float = 1.0
"""What probability mass to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["hunyuan_api_base"] = get_from_dict_or_env(
values,
"hunyuan_api_base",
"HUNYUAN_API_BASE",
DEFAULT_API_BASE,
)
values["hunyuan_app_id"] = get_from_dict_or_env(
values,
"hunyuan_app_id",
"HUNYUAN_APP_ID",
)
values["hunyuan_secret_id"] = get_from_dict_or_env(
values,
"hunyuan_secret_id",
"HUNYUAN_SECRET_ID",
)
values["hunyuan_secret_key"] = _to_secret(
get_from_dict_or_env(
values,
"hunyuan_secret_key",
"HUNYUAN_SECRET_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Hunyuan API."""
normal_params = {
"app_id": self.hunyuan_app_id,
"secret_id": self.hunyuan_secret_id,
"temperature": self.temperature,
"top_p": self.top_p,
}
if self.query_id is not None:
normal_params["query_id"] = self.query_id
return {**normal_params, **self.model_kwargs}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if "error" in response:
raise ValueError(f"Error from Hunyuan api response: {response}")
return _create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
default_chunk_class = AIMessageChunk
for chunk in res.iter_lines():
response = json.loads(chunk)
if "error" in response:
raise ValueError(f"Error from Hunyuan api response: {response}")
for choice in response["choices"]:
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
if self.hunyuan_secret_key is None:
raise ValueError("Hunyuan secret key is not set.")
parameters = {**self._default_params, **kwargs}
headers = parameters.pop("headers", {})
timestamp = parameters.pop("timestamp", int(time.time()))
expired = parameters.pop("expired", timestamp + 24 * 60 * 60)
payload = {
"timestamp": timestamp,
"expired": expired,
"messages": [_convert_message_to_dict(m) for m in messages],
**parameters,
}
if self.streaming:
payload["stream"] = 1
url = self.hunyuan_api_base + DEFAULT_PATH
res = requests.post(
url=url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
"Authorization": _signature(
secret_key=self.hunyuan_secret_key, url=url, payload=payload
),
**headers,
},
json=payload,
stream=self.streaming,
)
return res
@property
def _llm_type(self) -> str:
return "hunyuan-chat"
| [
"content"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~tencentvectordb.py | """Wrapper around the Tencent vector database."""
from __future__ import annotations
import json
import logging
import time
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import guard_import
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class ConnectionParams:
"""Tencent vector DB Connection params.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/95820
Attributes:
url (str) : The access address of the vector database server
that the client needs to connect to.
key (str): API key for client to access the vector database server,
which is used for authentication.
username (str) : Account for client to access the vector database server.
timeout (int) : Request Timeout.
"""
def __init__(self, url: str, key: str, username: str = "root", timeout: int = 10):
self.url = url
self.key = key
self.username = username
self.timeout = timeout
class IndexParams:
"""Tencent vector DB Index params.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/95826
"""
def __init__(
self,
dimension: int,
shard: int = 1,
replicas: int = 2,
index_type: str = "HNSW",
metric_type: str = "L2",
params: Optional[Dict] = None,
):
self.dimension = dimension
self.shard = shard
self.replicas = replicas
self.index_type = index_type
self.metric_type = metric_type
self.params = params
class TencentVectorDB(VectorStore):
"""Initialize wrapper around the tencent vector database.
In order to use this you need to have a database instance.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/94951
"""
field_id: str = "id"
field_vector: str = "vector"
field_text: str = "text"
field_metadata: str = "metadata"
def __init__(
self,
embedding: Embeddings,
connection_params: ConnectionParams,
index_params: IndexParams = IndexParams(128),
database_name: str = "LangChainDatabase",
collection_name: str = "LangChainCollection",
drop_old: Optional[bool] = False,
):
self.document = guard_import("tcvectordb.model.document")
tcvectordb = guard_import("tcvectordb")
self.embedding_func = embedding
self.index_params = index_params
self.vdb_client = tcvectordb.VectorDBClient(
url=connection_params.url,
username=connection_params.username,
key=connection_params.key,
timeout=connection_params.timeout,
)
db_list = self.vdb_client.list_databases()
db_exist: bool = False
for db in db_list:
if database_name == db.database_name:
db_exist = True
break
if db_exist:
self.database = self.vdb_client.database(database_name)
else:
self.database = self.vdb_client.create_database(database_name)
try:
self.collection = self.database.describe_collection(collection_name)
if drop_old:
self.database.drop_collection(collection_name)
self._create_collection(collection_name)
except tcvectordb.exceptions.VectorDBException:
self._create_collection(collection_name)
def _create_collection(self, collection_name: str) -> None:
enum = guard_import("tcvectordb.model.enum")
vdb_index = guard_import("tcvectordb.model.index")
index_type = None
for k, v in enum.IndexType.__members__.items():
if k == self.index_params.index_type:
index_type = v
if index_type is None:
raise ValueError("unsupported index_type")
metric_type = None
for k, v in enum.MetricType.__members__.items():
if k == self.index_params.metric_type:
metric_type = v
if metric_type is None:
raise ValueError("unsupported metric_type")
if self.index_params.params is None:
params = vdb_index.HNSWParams(m=16, efconstruction=200)
else:
params = vdb_index.HNSWParams(
m=self.index_params.params.get("M", 16),
efconstruction=self.index_params.params.get("efConstruction", 200),
)
index = vdb_index.Index(
vdb_index.FilterIndex(
self.field_id, enum.FieldType.String, enum.IndexType.PRIMARY_KEY
),
vdb_index.VectorIndex(
self.field_vector,
self.index_params.dimension,
index_type,
metric_type,
params,
),
vdb_index.FilterIndex(
self.field_text, enum.FieldType.String, enum.IndexType.FILTER
),
vdb_index.FilterIndex(
self.field_metadata, enum.FieldType.String, enum.IndexType.FILTER
),
)
self.collection = self.database.create_collection(
name=collection_name,
shard=self.index_params.shard,
replicas=self.index_params.replicas,
description="Collection for LangChain",
index=index,
)
@property
def embeddings(self) -> Embeddings:
return self.embedding_func
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
connection_params: Optional[ConnectionParams] = None,
index_params: Optional[IndexParams] = None,
database_name: str = "LangChainDatabase",
collection_name: str = "LangChainCollection",
drop_old: Optional[bool] = False,
**kwargs: Any,
) -> TencentVectorDB:
"""Create a collection, indexes it with HNSW, and insert data."""
if len(texts) == 0:
raise ValueError("texts is empty")
if connection_params is None:
raise ValueError("connection_params is empty")
try:
embeddings = embedding.embed_documents(texts[0:1])
except NotImplementedError:
embeddings = [embedding.embed_query(texts[0])]
dimension = len(embeddings[0])
if index_params is None:
index_params = IndexParams(dimension=dimension)
else:
index_params.dimension = dimension
vector_db = cls(
embedding=embedding,
connection_params=connection_params,
index_params=index_params,
database_name=database_name,
collection_name=collection_name,
drop_old=drop_old,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
timeout: Optional[int] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Insert text data into TencentVectorDB."""
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
pks: list[str] = []
total_count = len(embeddings)
for start in range(0, total_count, batch_size):
# Grab end index
docs = []
end = min(start + batch_size, total_count)
for id in range(start, end, 1):
metadata = "{}"
if metadatas is not None:
metadata = json.dumps(metadatas[id])
doc = self.document.Document(
id="{}-{}-{}".format(time.time_ns(), hash(texts[id]), id),
vector=embeddings[id],
text=texts[id],
metadata=metadata,
)
docs.append(doc)
pks.append(str(id))
self.collection.upsert(docs, timeout)
return pks
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score(
query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score."""
# Embed the query text.
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score."""
filter = None if expr is None else self.document.Filter(expr)
ef = 10 if param is None else param.get("ef", 10)
res: List[List[Dict]] = self.collection.search(
vectors=[embedding],
filter=filter,
params=self.document.HNSWSearchParams(ef=ef),
retrieve_vector=False,
limit=k,
timeout=timeout,
)
# Organize results.
ret: List[Tuple[Document, float]] = []
if res is None or len(res) == 0:
return ret
for result in res[0]:
meta = result.get(self.field_metadata)
if meta is not None:
meta = json.loads(meta)
doc = Document(page_content=result.get(self.field_text), metadata=meta)
pair = (doc, result.get("score", 0.0))
ret.append(pair)
return ret
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR."""
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
param=param,
expr=expr,
timeout=timeout,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR."""
filter = None if expr is None else self.document.Filter(expr)
ef = 10 if param is None else param.get("ef", 10)
res: List[List[Dict]] = self.collection.search(
vectors=[embedding],
filter=filter,
params=self.document.HNSWSearchParams(ef=ef),
retrieve_vector=True,
limit=fetch_k,
timeout=timeout,
)
# Organize results.
documents = []
ordered_result_embeddings = []
for result in res[0]:
meta = result.get(self.field_metadata)
if meta is not None:
meta = json.loads(meta)
doc = Document(page_content=result.get(self.field_text), metadata=meta)
documents.append(doc)
ordered_result_embeddings.append(result.get(self.field_vector))
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
)
# Reorder the values and return.
ret = []
for x in new_ordering:
# Function can return -1 index
if x == -1:
break
else:
ret.append(documents[x])
return ret
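# Illustrative usage sketch (editorial addition, not part of the original module).
# The endpoint and API key are placeholders; `embeddings` stands for any
# Embeddings implementation (e.g. OpenAIEmbeddings).
#
# conn_params = ConnectionParams(
#     url="http://10.0.0.1:80", key="your-api-key", username="root"
# )
# vector_db = TencentVectorDB.from_texts(
#     texts=["sharks are fish", "whales are mammals"],
#     embedding=embeddings,
#     connection_params=conn_params,
#     drop_old=True,
# )
# docs = vector_db.similarity_search("what is a shark?", k=1)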
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~retrievers~test_wikipedia.py | """Integration test for Wikipedia API Wrapper."""
from typing import List
import pytest
from langchain.retrievers import WikipediaRetriever
from langchain.schema import Document
@pytest.fixture
def retriever() -> WikipediaRetriever:
return WikipediaRetriever()
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(retriever: WikipediaRetriever) -> None:
retriever.load_all_available_meta = True
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert_docs(docs, all_meta=True)
def test_load_success_init_args() -> None:
retriever = WikipediaRetriever(
lang="en", top_k_results=1, load_all_available_meta=True
)
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) == 1
assert_docs(docs, all_meta=True)
def test_load_no_result(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert not docs
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~vectorstores~conftest.py | import os
from typing import Generator, List, Union
import pytest
from vcr.request import Request
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter
# Those environment variables turn on Deep Lake pytest mode.
# It significantly makes tests run much faster.
# Need to run before `import deeplake`
os.environ["BUGGER_OFF"] = "true"
os.environ["DEEPLAKE_DOWNLOAD_PATH"] = "./testing/local_storage"
os.environ["DEEPLAKE_PYTEST_ENABLED"] = "true"
# This fixture returns a dictionary containing filter_headers options
# for replacing certain headers with dummy values during cassette playback
# Specifically, it replaces the authorization header with a dummy value to
# prevent sensitive data from being recorded in the cassette.
# It also filters request to certain hosts (specified in the `ignored_hosts` list)
# to prevent data from being recorded in the cassette.
@pytest.fixture(scope="module")
def vcr_config() -> dict:
skipped_host = ["pinecone.io"]
def before_record_response(response: dict) -> Union[dict, None]:
return response
def before_record_request(request: Request) -> Union[Request, None]:
for host in skipped_host:
if request.host.startswith(host) or request.host.endswith(host):
return None
return request
return {
"before_record_request": before_record_request,
"before_record_response": before_record_response,
"filter_headers": [
("authorization", "authorization-DUMMY"),
("X-OpenAI-Client-User-Agent", "X-OpenAI-Client-User-Agent-DUMMY"),
("Api-Key", "Api-Key-DUMMY"),
("User-Agent", "User-Agent-DUMMY"),
],
"ignore_localhost": True,
}
# Define a fixture that yields a generator object returning a list of documents
@pytest.fixture(scope="function")
def documents() -> Generator[List[Document], None, None]:
"""Return a generator that yields a list of documents."""
# Create a CharacterTextSplitter object for splitting the documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# Load the documents from a file located in the fixtures directory
documents = TextLoader(
os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
).load()
# Yield the documents split into chunks
yield text_splitter.split_documents(documents)
@pytest.fixture(scope="function")
def texts() -> Generator[List[str], None, None]:
# Load the documents from a file located in the fixtures directory
documents = TextLoader(
os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
).load()
yield [doc.page_content for doc in documents]
@pytest.fixture(scope="module")
def embedding_openai() -> OpenAIEmbeddings:
return OpenAIEmbeddings()
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~vectorstores~fake_embeddings.py | """Fake Embedding class for testing purposes."""
import math
from typing import List
from langchain.schema.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[float(1.0)] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> List[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [float(1.0)] * 9 + [float(0.0)]
async def aembed_query(self, text: str) -> List[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: List[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [float(1.0)] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> List[float]:
"""Return consistent embeddings for the text.
Unseen texts are added to the list of known texts first, so the same text
always maps to the same vector."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> List[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume: just test string, no attention is paid to values.
return [0.0, 0.0]
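# Illustrative example (editorial addition, not part of the original module):
# numeric strings map to points on the unit circle, anything else to [0, 0].
#
# emb = AngularTwoDimensionalEmbeddings()
# emb.embed_query("0.5")    # ~[0.0, 1.0]   (cos(pi/2), sin(pi/2))
# emb.embed_query("1")      # ~[-1.0, 0.0]  (cos(pi), sin(pi))
# emb.embed_query("hello")  # [0.0, 0.0]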
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~mongodb_atlas.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from pymongo.collection import Collection
MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any])
logger = logging.getLogger(__name__)
DEFAULT_INSERT_BATCH_SIZE = 100
class MongoDBAtlasVectorSearch(VectorStore):
"""`MongoDB Atlas Vector Search` vector store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a MongoDB Atlas Cluster having deployed an
Atlas Search index
Example:
.. code-block:: python
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings.openai import OpenAIEmbeddings
from pymongo import MongoClient
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch(collection, embeddings)
"""
def __init__(
self,
collection: Collection[MongoDBDocumentType],
embedding: Embeddings,
*,
index_name: str = "default",
text_key: str = "text",
embedding_key: str = "embedding",
):
"""
Args:
collection: MongoDB collection to add the texts to.
embedding: Text embedding model to use.
text_key: MongoDB field that will contain the text for each
document.
embedding_key: MongoDB field that will contain the embedding for
each document.
index_name: Name of the Atlas Search index.
"""
self._collection = collection
self._embedding = embedding
self._index_name = index_name
self._text_key = text_key
self._embedding_key = embedding_key
@property
def embeddings(self) -> Embeddings:
return self._embedding
@classmethod
def from_connection_string(
cls,
connection_string: str,
namespace: str,
embedding: Embeddings,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct a `MongoDB Atlas Vector Search` vector store
from a MongoDB connection URI.
Args:
connection_string: A valid MongoDB connection URI.
namespace: A valid MongoDB namespace (database and collection).
embedding: The text embedding model to use for the vector store.
Returns:
A new MongoDBAtlasVectorSearch instance.
"""
try:
from pymongo import MongoClient
except ImportError:
raise ImportError(
"Could not import pymongo, please install it with "
"`pip install pymongo`."
)
client: MongoClient = MongoClient(connection_string)
db_name, collection_name = namespace.split(".")
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> List:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
if not texts:
return []
# Embed and create the documents
embeddings = self._embedding.embed_documents(texts)
to_insert = [
{self._text_key: t, self._embedding_key: embedding, **m}
for t, m, embedding in zip(texts, metadatas, embeddings)
]
# insert the documents in MongoDB Atlas
insert_result = self._collection.insert_many(to_insert) # type: ignore
return insert_result.inserted_ids
def _similarity_search_with_score(
self,
embedding: List[float],
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
params = {
"queryVector": embedding,
"path": self._embedding_key,
"numCandidates": k * 10,
"limit": k,
"index": self._index_name,
}
if pre_filter:
params["filter"] = pre_filter
query = {"$vectorSearch": params}
pipeline = [
query,
{"$set": {"score": {"$meta": "vectorSearchScore"}}},
]
if post_filter_pipeline is not None:
pipeline.extend(post_filter_pipeline)
cursor = self._collection.aggregate(pipeline) # type: ignore[arg-type]
docs = []
for res in cursor:
text = res.pop(self._text_key)
score = res.pop("score")
docs.append((Document(page_content=text, metadata=res), score))
return docs
def similarity_search_with_score(
self,
query: str,
*,
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
"""Return MongoDB documents most similar to the given query and their scores.
Uses the knnBeta Operator available in MongoDB Atlas Search.
This feature is in early access and available only for evaluation purposes, to
validate functionality, and to gather feedback from a small closed group of
early access users. It is not recommended for production deployments as we
may introduce breaking changes.
For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
pre_filter: (Optional) dictionary of argument(s) to prefilter document
fields on.
post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
following the knnBeta vector search.
Returns:
List of documents most similar to the query and their scores.
"""
embedding = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(
embedding,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return docs
def similarity_search(
self,
query: str,
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return MongoDB documents most similar to the given query.
Uses the knnBeta Operator available in MongoDB Atlas Search.
This feature is in early access and available only for evaluation purposes, to
validate functionality, and to gather feedback from a small closed group of
early access users. It is not recommended for production deployments as we
may introduce breaking changes.
For more: https://www.mongodb.com/docs/atlas/atlas-search/knn-beta
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
pre_filter: (Optional) dictionary of argument(s) to prefilter document
fields on.
post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
following the knnBeta vector search.
Returns:
List of documents most similar to the query and their scores.
"""
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return documents selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
fetch_k: (Optional) number of documents to fetch before passing to MMR
algorithm. Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
pre_filter: (Optional) dictionary of argument(s) to prefilter on document
fields.
post_filter_pipeline: (Optional) pipeline of MongoDB aggregation stages
following the knnBeta vector search.
Returns:
List of documents selected by maximal marginal relevance.
"""
query_embedding = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(
query_embedding,
k=fetch_k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
mmr_doc_indexes = maximal_marginal_relevance(
np.array(query_embedding),
[doc.metadata[self._embedding_key] for doc, _ in docs],
k=k,
lambda_mult=lambda_mult,
)
mmr_docs = [docs[i][0] for i in mmr_doc_indexes]
return mmr_docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict]] = None,
collection: Optional[Collection[MongoDBDocumentType]] = None,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct a `MongoDB Atlas Vector Search` vector store from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided MongoDB Atlas Vector Search index
(Lucene)
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from pymongo import MongoClient
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings import OpenAIEmbeddings
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embeddings,
metadatas=metadatas,
collection=collection
)
"""
if collection is None:
raise ValueError("Must provide 'collection' named parameter.")
vectorstore = cls(collection, embedding, **kwargs)
vectorstore.add_texts(texts, metadatas=metadatas)
return vectorstore
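# Illustrative usage sketch (editorial addition, not part of the original module).
# Shows pre- and post-filtering on a search; the field names in the filters are
# placeholders and must match fields defined in your Atlas Search index.
#
# results = vectorstore.similarity_search(
#     "What do sharks eat?",
#     k=4,
#     pre_filter={"page": {"$lte": 10}},  # applied inside the vector search stage
#     post_filter_pipeline=[{"$match": {"score": {"$gte": 0.8}}}],  # extra stages
# )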
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~combine_documents~map_rerank.py | """Combining documents by mapping a chain over them first, then reranking results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, Union, cast
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.output_parsers.regex import RegexParser
from langchain.pydantic_v1 import BaseModel, Extra, create_model, root_validator
from langchain.schema.runnable.config import RunnableConfig
class MapRerankDocumentsChain(BaseCombineDocumentsChain):
"""Combining documents by mapping a chain over them, then reranking results.
This algorithm calls an LLMChain on each input document. The LLMChain is expected
to have an OutputParser that parses the result into both an answer (`answer_key`)
and a score (`rank_key`). The answer with the highest score is then returned.
Example:
.. code-block:: python
from langchain.chains import StuffDocumentsChain, LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.output_parsers.regex import RegexParser
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
# The actual prompt will need to be a lot more complex, this is just
# an example.
prompt_template = (
"Use the following context to tell me the chemical formula "
"for water. Output both your answer and a score of how confident "
"you are. Context: {content}"
)
output_parser = RegexParser(
regex=r"(.*?)\nScore: (.*)",
output_keys=["answer", "score"],
)
prompt = PromptTemplate(
template=prompt_template,
input_variables=["context"],
output_parser=output_parser,
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
chain = MapRerankDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
rank_key="score",
answer_key="answer",
)
"""
llm_chain: LLMChain
"""Chain to apply to each document individually."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
rank_key: str
"""Key in output of llm_chain to rank on."""
answer_key: str
"""Key in output of llm_chain to return as answer."""
metadata_keys: Optional[List[str]] = None
"""Additional metadata from the chosen document to return."""
return_intermediate_steps: bool = False
"""Return intermediate steps.
Intermediate steps include the results of calling llm_chain on each document."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
schema: Dict[str, Any] = {
self.output_key: (str, None),
}
if self.return_intermediate_steps:
schema["intermediate_steps"] = (List[str], None)
if self.metadata_keys:
schema.update({key: (Any, None) for key in self.metadata_keys})
return create_model("MapRerankOutput", **schema)
@property
def output_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ["intermediate_steps"]
if self.metadata_keys is not None:
_output_keys += self.metadata_keys
return _output_keys
@root_validator()
def validate_llm_output(cls, values: Dict) -> Dict:
"""Validate that the combine chain outputs a dictionary."""
output_parser = values["llm_chain"].prompt.output_parser
if not isinstance(output_parser, RegexParser):
raise ValueError(
"Output parser of llm_chain should be a RegexParser,"
f" got {output_parser}"
)
output_keys = output_parser.output_keys
if values["rank_key"] not in output_keys:
raise ValueError(
f"Got {values['rank_key']} as key to rank on, but did not find "
f"it in the llm_chain output keys ({output_keys})"
)
if values["answer_key"] not in output_keys:
raise ValueError(
f"Got {values['answer_key']} as key to return, but did not find "
f"it in the llm_chain output keys ({output_keys})"
)
return values
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def combine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents in a map rerank manner.
Combine by mapping first chain over all documents, then reranking the results.
Args:
docs: List of documents to combine
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
results = self.llm_chain.apply_and_parse(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
callbacks=callbacks,
)
return self._process_results(docs, results)
async def acombine_docs(
self, docs: List[Document], callbacks: Callbacks = None, **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents in a map rerank manner.
Combine by mapping first chain over all documents, then reranking the results.
Args:
docs: List of documents to combine
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
results = await self.llm_chain.aapply_and_parse(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
callbacks=callbacks,
)
return self._process_results(docs, results)
def _process_results(
self,
docs: List[Document],
results: Sequence[Union[str, List[str], Dict[str, str]]],
) -> Tuple[str, dict]:
typed_results = cast(List[dict], results)
sorted_res = sorted(
zip(typed_results, docs), key=lambda x: -int(x[0][self.rank_key])
)
output, document = sorted_res[0]
extra_info = {}
if self.metadata_keys is not None:
for key in self.metadata_keys:
extra_info[key] = document.metadata[key]
if self.return_intermediate_steps:
extra_info["intermediate_steps"] = results
return output[self.answer_key], extra_info
@property
def _chain_type(self) -> str:
return "map_rerank_documents_chain"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~tools~test_json.py | """Test functionality of JSON tools."""
from pathlib import Path
from langchain.tools.json.tool import JsonSpec
def test_json_spec_from_file(tmp_path: Path) -> None:
"""Test JsonSpec can be constructed from a file."""
path = tmp_path / "test.json"
path.write_text('{"foo": "bar"}')
spec = JsonSpec.from_file(path)
assert spec.dict_ == {"foo": "bar"}
def test_json_spec_keys() -> None:
"""Test JsonSpec can return keys of a dict at given path."""
spec = JsonSpec(dict_={"foo": "bar", "baz": {"test": {"foo": [1, 2, 3]}}})
assert spec.keys("data") == "['foo', 'baz']"
assert "ValueError" in spec.keys('data["foo"]')
assert spec.keys('data["baz"]') == "['test']"
assert spec.keys('data["baz"]["test"]') == "['foo']"
assert "ValueError" in spec.keys('data["baz"]["test"]["foo"]')
def test_json_spec_value() -> None:
"""Test JsonSpec can return value of a dict at given path."""
spec = JsonSpec(dict_={"foo": "bar", "baz": {"test": {"foo": [1, 2, 3]}}})
assert spec.value("data") == "{'foo': 'bar', 'baz': {'test': {'foo': [1, 2, 3]}}}"
assert spec.value('data["foo"]') == "bar"
assert spec.value('data["baz"]') == "{'test': {'foo': [1, 2, 3]}}"
assert spec.value('data["baz"]["test"]') == "{'foo': [1, 2, 3]}"
assert spec.value('data["baz"]["test"]["foo"]') == "[1, 2, 3]"
assert spec.value("data['foo']") == "bar"
assert spec.value("data['baz']") == "{'test': {'foo': [1, 2, 3]}}"
assert spec.value("data['baz']['test']") == "{'foo': [1, 2, 3]}"
assert spec.value("data['baz']['test']['foo']") == "[1, 2, 3]"
def test_json_spec_value_max_length() -> None:
"""Test JsonSpec can return value of a dict at given path."""
spec = JsonSpec(
dict_={"foo": "bar", "baz": {"test": {"foo": [1, 2, 3]}}}, max_value_length=5
)
assert spec.value('data["foo"]') == "bar"
assert (
spec.value('data["baz"]')
== "Value is a large dictionary, should explore its keys directly"
)
assert (
spec.value('data["baz"]["test"]')
== "Value is a large dictionary, should explore its keys directly"
)
assert spec.value('data["baz"]["test"]["foo"]') == "[1, 2..."
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~ernie.py | import asyncio
import logging
import threading
from functools import partial
from typing import Dict, List, Optional
import requests
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ErnieEmbeddings(BaseModel, Embeddings):
"""`Ernie Embeddings V1` embedding models."""
ernie_api_base: Optional[str] = None
ernie_client_id: Optional[str] = None
ernie_client_secret: Optional[str] = None
access_token: Optional[str] = None
chunk_size: int = 16
model_name = "ErnieBot-Embedding-V1"
_lock = threading.Lock()
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["ernie_api_base"] = get_from_dict_or_env(
values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com"
)
values["ernie_client_id"] = get_from_dict_or_env(
values,
"ernie_client_id",
"ERNIE_CLIENT_ID",
)
values["ernie_client_secret"] = get_from_dict_or_env(
values,
"ernie_client_secret",
"ERNIE_CLIENT_SECRET",
)
return values
def _embedding(self, json: object) -> dict:
base_url = (
f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings"
)
resp = requests.post(
f"{base_url}/embedding-v1",
headers={
"Content-Type": "application/json",
},
params={"access_token": self.access_token},
json=json,
)
return resp.json()
def _refresh_access_token_with_lock(self) -> None:
with self._lock:
logger.debug("Refreshing access token")
base_url: str = f"{self.ernie_api_base}/oauth/2.0/token"
resp = requests.post(
base_url,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
params={
"grant_type": "client_credentials",
"client_id": self.ernie_client_id,
"client_secret": self.ernie_client_secret,
},
)
self.access_token = str(resp.json().get("access_token"))
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed search docs.
Args:
texts: The list of texts to embed
Returns:
List[List[float]]: List of embeddings, one for each text.
"""
if not self.access_token:
self._refresh_access_token_with_lock()
text_in_chunks = [
texts[i : i + self.chunk_size]
for i in range(0, len(texts), self.chunk_size)
]
lst = []
for chunk in text_in_chunks:
resp = self._embedding({"input": [text for text in chunk]})
if resp.get("error_code"):
if resp.get("error_code") == 111:
self._refresh_access_token_with_lock()
resp = self._embedding({"input": [text for text in chunk]})
else:
raise ValueError(f"Error from Ernie: {resp}")
lst.extend([i["embedding"] for i in resp["data"]])
return lst
def embed_query(self, text: str) -> List[float]:
"""Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text.
"""
if not self.access_token:
self._refresh_access_token_with_lock()
resp = self._embedding({"input": [text]})
if resp.get("error_code"):
if resp.get("error_code") == 111:
self._refresh_access_token_with_lock()
resp = self._embedding({"input": [text]})
else:
raise ValueError(f"Error from Ernie: {resp}")
return resp["data"][0]["embedding"]
async def aembed_query(self, text: str) -> List[float]:
"""Asynchronous Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text.
"""
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.embed_query, text)
)
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Asynchronous Embed search docs.
Args:
texts: The list of texts to embed
Returns:
List[List[float]]: List of embeddings, one for each text.
"""
result = await asyncio.gather(*[self.aembed_query(text) for text in texts])
return list(result)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~edenai.py | """Wrapper around EdenAI's Generation API."""
import logging
from typing import Any, Dict, List, Literal, Optional
from aiohttp import ClientSession
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class EdenAI(LLM):
"""Wrapper around edenai models.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
    `provider` and `params` are required; any model-specific parameters can be
    passed in with the format params={model_param: value, ...}
for api reference check edenai documentation: http://docs.edenai.co.
"""
base_url: str = "https://api.edenai.run/v2"
edenai_api_key: Optional[str] = None
feature: Literal["text", "image"] = "text"
"""Which generative feature to use, use text by default"""
subfeature: Literal["generation"] = "generation"
"""Subfeature of above feature, use generation by default"""
provider: str
"""Geneerative provider to use (eg: openai,stabilityai,cohere,google etc.)"""
params: Dict[str, Any]
"""
Parameters to pass to above subfeature (excluding 'providers' & 'text')
ref text: https://docs.edenai.co/reference/text_generation_create
    ref image: https://docs.edenai.co/reference/image_generation_create
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""extra parameters"""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "edenai"
def _format_output(self, output: dict) -> str:
if self.feature == "text":
return output[self.provider]["generated_text"]
else:
return output[self.provider]["items"][0]["image"]
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to EdenAI's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
json formatted str response.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {"Authorization": f"Bearer {self.edenai_api_key}"}
payload = {
**self.params,
"providers": self.provider,
"num_images": 1, # always limit to 1 (ignored for text)
"text": prompt,
**kwargs,
}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
output = self._format_output(response.json())
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call EdenAi model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: A list of stop words (optional).
run_manager: A callback manager for async interaction with LLMs.
Returns:
The string generated by the model.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
print("Running the acall")
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {"Authorization": f"Bearer {self.edenai_api_key}"}
payload = {
**self.params,
"providers": self.provider,
"num_images": 1, # always limit to 1 (ignored for text)
"text": prompt,
**kwargs,
}
async with ClientSession() as session:
print("Requesting")
async with session.post(url, json=payload, headers=headers) as response:
if response.status >= 500:
raise Exception(f"EdenAI Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"EdenAI received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
output = self._format_output(response_json)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
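# Illustrative usage sketch (not part of the original module); the provider
# and params shown are assumptions, and the API key must come from your
# EdenAI account settings:
#
#     from langchain.llms import EdenAI
#
#     llm = EdenAI(
#         edenai_api_key="<api-key>",
#         provider="openai",
#         params={"temperature": 0.2, "max_tokens": 250},
#     )
#     print(llm("Tell me a joke about otters"))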
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~airtable.py | from typing import Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class AirtableLoader(BaseLoader):
"""Load the `Airtable` tables."""
def __init__(self, api_token: str, table_id: str, base_id: str):
"""Initialize with API token and the IDs for table and base"""
self.api_token = api_token
"""Airtable API token."""
self.table_id = table_id
"""Airtable table ID."""
self.base_id = base_id
"""Airtable base ID."""
def lazy_load(self) -> Iterator[Document]:
"""Lazy load Documents from table."""
from pyairtable import Table
table = Table(self.api_token, self.base_id, self.table_id)
records = table.all()
for record in records:
# Need to convert record from dict to str
yield Document(
page_content=str(record),
metadata={
"source": self.base_id + "_" + self.table_id,
"base_id": self.base_id,
"table_id": self.table_id,
},
)
def load(self) -> List[Document]:
"""Load Documents from table."""
return list(self.lazy_load())
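# Illustrative usage sketch (not part of the original module); the token and
# IDs are placeholders:
#
#     from langchain.document_loaders import AirtableLoader
#
#     loader = AirtableLoader(
#         api_token="<airtable-api-token>",
#         table_id="<table-id>",
#         base_id="<base-id>",
#     )
#     docs = loader.load()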
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~sklearn.py | """ Wrapper around scikit-learn NearestNeighbors implementation.
The vector store can be persisted in json, bson or parquet format.
"""
import json
import math
import os
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Type
from uuid import uuid4
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import guard_import
from langchain.vectorstores.utils import maximal_marginal_relevance
DEFAULT_K = 4 # Number of Documents to return.
DEFAULT_FETCH_K = 20 # Number of Documents to initially fetch during MMR search.
class BaseSerializer(ABC):
"""Base class for serializing data."""
def __init__(self, persist_path: str) -> None:
self.persist_path = persist_path
@classmethod
@abstractmethod
def extension(cls) -> str:
"""The file extension suggested by this serializer (without dot)."""
@abstractmethod
def save(self, data: Any) -> None:
"""Saves the data to the persist_path"""
@abstractmethod
def load(self) -> Any:
"""Loads the data from the persist_path"""
class JsonSerializer(BaseSerializer):
"""Serializes data in json using the json package from python standard library."""
@classmethod
def extension(cls) -> str:
return "json"
def save(self, data: Any) -> None:
with open(self.persist_path, "w") as fp:
json.dump(data, fp)
def load(self) -> Any:
with open(self.persist_path, "r") as fp:
return json.load(fp)
class BsonSerializer(BaseSerializer):
"""Serializes data in binary json using the `bson` python package."""
def __init__(self, persist_path: str) -> None:
super().__init__(persist_path)
self.bson = guard_import("bson")
@classmethod
def extension(cls) -> str:
return "bson"
def save(self, data: Any) -> None:
with open(self.persist_path, "wb") as fp:
fp.write(self.bson.dumps(data))
def load(self) -> Any:
with open(self.persist_path, "rb") as fp:
return self.bson.loads(fp.read())
class ParquetSerializer(BaseSerializer):
"""Serializes data in `Apache Parquet` format using the `pyarrow` package."""
def __init__(self, persist_path: str) -> None:
super().__init__(persist_path)
self.pd = guard_import("pandas")
self.pa = guard_import("pyarrow")
self.pq = guard_import("pyarrow.parquet")
@classmethod
def extension(cls) -> str:
return "parquet"
def save(self, data: Any) -> None:
df = self.pd.DataFrame(data)
table = self.pa.Table.from_pandas(df)
if os.path.exists(self.persist_path):
backup_path = str(self.persist_path) + "-backup"
os.rename(self.persist_path, backup_path)
try:
self.pq.write_table(table, self.persist_path)
except Exception as exc:
os.rename(backup_path, self.persist_path)
raise exc
else:
os.remove(backup_path)
else:
self.pq.write_table(table, self.persist_path)
def load(self) -> Any:
table = self.pq.read_table(self.persist_path)
df = table.to_pandas()
return {col: series.tolist() for col, series in df.items()}
SERIALIZER_MAP: Dict[str, Type[BaseSerializer]] = {
"json": JsonSerializer,
"bson": BsonSerializer,
"parquet": ParquetSerializer,
}
class SKLearnVectorStoreException(RuntimeError):
"""Exception raised by SKLearnVectorStore."""
pass
class SKLearnVectorStore(VectorStore):
"""Simple in-memory vector store based on the `scikit-learn` library
`NearestNeighbors` implementation."""
def __init__(
self,
embedding: Embeddings,
*,
persist_path: Optional[str] = None,
serializer: Literal["json", "bson", "parquet"] = "json",
metric: str = "cosine",
**kwargs: Any,
) -> None:
np = guard_import("numpy")
sklearn_neighbors = guard_import("sklearn.neighbors", pip_name="scikit-learn")
# non-persistent properties
self._np = np
self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, **kwargs)
self._neighbors_fitted = False
self._embedding_function = embedding
self._persist_path = persist_path
self._serializer: Optional[BaseSerializer] = None
if self._persist_path is not None:
serializer_cls = SERIALIZER_MAP[serializer]
self._serializer = serializer_cls(persist_path=self._persist_path)
# data properties
self._embeddings: List[List[float]] = []
self._texts: List[str] = []
self._metadatas: List[dict] = []
self._ids: List[str] = []
# cache properties
self._embeddings_np: Any = np.asarray([])
if self._persist_path is not None and os.path.isfile(self._persist_path):
self._load()
@property
def embeddings(self) -> Embeddings:
return self._embedding_function
def persist(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
"You must specify a persist_path on creation to persist the "
"collection."
)
data = {
"ids": self._ids,
"texts": self._texts,
"metadatas": self._metadatas,
"embeddings": self._embeddings,
}
self._serializer.save(data)
def _load(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
"You must specify a persist_path on creation to load the " "collection."
)
data = self._serializer.load()
self._embeddings = data["embeddings"]
self._texts = data["texts"]
self._metadatas = data["metadatas"]
self._ids = data["ids"]
self._update_neighbors()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
_texts = list(texts)
_ids = ids or [str(uuid4()) for _ in _texts]
self._texts.extend(_texts)
self._embeddings.extend(self._embedding_function.embed_documents(_texts))
self._metadatas.extend(metadatas or ([{}] * len(_texts)))
self._ids.extend(_ids)
self._update_neighbors()
return _ids
def _update_neighbors(self) -> None:
if len(self._embeddings) == 0:
raise SKLearnVectorStoreException(
"No data was added to SKLearnVectorStore."
)
self._embeddings_np = self._np.asarray(self._embeddings)
self._neighbors.fit(self._embeddings_np)
self._neighbors_fitted = True
def _similarity_index_search_with_score(
self, query_embedding: List[float], *, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[int, float]]:
"""Search k embeddings similar to the query embedding. Returns a list of
(index, distance) tuples."""
if not self._neighbors_fitted:
raise SKLearnVectorStoreException(
"No data was added to SKLearnVectorStore."
)
neigh_dists, neigh_idxs = self._neighbors.kneighbors(
[query_embedding], n_neighbors=k
)
return list(zip(neigh_idxs[0], neigh_dists[0]))
def similarity_search_with_score(
self, query: str, *, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[Document, float]]:
query_embedding = self._embedding_function.embed_query(query)
indices_dists = self._similarity_index_search_with_score(
query_embedding, k=k, **kwargs
)
return [
(
Document(
page_content=self._texts[idx],
metadata={"id": self._ids[idx], **self._metadatas[idx]},
),
dist,
)
for idx, dist in indices_dists
]
def similarity_search(
self, query: str, k: int = DEFAULT_K, **kwargs: Any
) -> List[Document]:
docs_scores = self.similarity_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_scores]
def _similarity_search_with_relevance_scores(
self, query: str, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[Document, float]]:
docs_dists = self.similarity_search_with_score(query, k=k, **kwargs)
docs, dists = zip(*docs_dists)
scores = [1 / math.exp(dist) for dist in dists]
return list(zip(list(docs), scores))
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = DEFAULT_FETCH_K,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
indices_dists = self._similarity_index_search_with_score(
embedding, k=fetch_k, **kwargs
)
indices, _ = zip(*indices_dists)
result_embeddings = self._embeddings_np[indices,]
mmr_selected = maximal_marginal_relevance(
self._np.array(embedding, dtype=self._np.float32),
result_embeddings,
k=k,
lambda_mult=lambda_mult,
)
mmr_indices = [indices[i] for i in mmr_selected]
return [
Document(
page_content=self._texts[idx],
metadata={"id": self._ids[idx], **self._metadatas[idx]},
)
for idx in mmr_indices
]
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = DEFAULT_FETCH_K,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
persist_path: Optional[str] = None,
**kwargs: Any,
) -> "SKLearnVectorStore":
vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
vs.add_texts(texts, metadatas=metadatas, ids=ids)
return vs
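# Illustrative usage sketch (not part of the original module); OpenAIEmbeddings
# and the persist path are assumptions; any Embeddings implementation works:
#
#     from langchain.embeddings import OpenAIEmbeddings
#     from langchain.vectorstores import SKLearnVectorStore
#
#     store = SKLearnVectorStore.from_texts(
#         ["hello world", "foo bar"],
#         OpenAIEmbeddings(),
#         persist_path="./vectors.json",
#         serializer="json",
#     )
#     store.persist()
#     docs = store.similarity_search("hello", k=1)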
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~document_loaders~test_youtube.py | import pytest
from langchain.document_loaders import YoutubeLoader
@pytest.mark.parametrize(
"youtube_url, expected_video_id",
[
("http://www.youtube.com/watch?v=-wtIMTCHWuI", "-wtIMTCHWuI"),
("http://youtube.com/watch?v=-wtIMTCHWuI", "-wtIMTCHWuI"),
("http://m.youtube.com/watch?v=-wtIMTCHWuI", "-wtIMTCHWuI"),
("http://youtu.be/-wtIMTCHWuI", "-wtIMTCHWuI"),
("https://youtu.be/-wtIMTCHWuI", "-wtIMTCHWuI"),
("https://www.youtube.com/watch?v=lalOy8Mbfdc", "lalOy8Mbfdc"),
("https://m.youtube.com/watch?v=lalOy8Mbfdc", "lalOy8Mbfdc"),
("https://youtube.com/watch?v=lalOy8Mbfdc", "lalOy8Mbfdc"),
("http://youtu.be/lalOy8Mbfdc?t=1", "lalOy8Mbfdc"),
("http://youtu.be/lalOy8Mbfdc?t=1s", "lalOy8Mbfdc"),
("https://youtu.be/lalOy8Mbfdc?t=1", "lalOy8Mbfdc"),
("http://www.youtube-nocookie.com/embed/lalOy8Mbfdc?rel=0", "lalOy8Mbfdc"),
("https://youtu.be/lalOy8Mbfdc?t=1s", "lalOy8Mbfdc"),
("https://www.youtube.com/shorts/cd0Fy92_w_s", "cd0Fy92_w_s"),
],
)
def test_video_id_extraction(youtube_url: str, expected_video_id: str) -> None:
"""Test that the video id is extracted from a youtube url"""
assert YoutubeLoader.extract_video_id(youtube_url) == expected_video_id
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~file_management~write.py | from typing import Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.base import BaseTool
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class WriteFileInput(BaseModel):
"""Input for WriteFileTool."""
file_path: str = Field(..., description="name of file")
text: str = Field(..., description="text to write to file")
append: bool = Field(
default=False, description="Whether to append to an existing file."
)
class WriteFileTool(BaseFileToolMixin, BaseTool):
"""Tool that writes a file to disk."""
name: str = "write_file"
args_schema: Type[BaseModel] = WriteFileInput
description: str = "Write file to disk"
def _run(
self,
file_path: str,
text: str,
append: bool = False,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
write_path = self.get_relative_path(file_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
try:
write_path.parent.mkdir(exist_ok=True, parents=False)
mode = "a" if append else "w"
with write_path.open(mode, encoding="utf-8") as f:
f.write(text)
return f"File written successfully to {file_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
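# Illustrative usage sketch (not part of the original module); the root_dir
# value is an assumption:
#
#     from langchain.tools.file_management import WriteFileTool
#
#     tool = WriteFileTool(root_dir="/tmp/workspace")
#     result = tool.run(
#         {"file_path": "notes.txt", "text": "hello", "append": False}
#     )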
| [
"Write file to disk"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~document_loaders~test_whatsapp_chat.py | from pathlib import Path
from langchain.document_loaders import WhatsAppChatLoader
def test_whatsapp_chat_loader() -> None:
"""Test WhatsAppChatLoader."""
file_path = Path(__file__).parent.parent / "examples" / "whatsapp_chat.txt"
loader = WhatsAppChatLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["source"] == str(file_path)
assert docs[0].page_content == (
"James on 05.05.23, 15:48:11: Hi here\n\n"
"User name on 11/8/21, 9:41:32 AM: Message 123\n\n"
"User 2 on 1/23/23, 3:19 AM: Bye!\n\n"
"User 1 on 1/23/23, 3:22_AM: And let me know if anything changes\n\n"
"~ User name 2 on 1/24/21, 12:41:03 PM: Of course!\n\n"
"~ User 2 on 2023/5/4, 16:13:23: See you!\n\n"
"User 1 on 7/19/22, 11:32 PM: Hello\n\n"
"User 2 on 7/20/22, 11:32 am: Goodbye\n\n"
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~neo4j_vector.py | from __future__ import annotations
import enum
import logging
import os
import uuid
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_env
from langchain.vectorstores.utils import DistanceStrategy
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
DISTANCE_MAPPING = {
DistanceStrategy.EUCLIDEAN_DISTANCE: "euclidean",
DistanceStrategy.COSINE: "cosine",
}
class SearchType(str, enum.Enum):
"""Enumerator of the Distance strategies."""
VECTOR = "vector"
HYBRID = "hybrid"
DEFAULT_SEARCH_TYPE = SearchType.VECTOR
def _get_search_index_query(search_type: SearchType) -> str:
type_to_query_map = {
SearchType.VECTOR: (
"CALL db.index.vector.queryNodes($index, $k, $embedding) YIELD node, score "
),
SearchType.HYBRID: (
"CALL { "
"CALL db.index.vector.queryNodes($index, $k, $embedding) "
"YIELD node, score "
"RETURN node, score UNION "
"CALL db.index.fulltext.queryNodes($keyword_index, $query, {limit: $k}) "
"YIELD node, score "
"WITH collect({node:node, score:score}) AS nodes, max(score) AS max "
"UNWIND nodes AS n "
"RETURN n.node AS node, (n.score / max) AS score " # We use 0 as min
"} "
"WITH node, max(score) AS score ORDER BY score DESC LIMIT $k " # dedup
),
}
return type_to_query_map[search_type]
def check_if_not_null(props: List[str], values: List[Any]) -> None:
"""Check if the values are not None or empty string"""
for prop, value in zip(props, values):
if not value:
raise ValueError(f"Parameter `{prop}` must not be None or empty string")
def sort_by_index_name(
lst: List[Dict[str, Any]], index_name: str
) -> List[Dict[str, Any]]:
"""Sort first element to match the index_name if exists"""
return sorted(lst, key=lambda x: x.get("index_name") != index_name)
class Neo4jVector(VectorStore):
"""`Neo4j` vector index.
To use, you should have the ``neo4j`` python package installed.
Args:
url: Neo4j connection url
username: Neo4j username.
password: Neo4j password
database: Optionally provide Neo4j database
Defaults to "neo4j"
embedding: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete existing data if it exists.
(default: False). Useful for testing.
Example:
.. code-block:: python
from langchain.vectorstores.neo4j_vector import Neo4jVector
from langchain.embeddings.openai import OpenAIEmbeddings
url="bolt://localhost:7687"
username="neo4j"
password="pleaseletmein"
embeddings = OpenAIEmbeddings()
            vectorstore = Neo4jVector.from_documents(
embedding=embeddings,
documents=docs,
                url=url,
username=username,
password=password,
)
"""
def __init__(
self,
embedding: Embeddings,
*,
search_type: SearchType = SearchType.VECTOR,
username: Optional[str] = None,
password: Optional[str] = None,
url: Optional[str] = None,
keyword_index_name: Optional[str] = "keyword",
database: str = "neo4j",
index_name: str = "vector",
node_label: str = "Chunk",
embedding_node_property: str = "embedding",
text_node_property: str = "text",
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
logger: Optional[logging.Logger] = None,
pre_delete_collection: bool = False,
retrieval_query: str = "",
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
try:
import neo4j
except ImportError:
raise ImportError(
"Could not import neo4j python package. "
"Please install it with `pip install neo4j`."
)
# Allow only cosine and euclidean distance strategies
if distance_strategy not in [
DistanceStrategy.EUCLIDEAN_DISTANCE,
DistanceStrategy.COSINE,
]:
raise ValueError(
"distance_strategy must be either 'EUCLIDEAN_DISTANCE' or 'COSINE'"
)
# Handle if the credentials are environment variables
# Support URL for backwards compatibility
url = os.environ.get("NEO4J_URL", url)
url = get_from_env("url", "NEO4J_URI", url)
username = get_from_env("username", "NEO4J_USERNAME", username)
password = get_from_env("password", "NEO4J_PASSWORD", password)
database = get_from_env("database", "NEO4J_DATABASE", database)
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
self.schema = ""
# Verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the username and password are correct"
)
# Verify if the version support vector index
self.verify_version()
# Verify that required values are not null
check_if_not_null(
[
"index_name",
"node_label",
"embedding_node_property",
"text_node_property",
],
[index_name, node_label, embedding_node_property, text_node_property],
)
self.embedding = embedding
self._distance_strategy = distance_strategy
self.index_name = index_name
self.keyword_index_name = keyword_index_name
self.node_label = node_label
self.embedding_node_property = embedding_node_property
self.text_node_property = text_node_property
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.retrieval_query = retrieval_query
self.search_type = search_type
# Calculate embedding dimension
self.embedding_dimension = len(embedding.embed_query("foo"))
# Delete existing data if flagged
if pre_delete_collection:
from neo4j.exceptions import DatabaseError
self.query(
f"MATCH (n:`{self.node_label}`) "
"CALL { WITH n DETACH DELETE n } "
"IN TRANSACTIONS OF 10000 ROWS;"
)
# Delete index
try:
self.query(f"DROP INDEX {self.index_name}")
except DatabaseError: # Index didn't exist yet
pass
def query(
self, query: str, *, params: Optional[dict] = None
) -> List[Dict[str, Any]]:
"""
This method sends a Cypher query to the connected Neo4j database
and returns the results as a list of dictionaries.
Args:
query (str): The Cypher query to execute.
params (dict, optional): Dictionary of query parameters. Defaults to {}.
Returns:
List[Dict[str, Any]]: List of dictionaries containing the query results.
"""
from neo4j.exceptions import CypherSyntaxError
params = params or {}
with self._driver.session(database=self._database) as session:
try:
data = session.run(query, params)
return [r.data() for r in data]
except CypherSyntaxError as e:
raise ValueError(f"Cypher Statement is not valid\n{e}")
def verify_version(self) -> None:
"""
Check if the connected Neo4j database version supports vector indexing.
Queries the Neo4j database to retrieve its version and compares it
against a target version (5.11.0) that is known to support vector
indexing. Raises a ValueError if the connected Neo4j version is
not supported.
"""
version = self.query("CALL dbms.components()")[0]["versions"][0]
if "aura" in version:
version_tuple = tuple(map(int, version.split("-")[0].split("."))) + (0,)
else:
version_tuple = tuple(map(int, version.split(".")))
target_version = (5, 11, 0)
if version_tuple < target_version:
raise ValueError(
"Version index is only supported in Neo4j version 5.11 or greater"
)
def retrieve_existing_index(self) -> Optional[int]:
"""
Check if the vector index exists in the Neo4j database
and returns its embedding dimension.
This method queries the Neo4j database for existing indexes
and attempts to retrieve the dimension of the vector index
with the specified name. If the index exists, its dimension is returned.
If the index doesn't exist, `None` is returned.
Returns:
int or None: The embedding dimension of the existing index if found.
"""
index_information = self.query(
"SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options "
"WHERE type = 'VECTOR' AND (name = $index_name "
"OR (labelsOrTypes[0] = $node_label AND "
"properties[0] = $embedding_node_property)) "
"RETURN name, labelsOrTypes, properties, options ",
params={
"index_name": self.index_name,
"node_label": self.node_label,
"embedding_node_property": self.embedding_node_property,
},
)
# sort by index_name
index_information = sort_by_index_name(index_information, self.index_name)
try:
self.index_name = index_information[0]["name"]
self.node_label = index_information[0]["labelsOrTypes"][0]
self.embedding_node_property = index_information[0]["properties"][0]
embedding_dimension = index_information[0]["options"]["indexConfig"][
"vector.dimensions"
]
return embedding_dimension
except IndexError:
return None
def retrieve_existing_fts_index(
self, text_node_properties: List[str] = []
) -> Optional[str]:
"""
Check if the fulltext index exists in the Neo4j database
This method queries the Neo4j database for existing fts indexes
with the specified name.
Returns:
            (str or None): node label of the existing keyword index, if it exists.
"""
index_information = self.query(
"SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options "
"WHERE type = 'FULLTEXT' AND (name = $keyword_index_name "
"OR (labelsOrTypes = [$node_label] AND "
"properties = $text_node_property)) "
"RETURN name, labelsOrTypes, properties, options ",
params={
"keyword_index_name": self.keyword_index_name,
"node_label": self.node_label,
"text_node_property": text_node_properties or [self.text_node_property],
},
)
# sort by index_name
index_information = sort_by_index_name(index_information, self.index_name)
try:
self.keyword_index_name = index_information[0]["name"]
self.text_node_property = index_information[0]["properties"][0]
node_label = index_information[0]["labelsOrTypes"][0]
return node_label
except IndexError:
return None
def create_new_index(self) -> None:
"""
This method constructs a Cypher query and executes it
to create a new vector index in Neo4j.
"""
index_query = (
"CALL db.index.vector.createNodeIndex("
"$index_name,"
"$node_label,"
"$embedding_node_property,"
"toInteger($embedding_dimension),"
"$similarity_metric )"
)
parameters = {
"index_name": self.index_name,
"node_label": self.node_label,
"embedding_node_property": self.embedding_node_property,
"embedding_dimension": self.embedding_dimension,
"similarity_metric": DISTANCE_MAPPING[self._distance_strategy],
}
self.query(index_query, params=parameters)
def create_new_keyword_index(self, text_node_properties: List[str] = []) -> None:
"""
This method constructs a Cypher query and executes it
to create a new full text index in Neo4j.
"""
node_props = text_node_properties or [self.text_node_property]
fts_index_query = (
f"CREATE FULLTEXT INDEX {self.keyword_index_name} "
f"FOR (n:`{self.node_label}`) ON EACH "
f"[{', '.join(['n.`' + el + '`' for el in node_props])}]"
)
self.query(fts_index_query)
@property
def embeddings(self) -> Embeddings:
return self.embedding
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
create_id_index: bool = True,
search_type: SearchType = SearchType.VECTOR,
**kwargs: Any,
) -> Neo4jVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
store = cls(
embedding=embedding,
search_type=search_type,
**kwargs,
)
# Check if the vector index already exists
embedding_dimension = store.retrieve_existing_index()
# If the vector index doesn't exist yet
if not embedding_dimension:
store.create_new_index()
# If the index already exists, check if embedding dimensions match
elif not store.embedding_dimension == embedding_dimension:
raise ValueError(
f"Index with name {store.index_name} already exists."
"The provided embedding function and vector index "
"dimensions do not match.\n"
f"Embedding function dimension: {store.embedding_dimension}\n"
f"Vector index dimension: {embedding_dimension}"
)
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index()
# If the FTS index doesn't exist yet
if not fts_node_label:
store.create_new_keyword_index()
else: # Validate that FTS and Vector index use the same information
if not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label"
)
# Create unique constraint for faster import
if create_id_index:
store.query(
"CREATE CONSTRAINT IF NOT EXISTS "
f"FOR (n:`{store.node_label}`) REQUIRE n.id IS UNIQUE;"
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
import_query = (
"UNWIND $data AS row "
"CALL { WITH row "
f"MERGE (c:`{self.node_label}` {{id: row.id}}) "
"WITH c, row "
f"CALL db.create.setVectorProperty(c, "
f"'{self.embedding_node_property}', row.embedding) "
"YIELD node "
f"SET c.`{self.text_node_property}` = row.text "
"SET c += row.metadata } IN TRANSACTIONS OF 1000 ROWS"
)
parameters = {
"data": [
{"text": text, "metadata": metadata, "embedding": embedding, "id": id}
for text, metadata, embedding, id in zip(
texts, metadatas, embeddings, ids
)
]
}
self.query(import_query, params=parameters)
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Neo4jVector.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
query=query,
)
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, query=query
)
return docs
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""
Perform a similarity search in the Neo4j database using a
given vector and return the top k similar documents with their scores.
This method uses a Cypher query to find the top k documents that
are most similar to a given embedding. The similarity is measured
using a vector index in the Neo4j database. The results are returned
as a list of tuples, each containing a Document object and
its similarity score.
Args:
embedding (List[float]): The embedding vector to compare against.
k (int, optional): The number of top similar documents to retrieve.
Returns:
List[Tuple[Document, float]]: A list of tuples, each containing
a Document object and its similarity score.
"""
default_retrieval = (
f"RETURN node.`{self.text_node_property}` AS text, score, "
f"node {{.*, `{self.text_node_property}`: Null, "
f"`{self.embedding_node_property}`: Null, id: Null }} AS metadata"
)
retrieval_query = (
self.retrieval_query if self.retrieval_query else default_retrieval
)
read_query = _get_search_index_query(self.search_type) + retrieval_query
parameters = {
"index": self.index_name,
"k": k,
"embedding": embedding,
"keyword_index": self.keyword_index_name,
"query": kwargs["query"],
}
results = self.query(read_query, params=parameters)
docs = [
(
Document(
page_content=result["text"],
metadata={
k: v for k, v in result["metadata"].items() if v is not None
},
),
result["score"],
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[Neo4jVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Neo4jVector:
"""
Return Neo4jVector initialized from texts and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
distance_strategy=distance_strategy,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> Neo4jVector:
"""Construct Neo4jVector wrapper from raw documents and pre-
generated embeddings.
Return Neo4jVector initialized from documents and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
Example:
.. code-block:: python
from langchain.vectorstores.neo4j_vector import Neo4jVector
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
vectorstore = Neo4jVector.from_embeddings(
text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[Neo4jVector],
embedding: Embeddings,
index_name: str,
search_type: SearchType = DEFAULT_SEARCH_TYPE,
keyword_index_name: Optional[str] = None,
**kwargs: Any,
) -> Neo4jVector:
"""
Get instance of an existing Neo4j vector index. This method will
return the instance of the store without inserting any new
embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters along with
the `index_name` definition.
"""
if search_type == SearchType.HYBRID and not keyword_index_name:
raise ValueError(
"keyword_index name has to be specified "
"when using hybrid search option"
)
store = cls(
embedding=embedding,
index_name=index_name,
keyword_index_name=keyword_index_name,
search_type=search_type,
**kwargs,
)
embedding_dimension = store.retrieve_existing_index()
if not embedding_dimension:
raise ValueError(
"The specified vector index name does not exist. "
"Make sure to check if you spelled it correctly"
)
# Check if embedding function and vector index dimensions match
if not store.embedding_dimension == embedding_dimension:
raise ValueError(
"The provided embedding function and vector index "
"dimensions do not match.\n"
f"Embedding function dimension: {store.embedding_dimension}\n"
f"Vector index dimension: {embedding_dimension}"
)
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index()
# If the FTS index doesn't exist yet
if not fts_node_label:
raise ValueError(
"The specified keyword index name does not exist. "
"Make sure to check if you spelled it correctly"
)
else: # Validate that FTS and Vector index use the same information
if not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label"
)
return store
@classmethod
def from_documents(
cls: Type[Neo4jVector],
documents: List[Document],
embedding: Embeddings,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Neo4jVector:
"""
Return Neo4jVector initialized from documents and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
**kwargs,
)
@classmethod
def from_existing_graph(
cls: Type[Neo4jVector],
embedding: Embeddings,
node_label: str,
embedding_node_property: str,
text_node_properties: List[str],
*,
keyword_index_name: Optional[str] = "keyword",
index_name: str = "vector",
search_type: SearchType = DEFAULT_SEARCH_TYPE,
retrieval_query: str = "",
**kwargs: Any,
) -> Neo4jVector:
"""
Initialize and return a Neo4jVector instance from an existing graph.
This method initializes a Neo4jVector instance using the provided
parameters and the existing graph. It validates the existence of
the indices and creates new ones if they don't exist.
Returns:
Neo4jVector: An instance of Neo4jVector initialized with the provided parameters
and existing graph.
Example:
>>> neo4j_vector = Neo4jVector.from_existing_graph(
... embedding=my_embedding,
... node_label="Document",
... embedding_node_property="embedding",
... text_node_properties=["title", "content"]
... )
Note:
Neo4j credentials are required in the form of `url`, `username`, and `password`,
and optional `database` parameters passed as additional keyword arguments.
"""
# Validate the list is not empty
if not text_node_properties:
raise ValueError(
"Parameter `text_node_properties` must not be an empty list"
)
# Prefer retrieval query from params, otherwise construct it
if not retrieval_query:
retrieval_query = (
f"RETURN reduce(str='', k IN {text_node_properties} |"
" str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, "
"node {.*, `"
+ embedding_node_property
+ "`: Null, id: Null, "
+ ", ".join([f"`{prop}`: Null" for prop in text_node_properties])
+ "} AS metadata, score"
)
store = cls(
embedding=embedding,
index_name=index_name,
keyword_index_name=keyword_index_name,
search_type=search_type,
retrieval_query=retrieval_query,
node_label=node_label,
embedding_node_property=embedding_node_property,
**kwargs,
)
# Check if the vector index already exists
embedding_dimension = store.retrieve_existing_index()
# If the vector index doesn't exist yet
if not embedding_dimension:
store.create_new_index()
# If the index already exists, check if embedding dimensions match
elif not store.embedding_dimension == embedding_dimension:
raise ValueError(
f"Index with name {store.index_name} already exists."
"The provided embedding function and vector index "
"dimensions do not match.\n"
f"Embedding function dimension: {store.embedding_dimension}\n"
f"Vector index dimension: {embedding_dimension}"
)
# FTS index for Hybrid search
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index(text_node_properties)
# If the FTS index doesn't exist yet
if not fts_node_label:
store.create_new_keyword_index(text_node_properties)
else: # Validate that FTS and Vector index use the same information
if not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label"
)
# Populate embeddings
while True:
fetch_query = (
f"MATCH (n:`{node_label}`) "
f"WHERE n.{embedding_node_property} IS null "
"AND any(k in $props WHERE n[k] IS NOT null) "
f"RETURN elementId(n) AS id, reduce(str='',"
"k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text "
"LIMIT 1000"
)
data = store.query(fetch_query, params={"props": text_node_properties})
text_embeddings = embedding.embed_documents([el["text"] for el in data])
params = {
"data": [
{"id": el["id"], "embedding": embedding}
for el, embedding in zip(data, text_embeddings)
]
}
store.query(
"UNWIND $data AS row "
f"MATCH (n:`{node_label}`) "
"WHERE elementId(n) = row.id "
f"CALL db.create.setVectorProperty(n, "
f"'{embedding_node_property}', row.embedding) "
"YIELD node RETURN count(*)",
params=params,
)
# If embedding calculation should be stopped
if len(data) < 1000:
break
return store
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return lambda x: x
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return lambda x: x
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
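# --- Hedged usage sketch (not part of the original module) ---
# The snippet below only illustrates the public entry points defined above; the Neo4j
# URL, credentials and the choice of OpenAIEmbeddings are assumptions, and a running
# Neo4j instance with vector index support is required. Guarded by __main__ so that
# importing this module stays side-effect free.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.schema import Document as _ExampleDocument

    example_store = Neo4jVector.from_documents(
        documents=[_ExampleDocument(page_content="Neo4j stores data as a graph.")],
        embedding=OpenAIEmbeddings(),
        url="bolt://localhost:7687",  # placeholder connection details
        username="neo4j",
        password="password",
    )
    print(example_store.similarity_search("graph database", k=1))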
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~azureml_endpoint.py | import json
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import SimpleChatModel
from langchain.llms.azureml_endpoint import AzureMLEndpointClient, ContentFormatterBase
from langchain.pydantic_v1 import validator
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain.utils import get_from_dict_or_env
class LlamaContentFormatter(ContentFormatterBase):
"""Content formatter for `LLaMA`."""
SUPPORTED_ROLES: List[str] = ["user", "assistant", "system"]
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> Dict:
"""Converts message to a dict according to role"""
if isinstance(message, HumanMessage):
return {
"role": "user",
"content": ContentFormatterBase.escape_special_characters(
message.content
),
}
elif isinstance(message, AIMessage):
return {
"role": "assistant",
"content": ContentFormatterBase.escape_special_characters(
message.content
),
}
elif isinstance(message, SystemMessage):
return {
"role": "system",
"content": ContentFormatterBase.escape_special_characters(
message.content
),
}
elif (
isinstance(message, ChatMessage)
and message.role in LlamaContentFormatter.SUPPORTED_ROLES
):
return {
"role": message.role,
"content": ContentFormatterBase.escape_special_characters(
message.content
),
}
else:
supported = ",".join(
[role for role in LlamaContentFormatter.SUPPORTED_ROLES]
)
raise ValueError(
f"""Received unsupported role.
Supported roles for the LLaMa Foundation Model: {supported}"""
)
def _format_request_payload(
self, messages: List[BaseMessage], model_kwargs: Dict
) -> bytes:
chat_messages = [
LlamaContentFormatter._convert_message_to_dict(message)
for message in messages
]
prompt = json.dumps(
{"input_data": {"input_string": chat_messages, "parameters": model_kwargs}}
)
return self.format_request_payload(prompt=prompt, model_kwargs=model_kwargs)
def format_request_payload(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Formats the request according to the chosen api"""
return str.encode(prompt)
def format_response_payload(self, output: bytes) -> str:
"""Formats response"""
return json.loads(output)["output"]
class AzureMLChatOnlineEndpoint(SimpleChatModel):
"""`AzureML` Chat models API.
Example:
.. code-block:: python
azure_chat = AzureMLChatOnlineEndpoint(
endpoint_url="https://<your-endpoint>.<your_region>.inference.ml.azure.com/score",
endpoint_api_key="my-api-key",
content_formatter=content_formatter,
)
"""
endpoint_url: str = ""
"""URL of pre-existing Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_URL`."""
endpoint_api_key: str = ""
"""Authentication Key for Endpoint. Should be passed to constructor or specified as
env var `AZUREML_ENDPOINT_API_KEY`."""
http_client: Any = None #: :meta private:
content_formatter: Any = None
"""The content formatter that provides an input and output
transform function to handle formats between the LLM and
the endpoint"""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
@validator("http_client", always=True, allow_reuse=True)
@classmethod
def validate_client(cls, field_value: Any, values: Dict) -> AzureMLEndpointClient:
"""Validate that api key and python package exist in environment."""
endpoint_key = get_from_dict_or_env(
values, "endpoint_api_key", "AZUREML_ENDPOINT_API_KEY"
)
endpoint_url = get_from_dict_or_env(
values, "endpoint_url", "AZUREML_ENDPOINT_URL"
)
http_client = AzureMLEndpointClient(endpoint_url, endpoint_key)
return http_client
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azureml_chat_endpoint"
def _call(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to an AzureML Managed Online endpoint.
Args:
messages: The messages in the conversation with the chat model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = azureml_model("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
request_payload = self.content_formatter._format_request_payload(
messages, _model_kwargs
)
response_payload = self.http_client.call(request_payload, **kwargs)
generated_text = self.content_formatter.format_response_payload(
response_payload
)
return generated_text
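# --- Hedged usage sketch (not part of the original module) ---
# The endpoint URL and key below are placeholders; a deployed LLaMA chat endpoint that
# understands the payload produced by LlamaContentFormatter is assumed.
if __name__ == "__main__":
    chat = AzureMLChatOnlineEndpoint(
        endpoint_url="https://<your-endpoint>.<region>.inference.ml.azure.com/score",
        endpoint_api_key="<placeholder-key>",
        content_formatter=LlamaContentFormatter(),
    )
    print(chat([HumanMessage(content="Tell me a joke.")]))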
| [
"input_string",
"parameters",
"input_data"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~llms~test_qianfan_endpoint.py | """Test Baidu Qianfan LLM Endpoint."""
from typing import Generator
import pytest
from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
from langchain.schema import LLMResult
def test_call() -> None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint()
output = llm("write a joke")
assert isinstance(output, str)
def test_generate() -> None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint()
output = llm.generate(["write a joke"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_generate_stream() -> None:
"""Test valid call to qianfan."""
llm = QianfanLLMEndpoint()
output = llm.stream("write a joke")
assert isinstance(output, Generator)
@pytest.mark.asyncio
async def test_qianfan_aio() -> None:
llm = QianfanLLMEndpoint(streaming=True)
async for token in llm.astream("hi qianfan."):
assert isinstance(token, str)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~duckduckgo_search.py | """Util that calls DuckDuckGo Search.
No setup required. Free.
https://pypi.org/project/duckduckgo-search/
"""
from typing import Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
class DuckDuckGoSearchAPIWrapper(BaseModel):
"""Wrapper for DuckDuckGo Search API.
Free and does not require any setup.
"""
region: Optional[str] = "wt-wt"
safesearch: str = "moderate"
time: Optional[str] = "y"
max_results: int = 5
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from duckduckgo_search import DDGS # noqa: F401
except ImportError:
raise ImportError(
"Could not import duckduckgo-search python package. "
"Please install it with `pip install duckduckgo-search`."
)
return values
def get_snippets(self, query: str) -> List[str]:
"""Run query through DuckDuckGo and return concatenated results."""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
results = ddgs.text(
query,
region=self.region,
safesearch=self.safesearch,
timelimit=self.time,
)
if results is None:
return ["No good DuckDuckGo Search Result was found"]
snippets = []
for i, res in enumerate(results, 1):
if res is not None:
snippets.append(res["body"])
if len(snippets) == self.max_results:
break
return snippets
def run(self, query: str) -> str:
snippets = self.get_snippets(query)
return " ".join(snippets)
def results(
self, query: str, num_results: int, backend: str = "api"
) -> List[Dict[str, str]]:
"""Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
from duckduckgo_search import DDGS
with DDGS() as ddgs:
results = ddgs.text(
query,
region=self.region,
safesearch=self.safesearch,
timelimit=self.time,
backend=backend,
)
if results is None:
return [{"Result": "No good DuckDuckGo Search Result was found"}]
def to_metadata(result: Dict) -> Dict[str, str]:
if backend == "news":
return {
"date": result["date"],
"title": result["title"],
"snippet": result["body"],
"source": result["source"],
"link": result["url"],
}
return {
"snippet": result["body"],
"title": result["title"],
"link": result["href"],
}
formatted_results = []
for i, res in enumerate(results, 1):
if res is not None:
formatted_results.append(to_metadata(res))
if len(formatted_results) == num_results:
break
return formatted_results
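# --- Hedged usage sketch (not part of the original module) ---
# Requires the optional `duckduckgo-search` package and network access; the queries
# are illustrative only.
if __name__ == "__main__":
    search = DuckDuckGoSearchAPIWrapper(max_results=3)
    print(search.run("Python programming language"))
    print(search.results("python release", num_results=2, backend="news"))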
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~tencent_cos_file.py | import os
import tempfile
from typing import Any, Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class TencentCOSFileLoader(BaseLoader):
"""Load from `Tencent Cloud COS` file."""
def __init__(self, conf: Any, bucket: str, key: str):
"""Initialize with COS config, bucket and key name.
:param conf(CosConfig): COS config.
:param bucket(str): COS bucket.
:param key(str): COS file key.
"""
self.conf = conf
self.bucket = bucket
self.key = key
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""Load documents."""
try:
from qcloud_cos import CosS3Client
except ImportError:
raise ImportError(
"Could not import cos-python-sdk-v5 python package. "
"Please install it with `pip install cos-python-sdk-v5`."
)
# Initialise a client
client = CosS3Client(self.conf)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.bucket}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
# Download the file to a destination
client.download_file(
Bucket=self.bucket, Key=self.key, DestFilePath=file_path
)
loader = UnstructuredFileLoader(file_path)
# UnstructuredFileLoader not implement lazy_load yet
return iter(loader.load())
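# --- Hedged usage sketch (not part of the original module) ---
# Region, credentials, bucket and object key are placeholders; `cos-python-sdk-v5`
# must be installed for CosConfig to import, and the referenced object must exist.
if __name__ == "__main__":
    from qcloud_cos import CosConfig

    conf = CosConfig(
        Region="ap-guangzhou",
        SecretId="<your-secret-id>",
        SecretKey="<your-secret-key>",
    )
    loader = TencentCOSFileLoader(conf=conf, bucket="example-bucket", key="docs/example.txt")
    print(loader.load())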
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~argilla_callback.py | import os
import warnings
from typing import Any, Dict, List, Optional
from packaging.version import parse
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
class ArgillaCallbackHandler(BaseCallbackHandler):
"""Callback Handler that logs into Argilla.
Args:
dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must
exist in advance. If you need help on how to create a `FeedbackDataset` in
Argilla, please visit
https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html.
workspace_name: name of the workspace in Argilla where the specified
`FeedbackDataset` lives in. Defaults to `None`, which means that the
default workspace will be used.
api_url: URL of the Argilla Server that we want to use, and where the
`FeedbackDataset` lives in. Defaults to `None`, which means that either
`ARGILLA_API_URL` environment variable or the default will be used.
api_key: API Key to connect to the Argilla Server. Defaults to `None`, which
means that either `ARGILLA_API_KEY` environment variable or the default
will be used.
Raises:
ImportError: if the `argilla` package is not installed.
ConnectionError: if the connection to Argilla fails.
FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.
Examples:
>>> from langchain.llms import OpenAI
>>> from langchain.callbacks import ArgillaCallbackHandler
>>> argilla_callback = ArgillaCallbackHandler(
... dataset_name="my-dataset",
... workspace_name="my-workspace",
... api_url="http://localhost:6900",
... api_key="argilla.apikey",
... )
>>> llm = OpenAI(
... temperature=0,
... callbacks=[argilla_callback],
... verbose=True,
... openai_api_key="API_KEY_HERE",
... )
>>> llm.generate([
... "What is the best NLP-annotation tool out there? (no bias at all)",
... ])
"Argilla, no doubt about it."
"""
REPO_URL: str = "https://github.com/argilla-io/argilla"
ISSUES_URL: str = f"{REPO_URL}/issues"
BLOG_URL: str = "https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html" # noqa: E501
DEFAULT_API_URL: str = "http://localhost:6900"
def __init__(
self,
dataset_name: str,
workspace_name: Optional[str] = None,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> None:
"""Initializes the `ArgillaCallbackHandler`.
Args:
dataset_name: name of the `FeedbackDataset` in Argilla. Note that it must
exist in advance. If you need help on how to create a `FeedbackDataset`
in Argilla, please visit
https://docs.argilla.io/en/latest/guides/llms/practical_guides/use_argilla_callback_in_langchain.html.
workspace_name: name of the workspace in Argilla where the specified
`FeedbackDataset` lives in. Defaults to `None`, which means that the
default workspace will be used.
api_url: URL of the Argilla Server that we want to use, and where the
`FeedbackDataset` lives in. Defaults to `None`, which means that either
`ARGILLA_API_URL` environment variable or the default will be used.
api_key: API Key to connect to the Argilla Server. Defaults to `None`, which
means that either `ARGILLA_API_KEY` environment variable or the default
will be used.
Raises:
ImportError: if the `argilla` package is not installed.
ConnectionError: if the connection to Argilla fails.
FileNotFoundError: if the `FeedbackDataset` retrieval from Argilla fails.
"""
super().__init__()
# Import Argilla (not via `import_argilla` to keep hints in IDEs)
try:
import argilla as rg # noqa: F401
self.ARGILLA_VERSION = rg.__version__
except ImportError:
raise ImportError(
"To use the Argilla callback manager you need to have the `argilla` "
"Python package installed. Please install it with `pip install argilla`"
)
# Check whether the Argilla version is compatible
if parse(self.ARGILLA_VERSION) < parse("1.8.0"):
raise ImportError(
f"The installed `argilla` version is {self.ARGILLA_VERSION} but "
"`ArgillaCallbackHandler` requires at least version 1.8.0. Please "
"upgrade `argilla` with `pip install --upgrade argilla`."
)
# Show a warning message if Argilla will assume the default values will be used
if api_url is None and os.getenv("ARGILLA_API_URL") is None:
warnings.warn(
(
"Since `api_url` is None, and the env var `ARGILLA_API_URL` is not"
f" set, it will default to `{self.DEFAULT_API_URL}`, which is the"
" default API URL in Argilla Quickstart."
),
)
api_url = self.DEFAULT_API_URL
if api_key is None and os.getenv("ARGILLA_API_KEY") is None:
self.DEFAULT_API_KEY = (
"admin.apikey"
if parse(self.ARGILLA_VERSION) < parse("1.11.0")
else "owner.apikey"
)
warnings.warn(
(
"Since `api_key` is None, and the env var `ARGILLA_API_KEY` is not"
f" set, it will default to `{self.DEFAULT_API_KEY}`, which is the"
" default API key in Argilla Quickstart."
),
)
            api_key = self.DEFAULT_API_KEY
# Connect to Argilla with the provided credentials, if applicable
try:
rg.init(api_key=api_key, api_url=api_url)
except Exception as e:
raise ConnectionError(
f"Could not connect to Argilla with exception: '{e}'.\n"
"Please check your `api_key` and `api_url`, and make sure that "
"the Argilla server is up and running. If the problem persists "
f"please report it to {self.ISSUES_URL} as an `integration` issue."
) from e
# Set the Argilla variables
self.dataset_name = dataset_name
self.workspace_name = workspace_name or rg.get_workspace()
# Retrieve the `FeedbackDataset` from Argilla (without existing records)
try:
extra_args = {}
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
warnings.warn(
f"You have Argilla {self.ARGILLA_VERSION}, but Argilla 1.14.0 or"
" higher is recommended.",
UserWarning,
)
extra_args = {"with_records": False}
self.dataset = rg.FeedbackDataset.from_argilla(
name=self.dataset_name,
workspace=self.workspace_name,
**extra_args,
)
except Exception as e:
raise FileNotFoundError(
f"`FeedbackDataset` retrieval from Argilla failed with exception `{e}`."
f"\nPlease check that the dataset with name={self.dataset_name} in the"
f" workspace={self.workspace_name} exists in advance. If you need help"
" on how to create a `langchain`-compatible `FeedbackDataset` in"
f" Argilla, please visit {self.BLOG_URL}. If the problem persists"
f" please report it to {self.ISSUES_URL} as an `integration` issue."
) from e
supported_fields = ["prompt", "response"]
if supported_fields != [field.name for field in self.dataset.fields]:
raise ValueError(
f"`FeedbackDataset` with name={self.dataset_name} in the workspace="
f"{self.workspace_name} had fields that are not supported yet for the"
f"`langchain` integration. Supported fields are: {supported_fields},"
f" and the current `FeedbackDataset` fields are {[field.name for field in self.dataset.fields]}." # noqa: E501
" For more information on how to create a `langchain`-compatible"
f" `FeedbackDataset` in Argilla, please visit {self.BLOG_URL}."
)
self.prompts: Dict[str, List[str]] = {}
warnings.warn(
(
"The `ArgillaCallbackHandler` is currently in beta and is subject to"
" change based on updates to `langchain`. Please report any issues to"
f" {self.ISSUES_URL} as an `integration` issue."
),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Save the prompts in memory when an LLM starts."""
self.prompts.update({str(kwargs["parent_run_id"] or kwargs["run_id"]): prompts})
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Do nothing when a new token is generated."""
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Log records to Argilla when an LLM ends."""
# Do nothing if there's a parent_run_id, since we will log the records when
# the chain ends
if kwargs["parent_run_id"]:
return
# Creates the records and adds them to the `FeedbackDataset`
prompts = self.prompts[str(kwargs["run_id"])]
for prompt, generations in zip(prompts, response.generations):
self.dataset.add_records(
records=[
{
"fields": {
"prompt": prompt,
"response": generation.text.strip(),
},
}
for generation in generations
]
)
# Pop current run from `self.runs`
self.prompts.pop(str(kwargs["run_id"]))
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
# Push the records to Argilla
self.dataset.push_to_argilla()
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM outputs an error."""
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""If the key `input` is in `inputs`, then save it in `self.prompts` using
either the `parent_run_id` or the `run_id` as the key. This is done so that
we don't log the same input prompt twice, once when the LLM starts and once
when the chain starts.
"""
if "input" in inputs:
self.prompts.update(
{
str(kwargs["parent_run_id"] or kwargs["run_id"]): (
inputs["input"]
if isinstance(inputs["input"], list)
else [inputs["input"]]
)
}
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""If either the `parent_run_id` or the `run_id` is in `self.prompts`, then
log the outputs to Argilla, and pop the run from `self.prompts`. The behavior
differs if the output is a list or not.
"""
if not any(
key in self.prompts
for key in [str(kwargs["parent_run_id"]), str(kwargs["run_id"])]
):
return
prompts = self.prompts.get(str(kwargs["parent_run_id"])) or self.prompts.get(
str(kwargs["run_id"])
)
for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, list):
# Creates the records and adds them to the `FeedbackDataset`
self.dataset.add_records(
records=[
{
"fields": {
"prompt": prompt,
"response": output["text"].strip(),
},
}
for prompt, output in zip(
prompts, chain_output_val # type: ignore
)
]
)
else:
# Creates the records and adds them to the `FeedbackDataset`
self.dataset.add_records(
records=[
{
"fields": {
"prompt": " ".join(prompts), # type: ignore
"response": chain_output_val.strip(),
},
}
]
)
# Pop current run from `self.runs`
if str(kwargs["parent_run_id"]) in self.prompts:
self.prompts.pop(str(kwargs["parent_run_id"]))
if str(kwargs["run_id"]) in self.prompts:
self.prompts.pop(str(kwargs["run_id"]))
if parse(self.ARGILLA_VERSION) < parse("1.14.0"):
# Push the records to Argilla
self.dataset.push_to_argilla()
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when LLM chain outputs an error."""
pass
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
"""Do nothing when tool starts."""
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Do nothing when agent takes a specific action."""
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Do nothing when tool ends."""
pass
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Do nothing when tool outputs an error."""
pass
def on_text(self, text: str, **kwargs: Any) -> None:
"""Do nothing"""
pass
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Do nothing"""
pass
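# --- Hedged usage sketch (not part of the original module) ---
# Dataset/workspace names and the OpenAI key are placeholders; an Argilla server with
# a compatible `FeedbackDataset` (fields "prompt" and "response") must already exist.
# Attaching the handler to a chain exercises on_chain_start/on_chain_end defined above.
if __name__ == "__main__":
    from langchain.chains import LLMChain
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate

    handler = ArgillaCallbackHandler(dataset_name="my-dataset", workspace_name="my-workspace")
    chain = LLMChain(
        llm=OpenAI(openai_api_key="<placeholder-key>"),
        prompt=PromptTemplate.from_template("Answer briefly: {input}"),
        callbacks=[handler],
    )
    chain.run(input="What is a feedback dataset?")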
| [
"run_id",
"parent_run_id"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~clickup.py | """Util that calls clickup."""
import json
import warnings
from dataclasses import asdict, dataclass, fields
from typing import Any, Dict, List, Mapping, Optional, Tuple, Type, Union
import requests
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
DEFAULT_URL = "https://api.clickup.com/api/v2"
@dataclass
class Component:
"""Base class for all components."""
@classmethod
def from_data(cls, data: Dict[str, Any]) -> "Component":
raise NotImplementedError()
@dataclass
class Task(Component):
"""Class for a task."""
id: int
name: str
text_content: str
description: str
status: str
creator_id: int
creator_username: str
creator_email: str
assignees: List[Dict[str, Any]]
watchers: List[Dict[str, Any]]
priority: Optional[str]
due_date: Optional[str]
start_date: Optional[str]
points: int
team_id: int
project_id: int
@classmethod
def from_data(cls, data: Dict[str, Any]) -> "Task":
priority = None if data["priority"] is None else data["priority"]["priority"]
return cls(
id=data["id"],
name=data["name"],
text_content=data["text_content"],
description=data["description"],
status=data["status"]["status"],
creator_id=data["creator"]["id"],
creator_username=data["creator"]["username"],
creator_email=data["creator"]["email"],
assignees=data["assignees"],
watchers=data["watchers"],
priority=priority,
due_date=data["due_date"],
start_date=data["start_date"],
points=data["points"],
team_id=data["team_id"],
project_id=data["project"]["id"],
)
@dataclass
class CUList(Component):
"""Component class for a list."""
folder_id: float
name: str
content: Optional[str] = None
due_date: Optional[int] = None
due_date_time: Optional[bool] = None
priority: Optional[int] = None
assignee: Optional[int] = None
status: Optional[str] = None
@classmethod
def from_data(cls, data: dict) -> "CUList":
return cls(
folder_id=data["folder_id"],
name=data["name"],
content=data.get("content"),
due_date=data.get("due_date"),
due_date_time=data.get("due_date_time"),
priority=data.get("priority"),
assignee=data.get("assignee"),
status=data.get("status"),
)
@dataclass
class Member(Component):
"""Component class for a member."""
id: int
username: str
email: str
initials: str
@classmethod
def from_data(cls, data: Dict) -> "Member":
return cls(
id=data["user"]["id"],
username=data["user"]["username"],
email=data["user"]["email"],
initials=data["user"]["initials"],
)
@dataclass
class Team(Component):
"""Component class for a team."""
id: int
name: str
members: List[Member]
@classmethod
def from_data(cls, data: Dict) -> "Team":
members = [Member.from_data(member_data) for member_data in data["members"]]
return cls(id=data["id"], name=data["name"], members=members)
@dataclass
class Space(Component):
"""Component class for a space."""
id: int
name: str
private: bool
enabled_features: Dict[str, Any]
@classmethod
def from_data(cls, data: Dict[str, Any]) -> "Space":
space_data = data["spaces"][0]
enabled_features = {
feature: value
for feature, value in space_data["features"].items()
if value["enabled"]
}
return cls(
id=space_data["id"],
name=space_data["name"],
private=space_data["private"],
enabled_features=enabled_features,
)
def parse_dict_through_component(
data: dict, component: Type[Component], fault_tolerant: bool = False
) -> Dict:
"""Parse a dictionary by creating
a component and then turning it back into a dictionary.
This helps with two things
1. Extract and format data from a dictionary according to schema
2. Provide a central place to do this in a fault-tolerant way
"""
try:
return asdict(component.from_data(data))
except Exception as e:
if fault_tolerant:
warning_str = f"""Error encountered while trying to parse
{str(data)}: {str(e)}\n Falling back to returning input data."""
warnings.warn(warning_str)
return data
else:
raise e
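# Hedged illustration of the helper above (kept as comments so importing this module
# stays side-effect free): given a payload shaped like the ClickUp "member" object,
#
#   parse_dict_through_component(
#       {"user": {"id": 1, "username": "alice", "email": "a@example.com",
#                 "initials": "A", "color": "#000"}},
#       Member,
#       fault_tolerant=True,
#   )
#
# would return {"id": 1, "username": "alice", "email": "a@example.com", "initials": "A"}.
# Keys the component does not declare are dropped, and with fault_tolerant=True a
# malformed payload is returned unchanged with a warning instead of raising.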
def extract_dict_elements_from_component_fields(
data: dict, component: Type[Component]
) -> dict:
"""Extract elements from a dictionary.
Args:
data: The dictionary to extract elements from.
component: The component to extract elements from.
Returns:
A dictionary containing the elements from the input dictionary that are also
in the component.
"""
output = {}
for attribute in fields(component):
if attribute.name in data:
output[attribute.name] = data[attribute.name]
return output
def load_query(
query: str, fault_tolerant: bool = False
) -> Tuple[Optional[Dict], Optional[str]]:
"""Attempts to parse a JSON string and return the parsed object.
If parsing fails, returns an error message.
:param query: The JSON string to parse.
:return: A tuple containing the parsed object or None and an error message or None.
"""
try:
return json.loads(query), None
except json.JSONDecodeError as e:
if fault_tolerant:
return (
None,
f"""Input must be a valid JSON. Got the following error: {str(e)}.
"Please reformat and try again.""",
)
else:
raise e
def fetch_first_id(data: dict, key: str) -> Optional[int]:
"""Fetch the first id from a dictionary."""
if key in data and len(data[key]) > 0:
if len(data[key]) > 1:
warnings.warn(f"Found multiple {key}: {data[key]}. Defaulting to first.")
return data[key][0]["id"]
return None
def fetch_data(url: str, access_token: str, query: Optional[dict] = None) -> dict:
"""Fetch data from a URL."""
headers = {"Authorization": access_token}
response = requests.get(url, headers=headers, params=query)
response.raise_for_status()
return response.json()
def fetch_team_id(access_token: str) -> Optional[int]:
"""Fetch the team id."""
url = f"{DEFAULT_URL}/team"
data = fetch_data(url, access_token)
return fetch_first_id(data, "teams")
def fetch_space_id(team_id: int, access_token: str) -> Optional[int]:
"""Fetch the space id."""
url = f"{DEFAULT_URL}/team/{team_id}/space"
data = fetch_data(url, access_token, query={"archived": "false"})
return fetch_first_id(data, "spaces")
def fetch_folder_id(space_id: int, access_token: str) -> Optional[int]:
"""Fetch the folder id."""
url = f"{DEFAULT_URL}/space/{space_id}/folder"
data = fetch_data(url, access_token, query={"archived": "false"})
return fetch_first_id(data, "folders")
def fetch_list_id(space_id: int, folder_id: int, access_token: str) -> Optional[int]:
"""Fetch the list id."""
if folder_id:
url = f"{DEFAULT_URL}/folder/{folder_id}/list"
else:
url = f"{DEFAULT_URL}/space/{space_id}/list"
data = fetch_data(url, access_token, query={"archived": "false"})
# The structure to fetch list id differs based if its folderless
if folder_id and "id" in data:
return data["id"]
else:
return fetch_first_id(data, "lists")
class ClickupAPIWrapper(BaseModel):
"""Wrapper for Clickup API."""
access_token: Optional[str] = None
team_id: Optional[str] = None
space_id: Optional[str] = None
folder_id: Optional[str] = None
list_id: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@classmethod
def get_access_code_url(
cls, oauth_client_id: str, redirect_uri: str = "https://google.com"
) -> str:
"""Get the URL to get an access code."""
url = f"https://app.clickup.com/api?client_id={oauth_client_id}"
return f"{url}&redirect_uri={redirect_uri}"
@classmethod
def get_access_token(
cls, oauth_client_id: str, oauth_client_secret: str, code: str
) -> Optional[str]:
"""Get the access token."""
url = f"{DEFAULT_URL}/oauth/token"
params = {
"client_id": oauth_client_id,
"client_secret": oauth_client_secret,
"code": code,
}
response = requests.post(url, params=params)
data = response.json()
if "access_token" not in data:
print(f"Error: {data}")
if "ECODE" in data and data["ECODE"] == "OAUTH_014":
url = ClickupAPIWrapper.get_access_code_url(oauth_client_id)
print(
"You already used this code once. Generate a new one.",
f"Our best guess for the url to get a new code is:\n{url}",
)
return None
return data["access_token"]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["access_token"] = get_from_dict_or_env(
values, "access_token", "CLICKUP_ACCESS_TOKEN"
)
values["team_id"] = fetch_team_id(values["access_token"])
values["space_id"] = fetch_space_id(values["team_id"], values["access_token"])
values["folder_id"] = fetch_folder_id(
values["space_id"], values["access_token"]
)
values["list_id"] = fetch_list_id(
values["space_id"], values["folder_id"], values["access_token"]
)
return values
def attempt_parse_teams(self, input_dict: dict) -> Dict[str, List[dict]]:
"""Parse appropriate content from the list of teams."""
parsed_teams: Dict[str, List[dict]] = {"teams": []}
for team in input_dict["teams"]:
try:
team = parse_dict_through_component(team, Team, fault_tolerant=False)
parsed_teams["teams"].append(team)
except Exception as e:
warnings.warn(f"Error parsing a team {e}")
return parsed_teams
def get_headers(
self,
) -> Mapping[str, Union[str, bytes]]:
"""Get the headers for the request."""
if not isinstance(self.access_token, str):
raise TypeError(f"Access Token: {self.access_token}, must be str.")
headers = {
"Authorization": str(self.access_token),
"Content-Type": "application/json",
}
return headers
def get_default_params(self) -> Dict:
return {"archived": "false"}
def get_authorized_teams(self) -> Dict[Any, Any]:
"""Get all teams for the user."""
url = f"{DEFAULT_URL}/team"
response = requests.get(url, headers=self.get_headers())
data = response.json()
parsed_teams = self.attempt_parse_teams(data)
return parsed_teams
def get_folders(self) -> Dict:
"""
Get all the folders for the team.
"""
url = f"{DEFAULT_URL}/team/" + str(self.team_id) + "/space"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {"response": response}
def get_task(self, query: str, fault_tolerant: bool = True) -> Dict:
"""
Retrieve a specific task.
"""
params, error = load_query(query, fault_tolerant=True)
if params is None:
return {"Error": error}
url = f"{DEFAULT_URL}/task/{params['task_id']}"
params = {
"custom_task_ids": "true",
"team_id": self.team_id,
"include_subtasks": "true",
}
response = requests.get(url, headers=self.get_headers(), params=params)
data = response.json()
parsed_task = parse_dict_through_component(
data, Task, fault_tolerant=fault_tolerant
)
return parsed_task
def get_lists(self) -> Dict:
"""
Get all available lists.
"""
url = f"{DEFAULT_URL}/folder/{self.folder_id}/list"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {"response": response}
def query_tasks(self, query: str) -> Dict:
"""
Query tasks that match certain fields
"""
params, error = load_query(query, fault_tolerant=True)
if params is None:
return {"Error": error}
url = f"{DEFAULT_URL}/list/{params['list_id']}/task"
params = self.get_default_params()
response = requests.get(url, headers=self.get_headers(), params=params)
return {"response": response}
def get_spaces(self) -> Dict:
"""
Get all spaces for the team.
"""
url = f"{DEFAULT_URL}/team/{self.team_id}/space"
response = requests.get(
url, headers=self.get_headers(), params=self.get_default_params()
)
data = response.json()
parsed_spaces = parse_dict_through_component(data, Space, fault_tolerant=True)
return parsed_spaces
def get_task_attribute(self, query: str) -> Dict:
"""
        Retrieve a specific attribute of a specified task.
"""
task = self.get_task(query, fault_tolerant=True)
params, error = load_query(query, fault_tolerant=True)
if not isinstance(params, dict):
return {"Error": error}
if params["attribute_name"] not in task:
return {
"Error": f"""attribute_name = {params['attribute_name']} was not
found in task keys {task.keys()}. Please call again with one of the key names."""
}
return {params["attribute_name"]: task[params["attribute_name"]]}
def update_task(self, query: str) -> Dict:
"""
Update an attribute of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
params = {
"custom_task_ids": "true",
"team_id": self.team_id,
"include_subtasks": "true",
}
headers = self.get_headers()
payload = {query_dict["attribute_name"]: query_dict["value"]}
response = requests.put(url, headers=headers, params=params, json=payload)
return {"response": response}
def update_task_assignees(self, query: str) -> Dict:
"""
Add or remove assignees of a specified task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
for user in query_dict["users"]:
if not isinstance(user, int):
return {
"Error": f"""All users must be integers, not strings!
"Got user {user} if type {type(user)}"""
}
url = f"{DEFAULT_URL}/task/{query_dict['task_id']}"
headers = self.get_headers()
if query_dict["operation"] == "add":
assigne_payload = {"add": query_dict["users"], "rem": []}
elif query_dict["operation"] == "rem":
assigne_payload = {"add": [], "rem": query_dict["users"]}
else:
raise ValueError(
f"Invalid operation ({query_dict['operation']}). ",
"Valid options ['add', 'rem'].",
)
params = {
"custom_task_ids": "true",
"team_id": self.team_id,
"include_subtasks": "true",
}
payload = {"assignees": assigne_payload}
response = requests.put(url, headers=headers, params=params, json=payload)
return {"response": response}
def create_task(self, query: str) -> Dict:
"""
Creates a new task.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
list_id = self.list_id
url = f"{DEFAULT_URL}/list/{list_id}/task"
params = {"custom_task_ids": "true", "team_id": self.team_id}
payload = extract_dict_elements_from_component_fields(query_dict, Task)
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers, params=params)
data: Dict = response.json()
return parse_dict_through_component(data, Task, fault_tolerant=True)
def create_list(self, query: str) -> Dict:
"""
Creates a new list.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
        # Default to creating the list inside the folder if one exists.
        # If not, fall back to a folderless list created directly in the space.
        if self.folder_id:
            url = f"{DEFAULT_URL}/folder/{self.folder_id}/list"
        else:
            url = f"{DEFAULT_URL}/space/{self.space_id}/list"
payload = extract_dict_elements_from_component_fields(query_dict, Task)
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers)
data = response.json()
parsed_list = parse_dict_through_component(data, CUList, fault_tolerant=True)
# set list id to new list
if "id" in parsed_list:
self.list_id = parsed_list["id"]
return parsed_list
def create_folder(self, query: str) -> Dict:
"""
Creates a new folder.
"""
query_dict, error = load_query(query, fault_tolerant=True)
if query_dict is None:
return {"Error": error}
space_id = self.space_id
url = f"{DEFAULT_URL}/space/{space_id}/folder"
payload = {
"name": query_dict["name"],
}
headers = self.get_headers()
response = requests.post(url, json=payload, headers=headers)
data = response.json()
if "id" in data:
self.list_id = data["id"]
return data
def run(self, mode: str, query: str) -> str:
"""Run the API."""
if mode == "get_task":
output = self.get_task(query)
elif mode == "get_task_attribute":
output = self.get_task_attribute(query)
elif mode == "get_teams":
output = self.get_authorized_teams()
elif mode == "create_task":
output = self.create_task(query)
elif mode == "create_list":
output = self.create_list(query)
elif mode == "create_folder":
output = self.create_folder(query)
elif mode == "get_lists":
output = self.get_lists()
elif mode == "get_folders":
output = self.get_folders()
elif mode == "get_spaces":
output = self.get_spaces()
elif mode == "update_task":
output = self.update_task(query)
elif mode == "update_task_assignees":
output = self.update_task_assignees(query)
else:
output = {"ModeError": f"Got unexpected mode {mode}."}
try:
return json.dumps(output)
except Exception:
return str(output)
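# --- Hedged usage sketch (not part of the original module) ---
# The access token and task id are placeholders; constructing the wrapper runs the
# root validator above, so a reachable ClickUp workspace is assumed.
if __name__ == "__main__":
    clickup = ClickupAPIWrapper(access_token="<placeholder-token>")
    print(clickup.run("get_teams", ""))
    print(clickup.run("get_task", json.dumps({"task_id": "<placeholder-task-id>"})))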
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~hn.py | from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
class HNLoader(WebBaseLoader):
"""Load `Hacker News` data.
It loads data from either main page results or the comments page."""
def load(self) -> List[Document]:
"""Get important HN webpage information.
HN webpage components are:
- title
- content
- source url,
- time of post
- author of the post
- number of comments
- rank of the post
"""
soup_info = self.scrape()
if "item" in self.web_path:
return self.load_comments(soup_info)
else:
return self.load_results(soup_info)
def load_comments(self, soup_info: Any) -> List[Document]:
"""Load comments from a HN post."""
comments = soup_info.select("tr[class='athing comtr']")
title = soup_info.select_one("tr[id='pagespace']").get("title")
return [
Document(
page_content=comment.text.strip(),
metadata={"source": self.web_path, "title": title},
)
for comment in comments
]
def load_results(self, soup: Any) -> List[Document]:
"""Load items from an HN page."""
items = soup.select("tr[class='athing']")
documents = []
for lineItem in items:
ranking = lineItem.select_one("span[class='rank']").text
link = lineItem.find("span", {"class": "titleline"}).find("a").get("href")
title = lineItem.find("span", {"class": "titleline"}).text.strip()
metadata = {
"source": self.web_path,
"title": title,
"link": link,
"ranking": ranking,
}
documents.append(
                Document(page_content=title, metadata=metadata)
)
return documents
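# --- Hedged usage sketch (not part of the original module) ---
# Network access is required; the item id below is a placeholder for any HN post.
if __name__ == "__main__":
    front_page_docs = HNLoader("https://news.ycombinator.com/").load()
    comment_docs = HNLoader("https://news.ycombinator.com/item?id=<post-id>").load()
    print(len(front_page_docs), len(comment_docs))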
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~twilio.py | """Util that calls Twilio."""
from typing import Any, Dict, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
class TwilioAPIWrapper(BaseModel):
"""Messaging Client using Twilio.
To use, you should have the ``twilio`` python package installed,
and the environment variables ``TWILIO_ACCOUNT_SID``, ``TWILIO_AUTH_TOKEN``, and
``TWILIO_FROM_NUMBER``, or pass `account_sid`, `auth_token`, and `from_number` as
named parameters to the constructor.
Example:
.. code-block:: python
from langchain.utilities.twilio import TwilioAPIWrapper
twilio = TwilioAPIWrapper(
account_sid="ACxxx",
auth_token="xxx",
from_number="+10123456789"
)
twilio.run('test', '+12484345508')
"""
client: Any #: :meta private:
account_sid: Optional[str] = None
"""Twilio account string identifier."""
auth_token: Optional[str] = None
"""Twilio auth token."""
from_number: Optional[str] = None
"""A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
format, an
[alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
that is enabled for the type of message you want to send. Phone numbers or
[short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
Twilio also work here. You cannot, for example, spoof messages from a private
cell phone number. If you are using `messaging_service_sid`, this parameter
must be empty.
""" # noqa: E501
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = False
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from twilio.rest import Client
except ImportError:
raise ImportError(
"Could not import twilio python package. "
"Please install it with `pip install twilio`."
)
account_sid = get_from_dict_or_env(values, "account_sid", "TWILIO_ACCOUNT_SID")
auth_token = get_from_dict_or_env(values, "auth_token", "TWILIO_AUTH_TOKEN")
values["from_number"] = get_from_dict_or_env(
values, "from_number", "TWILIO_FROM_NUMBER"
)
values["client"] = Client(account_sid, auth_token)
return values
def run(self, body: str, to: str) -> str:
"""Run body through Twilio and respond with message sid.
Args:
body: The text of the message you want to send. Can be up to 1,600
characters in length.
to: The destination phone number in
[E.164](https://www.twilio.com/docs/glossary/what-e164) format for
SMS/MMS or
[Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
for other 3rd-party channels.
""" # noqa: E501
message = self.client.messages.create(to, from_=self.from_number, body=body)
return message.sid
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~arxiv.py | """Util that calls Arxiv."""
import logging
import os
import re
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class ArxivAPIWrapper(BaseModel):
"""Wrapper around ArxivAPI.
To use, you should have the ``arxiv`` python package installed.
https://lukasschwab.me/arxiv.py/index.html
This wrapper will use the Arxiv API to conduct searches and
fetch document summaries. By default, it will return the document summaries
of the top-k results.
If the query is in the form of arxiv identifier
(see https://info.arxiv.org/help/find/index.html), it will return the paper
corresponding to the arxiv identifier.
It limits the Document content by doc_content_chars_max.
Set doc_content_chars_max=None if you don't want to limit the content size.
Attributes:
top_k_results: number of the top-scored document used for the arxiv tool
ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool.
load_max_docs: a limit to the number of loaded documents
load_all_available_meta:
if True: the `metadata` of the loaded Documents contains all available
meta info (see https://lukasschwab.me/arxiv.py/index.html#Result),
if False: the `metadata` contains only the published date, title,
authors and summary.
doc_content_chars_max: an optional cut limit for the length of a document's
content
Example:
.. code-block:: python
from langchain.utilities.arxiv import ArxivAPIWrapper
arxiv = ArxivAPIWrapper(
top_k_results = 3,
ARXIV_MAX_QUERY_LENGTH = 300,
load_max_docs = 3,
load_all_available_meta = False,
doc_content_chars_max = 40000
)
arxiv.run("tree of thought llm)
"""
arxiv_search: Any #: :meta private:
arxiv_exceptions: Any # :meta private:
top_k_results: int = 3
ARXIV_MAX_QUERY_LENGTH: int = 300
load_max_docs: int = 100
load_all_available_meta: bool = False
doc_content_chars_max: Optional[int] = 4000
def is_arxiv_identifier(self, query: str) -> bool:
"""Check if a query is an arxiv identifier."""
arxiv_identifier_pattern = r"\d{2}(0[1-9]|1[0-2])\.\d{4,5}(v\d+|)|\d{7}.*"
for query_item in query[: self.ARXIV_MAX_QUERY_LENGTH].split():
match_result = re.match(arxiv_identifier_pattern, query_item)
if not match_result:
return False
assert match_result is not None
if not match_result.group(0) == query_item:
return False
return True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import arxiv
values["arxiv_search"] = arxiv.Search
values["arxiv_exceptions"] = (
arxiv.ArxivError,
arxiv.UnexpectedEmptyPageError,
arxiv.HTTPError,
)
values["arxiv_result"] = arxiv.Result
except ImportError:
raise ImportError(
"Could not import arxiv python package. "
"Please install it with `pip install arxiv`."
)
return values
def run(self, query: str) -> str:
"""
        Performs an arxiv search and returns a single string
        with the publish date, title, authors, and summary
        for each article, separated by two newlines.
If an error occurs or no documents found, error text
is returned instead. Wrapper for
https://lukasschwab.me/arxiv.py/index.html#Search
Args:
query: a plaintext search query
""" # noqa: E501
try:
if self.is_arxiv_identifier(query):
results = self.arxiv_search(
id_list=query.split(),
max_results=self.top_k_results,
).results()
else:
results = self.arxiv_search( # type: ignore
query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results
).results()
except self.arxiv_exceptions as ex:
return f"Arxiv exception: {ex}"
docs = [
f"Published: {result.updated.date()}\n"
f"Title: {result.title}\n"
f"Authors: {', '.join(a.name for a in result.authors)}\n"
f"Summary: {result.summary}"
for result in results
]
if docs:
return "\n\n".join(docs)[: self.doc_content_chars_max]
else:
return "No good Arxiv Result was found"
def load(self, query: str) -> List[Document]:
"""
Run Arxiv search and get the article texts plus the article meta information.
See https://lukasschwab.me/arxiv.py/index.html#Search
Returns: a list of documents with the document.page_content in text format
Performs an arxiv search, downloads the top k results as PDFs, loads
them as Documents, and returns them in a List.
Args:
query: a plaintext search query
""" # noqa: E501
try:
import fitz
except ImportError:
raise ImportError(
"PyMuPDF package not found, please install it with "
"`pip install pymupdf`"
)
try:
# Remove the ":" and "-" from the query, as they can cause search problems
query = query.replace(":", "").replace("-", "")
if self.is_arxiv_identifier(query):
results = self.arxiv_search(
id_list=query[: self.ARXIV_MAX_QUERY_LENGTH].split(),
max_results=self.load_max_docs,
).results()
else:
results = self.arxiv_search( # type: ignore
query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs
).results()
except self.arxiv_exceptions as ex:
logger.debug("Error on arxiv: %s", ex)
return []
docs: List[Document] = []
for result in results:
try:
doc_file_name: str = result.download_pdf()
with fitz.open(doc_file_name) as doc_file:
text: str = "".join(page.get_text() for page in doc_file)
except (FileNotFoundError, fitz.fitz.FileDataError) as f_ex:
logger.debug(f_ex)
continue
if self.load_all_available_meta:
extra_metadata = {
"entry_id": result.entry_id,
"published_first_time": str(result.published.date()),
"comment": result.comment,
"journal_ref": result.journal_ref,
"doi": result.doi,
"primary_category": result.primary_category,
"categories": result.categories,
"links": [link.href for link in result.links],
}
else:
extra_metadata = {}
metadata = {
"Published": str(result.updated.date()),
"Title": result.title,
"Authors": ", ".join(a.name for a in result.authors),
"Summary": result.summary,
**extra_metadata,
}
doc = Document(
page_content=text[: self.doc_content_chars_max], metadata=metadata
)
docs.append(doc)
os.remove(doc_file_name)
return docs
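# --- Hedged usage sketch (not part of the original module) ---
# Requires the `arxiv` package (plus `pymupdf` for `load`) and network access; the
# query and identifier are illustrative only.
if __name__ == "__main__":
    arxiv_wrapper = ArxivAPIWrapper(top_k_results=2, doc_content_chars_max=1000)
    print(arxiv_wrapper.is_arxiv_identifier("1605.08386v1"))  # matches the identifier pattern
    print(arxiv_wrapper.run("quantum error correction"))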
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~utilities~test_jira_api.py | """Integration test for JIRA API Wrapper."""
from langchain.utilities.jira import JiraAPIWrapper
def test_search() -> None:
"""Test for Searching issues on JIRA"""
jql = "project = TP"
jira = JiraAPIWrapper()
output = jira.run("jql", jql)
assert "issues" in output
def test_getprojects() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
output = jira.run("get_projects", "")
assert "projects" in output
def test_create_ticket() -> None:
"""Test the Create Ticket Call that Creates a Issue/Ticket on JIRA."""
issue_string = (
'{"summary": "Test Summary", "description": "Test Description",'
' "issuetype": {"name": "Bug"}, "project": {"key": "TP"}}'
)
jira = JiraAPIWrapper()
output = jira.run("create_issue", issue_string)
assert "id" in output
assert "key" in output
def test_create_confluence_page() -> None:
"""Test for getting projects on JIRA"""
jira = JiraAPIWrapper()
create_page_dict = (
'{"space": "ROC", "title":"This is the title",'
'"body":"This is the body. You can use '
'<strong>HTML tags</strong>!"}'
)
output = jira.run("create_page", create_page_dict)
assert "type" in output
assert "page" in output
def test_other() -> None:
"""Non-exhaustive test for accessing other JIRA API methods"""
jira = JiraAPIWrapper()
issue_create_dict = """
{
"function":"issue_create",
"kwargs": {
"fields": {
"summary": "Test Summary",
"description": "Test Description",
"issuetype": {"name": "Bug"},
"project": {"key": "TP"}
}
}
}
"""
output = jira.run("other", issue_create_dict)
assert "id" in output
assert "key" in output
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~file_management~copy.py | import shutil
from typing import Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.base import BaseTool
from langchain.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileCopyInput(BaseModel):
"""Input for CopyFileTool."""
source_path: str = Field(..., description="Path of the file to copy")
destination_path: str = Field(..., description="Path to save the copied file")
class CopyFileTool(BaseFileToolMixin, BaseTool):
"""Tool that copies a file."""
name: str = "copy_file"
args_schema: Type[BaseModel] = FileCopyInput
description: str = "Create a copy of a file in a specified location"
def _run(
self,
source_path: str,
destination_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="destination_path", value=destination_path
)
try:
shutil.copy2(source_path_, destination_path_, follow_symlinks=False)
return f"File copied successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
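# --- Hedged usage sketch (not part of the original module) ---
# Paths are placeholders created inside a temporary directory; `root_dir` scopes the
# tool so relative paths cannot escape it.
if __name__ == "__main__":
    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as root:
        (Path(root) / "source.txt").write_text("hello")
        tool = CopyFileTool(root_dir=root)
        print(tool.run({"source_path": "source.txt", "destination_path": "copy.txt"}))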
| [
"Create a copy of a file in a specified location"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~xinference.py | from typing import TYPE_CHECKING, Any, Dict, Generator, List, Mapping, Optional, Union
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
if TYPE_CHECKING:
from xinference.client import RESTfulChatModelHandle, RESTfulGenerateModelHandle
from xinference.model.llm.core import LlamaCppGenerateConfig
class Xinference(LLM):
"""Wrapper for accessing Xinference's large-scale model inference service.
To use, you should have the xinference library installed:
.. code-block:: bash
pip install "xinference[all]"
Check out: https://github.com/xorbitsai/inference
To run, you need to start a Xinference supervisor on one server and Xinference workers on the other servers
Example:
To start a local instance of Xinference, run
.. code-block:: bash
$ xinference
You can also deploy Xinference in a distributed cluster. Here are the steps:
Starting the supervisor:
.. code-block:: bash
$ xinference-supervisor
Starting the worker:
.. code-block:: bash
$ xinference-worker
Then, launch a model using command line interface (CLI).
Example:
.. code-block:: bash
$ xinference launch -n orca -s 3 -q q4_0
It will return a model UID. Then, you can use Xinference with LangChain.
Example:
.. code-block:: python
from langchain.llms import Xinference
llm = Xinference(
server_url="http://0.0.0.0:9997",
model_uid = {model_uid} # replace model_uid with the model UID return from launching the model
)
llm(
prompt="Q: where can we visit in the capital of France? A:",
generate_config={"max_tokens": 1024, "stream": True},
)
To view all the supported builtin models, run:
.. code-block:: bash
$ xinference list --all
""" # noqa: E501
client: Any
server_url: Optional[str]
"""URL of the xinference server"""
model_uid: Optional[str]
"""UID of the launched model"""
model_kwargs: Dict[str, Any]
"""Keyword arguments to be passed to xinference.LLM"""
def __init__(
self,
server_url: Optional[str] = None,
model_uid: Optional[str] = None,
**model_kwargs: Any,
):
try:
from xinference.client import RESTfulClient
except ImportError as e:
raise ImportError(
"Could not import RESTfulClient from xinference. Please install it"
" with `pip install xinference`."
) from e
model_kwargs = model_kwargs or {}
super().__init__(
**{
"server_url": server_url,
"model_uid": model_uid,
"model_kwargs": model_kwargs,
}
)
if self.server_url is None:
raise ValueError("Please provide server URL")
if self.model_uid is None:
raise ValueError("Please provide the model UID")
self.client = RESTfulClient(server_url)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "xinference"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"server_url": self.server_url},
**{"model_uid": self.model_uid},
**{"model_kwargs": self.model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the xinference model and return the output.
Args:
prompt: The prompt to use for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Returns:
The generated string by the model.
"""
model = self.client.get_model(self.model_uid)
generate_config: "LlamaCppGenerateConfig" = kwargs.get("generate_config", {})
generate_config = {**self.model_kwargs, **generate_config}
if stop:
generate_config["stop"] = stop
if generate_config and generate_config.get("stream"):
combined_text_output = ""
for token in self._stream_generate(
model=model,
prompt=prompt,
run_manager=run_manager,
generate_config=generate_config,
):
combined_text_output += token
return combined_text_output
else:
completion = model.generate(prompt=prompt, generate_config=generate_config)
return completion["choices"][0]["text"]
def _stream_generate(
self,
model: Union["RESTfulGenerateModelHandle", "RESTfulChatModelHandle"],
prompt: str,
run_manager: Optional[CallbackManagerForLLMRun] = None,
generate_config: Optional["LlamaCppGenerateConfig"] = None,
) -> Generator[str, None, None]:
"""
Args:
prompt: The prompt to use for generation.
model: The model used for generation.
stop: Optional list of stop words to use when generating.
generate_config: Optional dictionary for the configuration used for
generation.
Yields:
A string token.
"""
streaming_response = model.generate(
prompt=prompt, generate_config=generate_config
)
for chunk in streaming_response:
if isinstance(chunk, dict):
choices = chunk.get("choices", [])
if choices:
choice = choices[0]
if isinstance(choice, dict):
token = choice.get("text", "")
log_probs = choice.get("logprobs")
if run_manager:
run_manager.on_llm_new_token(
token=token, verbose=self.verbose, log_probs=log_probs
)
yield token
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~mediawikidump.py | import logging
from pathlib import Path
from typing import List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class MWDumpLoader(BaseLoader):
"""Load `MediaWiki` dump from an `XML` file.
Example:
.. code-block:: python
from langchain.document_loaders import MWDumpLoader
loader = MWDumpLoader(
file_path="myWiki.xml",
encoding="utf8"
)
docs = loader.load()
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000, chunk_overlap=0
)
texts = text_splitter.split_documents(docs)
:param file_path: XML local file path
:type file_path: str
:param encoding: Charset encoding, defaults to "utf8"
:type encoding: str, optional
:param namespaces: The namespace of pages you want to parse.
See https://www.mediawiki.org/wiki/Help:Namespaces#Localisation
for a list of all common namespaces
:type namespaces: List[int],optional
:param skip_redirects: True to skip pages that redirect to other pages,
False to keep them. False by default
:type skip_redirects: bool, optional
:param stop_on_error: False to skip over pages that cause parsing errors,
True to stop. True by default
:type stop_on_error: bool, optional
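Illustrative sketch (an assumption built on the parameters above, not from the
original docs): restrict parsing to the main article namespace and drop redirects.
.. code-block:: python
loader = MWDumpLoader(
file_path="myWiki.xml",
namespaces=[0],
skip_redirects=True,
stop_on_error=False,
)
docs = loader.load()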
"""
def __init__(
self,
file_path: Union[str, Path],
encoding: Optional[str] = "utf8",
namespaces: Optional[Sequence[int]] = None,
skip_redirects: Optional[bool] = False,
stop_on_error: Optional[bool] = True,
):
self.file_path = file_path if isinstance(file_path, str) else str(file_path)
self.encoding = encoding
# Namespaces range from -2 to 15, inclusive.
self.namespaces = namespaces or list(range(-2, 16))
self.skip_redirects = skip_redirects
self.stop_on_error = stop_on_error
def load(self) -> List[Document]:
"""Load from a file path."""
try:
import mwparserfromhell
import mwxml
except ImportError as e:
raise ImportError(
"Unable to import 'mwparserfromhell' or 'mwxml'. Please install with"
" `pip install mwparserfromhell mwxml`."
) from e
dump = mwxml.Dump.from_file(open(self.file_path, encoding=self.encoding))
docs = []
for page in dump.pages:
if self.skip_redirects and page.redirect:
continue
if page.namespace not in self.namespaces:
continue
try:
for revision in page:
code = mwparserfromhell.parse(revision.text)
text = code.strip_code(
normalize=True, collapse=True, keep_template_params=False
)
metadata = {"source": page.title}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
logger.error("Parsing error: {}".format(e))
if self.stop_on_error:
raise e
else:
continue
return docs
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~odt.py | from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredODTLoader(UnstructuredFileLoader):
"""Load `OpenOffice ODT` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredODTLoader
loader = UnstructuredODTLoader(
"example.odt", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-odt
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the file to load.
mode: The mode to use when loading the file. Can be one of "single",
"multi", or "all". Default is "single".
**unstructured_kwargs: Any kwargs to pass to the unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.3")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.odt import partition_odt
return partition_odt(filename=self.file_path, **self.unstructured_kwargs)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~formatting.py | """DEPRECATED: Kept for backwards compatibility."""
from langchain.utils.formatting import StrictFormatter, formatter
__all__ = ["StrictFormatter", "formatter"]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~jina.py | import os
from typing import Any, Dict, List, Optional
import requests
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
class JinaEmbeddings(BaseModel, Embeddings):
"""Jina embedding models."""
client: Any #: :meta private:
model_name: str = "ViT-B-32::openai"
"""Model name to use."""
jina_auth_token: Optional[str] = None
jina_api_url: str = "https://api.clip.jina.ai/api/v1/models/"
request_headers: Optional[dict] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that auth token exists in environment."""
# Set Auth
jina_auth_token = get_from_dict_or_env(
values, "jina_auth_token", "JINA_AUTH_TOKEN"
)
values["jina_auth_token"] = jina_auth_token
values["request_headers"] = (("authorization", jina_auth_token),)
# Test that package is installed
try:
import jina
except ImportError:
raise ImportError(
"Could not import `jina` python package. "
"Please install it with `pip install jina`."
)
# Setup client
jina_api_url = os.environ.get("JINA_API_URL", values["jina_api_url"])
model_name = values["model_name"]
try:
resp = requests.get(
jina_api_url + f"?model_name={model_name}",
headers={"Authorization": jina_auth_token},
)
if resp.status_code == 401:
raise ValueError(
"The given Jina auth token is invalid. "
"Please check your Jina auth token."
)
elif resp.status_code == 404:
raise ValueError(
f"The given model name `{model_name}` is not valid. "
f"Please go to https://cloud.jina.ai/user/inference "
f"and create a model with the given model name."
)
resp.raise_for_status()
endpoint = resp.json()["endpoints"]["grpc"]
values["client"] = jina.Client(host=endpoint)
except requests.exceptions.HTTPError as err:
raise ValueError(f"Error: {err!r}")
return values
def _post(self, docs: List[Any], **kwargs: Any) -> Any:
payload = dict(inputs=docs, metadata=self.request_headers, **kwargs)
return self.client.post(on="/encode", **payload)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Jina's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
from docarray import Document, DocumentArray
embeddings = self._post(
docs=DocumentArray([Document(text=t) for t in texts])
).embeddings
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Call out to Jina's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
from docarray import Document, DocumentArray
embedding = self._post(docs=DocumentArray([Document(text=text)])).embeddings[0]
return list(map(float, embedding))
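# Hedged usage sketch (not part of the original module; assumes JINA_AUTH_TOKEN
# is set and the `jina` and `docarray` packages are installed):
#
#     from langchain.embeddings import JinaEmbeddings
#
#     embeddings = JinaEmbeddings(model_name="ViT-B-32::openai")
#     doc_vectors = embeddings.embed_documents(["Alpha", "Beta"])
#     query_vector = embeddings.embed_query("What comes after alpha?")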
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~unit_tests~retrievers~test_you.py | import json
import os
from unittest import mock
from requests import Response
from langchain.retrievers.you import YouRetriever
from langchain.schema import Document
class TestYouRetriever:
def test_get_relevant_documents(self) -> None:
os.environ["YDC_API_KEY"] = "MOCK KEY!"
retriever = YouRetriever()
with mock.patch("requests.get") as mock_get:
fixture = {"hits": [{"snippets": ["yo"]}, {"snippets": ["bird up"]}]}
response = Response()
response._content = bytes(json.dumps(fixture).encode("utf-8"))
mock_get.return_value = response
actual = retriever.get_relevant_documents("test")
assert actual == [
Document(page_content="yo"),
Document(page_content="bird up"),
]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~whylabs_callback.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Optional
from langchain.callbacks.base import BaseCallbackHandler
from langchain.utils import get_from_env
if TYPE_CHECKING:
from whylogs.api.logger.logger import Logger
diagnostic_logger = logging.getLogger(__name__)
def import_langkit(
sentiment: bool = False,
toxicity: bool = False,
themes: bool = False,
) -> Any:
"""Import the langkit python package and raise an error if it is not installed.
Args:
sentiment: Whether to import the langkit.sentiment module. Defaults to False.
toxicity: Whether to import the langkit.toxicity module. Defaults to False.
themes: Whether to import the langkit.themes module. Defaults to False.
Returns:
The imported langkit module.
"""
try:
import langkit # noqa: F401
import langkit.regexes # noqa: F401
import langkit.textstat # noqa: F401
if sentiment:
import langkit.sentiment # noqa: F401
if toxicity:
import langkit.toxicity # noqa: F401
if themes:
import langkit.themes # noqa: F401
except ImportError:
raise ImportError(
"To use the whylabs callback manager you need to have the `langkit` python "
"package installed. Please install it with `pip install langkit`."
)
return langkit
class WhyLabsCallbackHandler(BaseCallbackHandler):
"""
Callback Handler for logging to WhyLabs. This callback handler utilizes
`langkit` to extract features from the prompts & responses when interacting with
an LLM. These features can be used to guardrail, evaluate, and observe interactions
over time to detect issues relating to hallucinations, prompt engineering,
or output validation. LangKit is an LLM monitoring toolkit developed by WhyLabs.
Here are some examples of what can be monitored with LangKit:
* Text Quality
- readability score
- complexity and grade scores
* Text Relevance
- Similarity scores between prompt/responses
- Similarity scores against user-defined themes
- Topic classification
* Security and Privacy
- patterns - count of strings matching a user-defined regex pattern group
- jailbreaks - similarity scores with respect to known jailbreak attempts
- prompt injection - similarity scores with respect to known prompt attacks
- refusals - similarity scores with respect to known LLM refusal responses
* Sentiment and Toxicity
- sentiment analysis
- toxicity analysis
For more information, see https://docs.whylabs.ai/docs/language-model-monitoring
or check out the LangKit repo here: https://github.com/whylabs/langkit
---
Args:
api_key (Optional[str]): WhyLabs API key. Optional because the preferred
way to specify the API key is with environment variable
WHYLABS_API_KEY.
org_id (Optional[str]): WhyLabs organization id to write profiles to.
Optional because the preferred way to specify the organization id is
with environment variable WHYLABS_DEFAULT_ORG_ID.
dataset_id (Optional[str]): WhyLabs dataset id to write profiles to.
Optional because the preferred way to specify the dataset id is
with environment variable WHYLABS_DEFAULT_DATASET_ID.
sentiment (bool): Whether to enable sentiment analysis. Defaults to False.
toxicity (bool): Whether to enable toxicity analysis. Defaults to False.
themes (bool): Whether to enable theme analysis. Defaults to False.
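A hedged usage sketch (not part of the original docstring; assumes the `langkit`
package is installed and the WHYLABS_API_KEY, WHYLABS_DEFAULT_ORG_ID and
WHYLABS_DEFAULT_DATASET_ID environment variables are set):
.. code-block:: python
from langchain.callbacks import WhyLabsCallbackHandler
from langchain.llms import OpenAI
whylabs = WhyLabsCallbackHandler.from_params()
llm = OpenAI(temperature=0, callbacks=[whylabs])
llm("Tell me a joke about statistics.")
whylabs.close()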
"""
def __init__(self, logger: Logger, handler: Any):
"""Initiate the rolling logger."""
super().__init__()
if hasattr(handler, "init"):
handler.init(self)
if hasattr(handler, "_get_callbacks"):
self._callbacks = handler._get_callbacks()
else:
self._callbacks = dict()
diagnostic_logger.warning("initialized handler without callbacks.")
self._logger = logger
def flush(self) -> None:
"""Explicitly write current profile if using a rolling logger."""
if self._logger and hasattr(self._logger, "_do_rollover"):
self._logger._do_rollover()
diagnostic_logger.info("Flushing WhyLabs logger, writing profile...")
def close(self) -> None:
"""Close any loggers to allow writing out of any profiles before exiting."""
if self._logger and hasattr(self._logger, "close"):
self._logger.close()
diagnostic_logger.info("Closing WhyLabs logger, see you next time!")
def __enter__(self) -> WhyLabsCallbackHandler:
return self
def __exit__(
self, exception_type: Any, exception_value: Any, traceback: Any
) -> None:
self.close()
@classmethod
def from_params(
cls,
*,
api_key: Optional[str] = None,
org_id: Optional[str] = None,
dataset_id: Optional[str] = None,
sentiment: bool = False,
toxicity: bool = False,
themes: bool = False,
logger: Optional[Logger] = None,
) -> WhyLabsCallbackHandler:
"""Instantiate whylogs Logger from params.
Args:
api_key (Optional[str]): WhyLabs API key. Optional because the preferred
way to specify the API key is with environment variable
WHYLABS_API_KEY.
org_id (Optional[str]): WhyLabs organization id to write profiles to.
If not set must be specified in environment variable
WHYLABS_DEFAULT_ORG_ID.
dataset_id (Optional[str]): The model or dataset this callback is gathering
telemetry for. If not set must be specified in environment variable
WHYLABS_DEFAULT_DATASET_ID.
sentiment (bool): If True will initialize a model to perform
sentiment analysis compound score. Defaults to False and will not gather
this metric.
toxicity (bool): If True will initialize a model to score
toxicity. Defaults to False and will not gather this metric.
themes (bool): If True will initialize a model to calculate
distance to configured themes. Defaults to False and will not gather this
metric.
logger (Optional[Logger]): If specified will bind the configured logger as
the telemetry gathering agent. Defaults to LangKit schema with periodic
WhyLabs writer.
"""
# langkit library will import necessary whylogs libraries
import_langkit(sentiment=sentiment, toxicity=toxicity, themes=themes)
import whylogs as why
from langkit.callback_handler import get_callback_instance
from whylogs.api.writer.whylabs import WhyLabsWriter
from whylogs.experimental.core.udf_schema import udf_schema
if logger is None:
api_key = api_key or get_from_env("api_key", "WHYLABS_API_KEY")
org_id = org_id or get_from_env("org_id", "WHYLABS_DEFAULT_ORG_ID")
dataset_id = dataset_id or get_from_env(
"dataset_id", "WHYLABS_DEFAULT_DATASET_ID"
)
whylabs_writer = WhyLabsWriter(
api_key=api_key, org_id=org_id, dataset_id=dataset_id
)
whylabs_logger = why.logger(
mode="rolling", interval=5, when="M", schema=udf_schema()
)
whylabs_logger.append_writer(writer=whylabs_writer)
else:
diagnostic_logger.info("Using passed in whylogs logger {logger}")
whylabs_logger = logger
callback_handler_cls = get_callback_instance(logger=whylabs_logger, impl=cls)
diagnostic_logger.info(
"Started whylogs Logger with WhyLabsWriter and initialized LangKit. 📝"
)
return callback_handler_cls
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~render.py | """Different methods for rendering Tools to be passed to LLMs.
Depending on the LLM you are using and the prompting strategy you are using,
you may want Tools to be rendered in a different way.
This module contains various ways to render tools.
"""
from typing import List
from langchain.tools.base import BaseTool
from langchain.utils.openai_functions import (
FunctionDescription,
convert_pydantic_to_openai_function,
)
def render_text_description(tools: List[BaseTool]) -> str:
"""Render the tool name and description in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search
calculator: This tool is used for math
"""
return "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
def render_text_description_and_args(tools: List[BaseTool]) -> str:
"""Render the tool name, description, and args in plain text.
Output will be in the format of:
.. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
args_schema = str(tool.args)
tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}")
return "\n".join(tool_strings)
def format_tool_to_openai_function(tool: BaseTool) -> FunctionDescription:
"""Format tool into the OpenAI function API."""
if tool.args_schema:
return convert_pydantic_to_openai_function(
tool.args_schema, name=tool.name, description=tool.description
)
else:
return {
"name": tool.name,
"description": tool.description,
"parameters": {
# This is a hack to get around the fact that some tools
# do not expose an args_schema, and expect an argument
# which is a string.
# And OpenAI does not support an array type for the
# parameters.
"properties": {
"__arg1": {"title": "__arg1", "type": "string"},
},
"required": ["__arg1"],
"type": "object",
},
}
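# Hedged usage sketch (not part of the original module; the tool below is a
# hypothetical example):
#
#     from langchain.agents import Tool
#
#     search = Tool(
#         name="search",
#         description="This tool is used for search",
#         func=lambda query: query,
#     )
#     render_text_description([search])
#     # -> "search: This tool is used for search"
#     format_tool_to_openai_function(search)["name"]
#     # -> "search"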
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~__init__.py | """
**Agent** is a class that uses an LLM to choose a sequence of actions to take.
In Chains, a sequence of actions is hardcoded. In Agents,
a language model is used as a reasoning engine to determine which actions
to take and in which order.
Agents select and use **Tools** and **Toolkits** for actions.
**Class hierarchy:**
.. code-block::
BaseSingleActionAgent --> LLMSingleActionAgent
OpenAIFunctionsAgent
XMLAgent
Agent --> <name>Agent # Examples: ZeroShotAgent, ChatAgent
BaseMultiActionAgent --> OpenAIMultiFunctionsAgent
**Main helpers:**
.. code-block::
AgentType, AgentExecutor, AgentOutputParser, AgentExecutorIterator,
AgentAction, AgentFinish
""" # noqa: E501
from langchain.agents.agent import (
Agent,
AgentExecutor,
AgentOutputParser,
BaseMultiActionAgent,
BaseSingleActionAgent,
LLMSingleActionAgent,
)
from langchain.agents.agent_iterator import AgentExecutorIterator
from langchain.agents.agent_toolkits import (
create_csv_agent,
create_json_agent,
create_openapi_agent,
create_pandas_dataframe_agent,
create_pbi_agent,
create_pbi_chat_agent,
create_spark_dataframe_agent,
create_spark_sql_agent,
create_sql_agent,
create_vectorstore_agent,
create_vectorstore_router_agent,
create_xorbits_agent,
)
from langchain.agents.agent_types import AgentType
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents.conversational_chat.base import ConversationalChatAgent
from langchain.agents.conversational_context_chat.base import (
ConversationalChatContextAgent,
)
from langchain.agents.initialize import initialize_agent
from langchain.agents.load_tools import (
get_all_tool_names,
load_huggingface_tool,
load_tools,
)
from langchain.agents.loading import load_agent
from langchain.agents.mrkl.base import MRKLChain, ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFunctionsAgent
from langchain.agents.react.base import ReActChain, ReActTextWorldAgent
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
from langchain.agents.structured_chat.base import StructuredChatAgent
from langchain.agents.tools import Tool, tool
from langchain.agents.xml.base import XMLAgent
__all__ = [
"Agent",
"AgentExecutor",
"AgentExecutorIterator",
"AgentOutputParser",
"AgentType",
"BaseMultiActionAgent",
"BaseSingleActionAgent",
"ConversationalAgent",
"ConversationalChatAgent",
"ConversationalChatContextAgent",
"LLMSingleActionAgent",
"MRKLChain",
"OpenAIFunctionsAgent",
"OpenAIMultiFunctionsAgent",
"ReActChain",
"ReActTextWorldAgent",
"SelfAskWithSearchChain",
"StructuredChatAgent",
"Tool",
"ZeroShotAgent",
"create_csv_agent",
"create_json_agent",
"create_openapi_agent",
"create_pandas_dataframe_agent",
"create_pbi_agent",
"create_pbi_chat_agent",
"create_spark_dataframe_agent",
"create_spark_sql_agent",
"create_sql_agent",
"create_vectorstore_agent",
"create_vectorstore_router_agent",
"get_all_tool_names",
"initialize_agent",
"load_agent",
"load_huggingface_tool",
"load_tools",
"tool",
"create_xorbits_agent",
"XMLAgent",
]
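# Hedged usage sketch (not part of the original module; assumes the `openai`
# package and an OPENAI_API_KEY environment variable):
#
#     from langchain.agents import AgentType, initialize_agent, load_tools
#     from langchain.llms import OpenAI
#
#     llm = OpenAI(temperature=0)
#     tools = load_tools(["llm-math"], llm=llm)
#     agent = initialize_agent(
#         tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
#     )
#     agent.run("What is 2 raised to the 10th power?")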
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~tsv.py | from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredTSVLoader(UnstructuredFileLoader):
"""Load `TSV` files using `Unstructured`.
Like other
Unstructured loaders, UnstructuredTSVLoader can be used in both
"single" and "elements" mode. If you use the loader in "elements"
mode, the TSV file will be a single Unstructured Table element.
If you use the loader in "elements" mode, an HTML representation
of the table will be available in the "text_as_html" key in the
document metadata.
Examples
--------
from langchain.document_loaders.tsv import UnstructuredTSVLoader
loader = UnstructuredTSVLoader("stanley-cups.tsv", mode="elements")
docs = loader.load()
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.7.6")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.tsv import partition_tsv
return partition_tsv(filename=self.file_path, **self.unstructured_kwargs)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~human.py | from typing import Any, Callable, Dict, Optional
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
def _default_approve(_input: str) -> bool:
msg = (
"Do you approve of the following input? "
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no."
)
msg += "\n\n" + _input + "\n"
resp = input(msg)
return resp.lower() in ("yes", "y")
def _default_true(_: Dict[str, Any]) -> bool:
return True
class HumanRejectedException(Exception):
"""Exception to raise when a person manually review and rejects a value."""
class HumanApprovalCallbackHandler(BaseCallbackHandler):
"""Callback for manually validating values."""
raise_error: bool = True
def __init__(
self,
approve: Callable[[Any], bool] = _default_approve,
should_check: Callable[[Dict[str, Any]], bool] = _default_true,
):
self._approve = approve
self._should_check = should_check
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if self._should_check(serialized) and not self._approve(input_str):
raise HumanRejectedException(
f"Inputs {input_str} to tool {serialized} were rejected."
)
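# Hedged usage sketch (not part of the original module; ShellTool is used here
# purely as an illustration and requires an interactive terminal for input()):
#
#     from langchain.callbacks import HumanApprovalCallbackHandler
#     from langchain.tools import ShellTool
#
#     tool = ShellTool(callbacks=[HumanApprovalCallbackHandler()])
#     print(tool.run("echo hello"))  # prompts for approval before running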
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~deepinfra.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
DEFAULT_MODEL_ID = "sentence-transformers/clip-ViT-B-32"
class DeepInfraEmbeddings(BaseModel, Embeddings):
"""Deep Infra's embedding inference service.
To use, you should have the
environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass
it as a named parameter to the constructor.
There are multiple embeddings models available,
see https://deepinfra.com/models?type=embeddings.
Example:
.. code-block:: python
from langchain.embeddings import DeepInfraEmbeddings
deepinfra_emb = DeepInfraEmbeddings(
model_id="sentence-transformers/clip-ViT-B-32",
deepinfra_api_token="my-api-key"
)
r1 = deepinfra_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = deepinfra_emb.embed_query(
"What is the second letter of Greek alphabet"
)
"""
model_id: str = DEFAULT_MODEL_ID
"""Embeddings model to use."""
normalize: bool = False
"""whether to normalize the computed embeddings"""
embed_instruction: str = "passage: "
"""Instruction used to embed documents."""
query_instruction: str = "query: "
"""Instruction used to embed the query."""
model_kwargs: Optional[dict] = None
"""Other model keyword args"""
deepinfra_api_token: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
deepinfra_api_token = get_from_dict_or_env(
values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN"
)
values["deepinfra_api_token"] = deepinfra_api_token
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"model_id": self.model_id}
def _embed(self, input: List[str]) -> List[List[float]]:
_model_kwargs = self.model_kwargs or {}
# HTTP headers for authorization
headers = {
"Authorization": f"bearer {self.deepinfra_api_token}",
"Content-Type": "application/json",
}
# send request
try:
res = requests.post(
f"https://api.deepinfra.com/v1/inference/{self.model_id}",
headers=headers,
json={"inputs": input, "normalize": self.normalize, **_model_kwargs},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
embeddings = t["embeddings"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a Deep Infra deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f"{self.query_instruction}{text}" for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a Deep Infra deployed embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = f"{self.query_instruction}{text}"
embedding = self._embed([instruction_pair])[0]
return embedding
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~konko.py | """KonkoAI chat wrapper."""
from __future__ import annotations
import logging
import os
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
)
import requests
from langchain.adapters.openai import convert_dict_to_message, convert_message_to_dict
from langchain.callbacks.manager import (
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import _generate_from_stream
from langchain.chat_models.openai import ChatOpenAI, _convert_delta_to_message_chunk
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import AIMessageChunk, BaseMessage
from langchain.schema.output import ChatGenerationChunk
from langchain.utils import get_from_dict_or_env
DEFAULT_API_BASE = "https://api.konko.ai/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-13b-chat-hf"
logger = logging.getLogger(__name__)
class ChatKonko(ChatOpenAI):
"""`ChatKonko` Chat large language models API.
To use, you should have the ``konko`` python package installed, and the
environment variables ``KONKO_API_KEY`` and ``OPENAI_API_KEY`` set with your API keys.
Any parameters that are valid to be passed to the konko.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatKonko
llm = ChatKonko(model="meta-llama/Llama-2-13b-chat-hf")
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"konko_api_key": "KONKO_API_KEY", "openai_api_key": "OPENAI_API_KEY"}
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
client: Any = None #: :meta private:
model: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
openai_api_key: Optional[str] = None
konko_api_key: Optional[str] = None
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout for requests to Konko completion API."""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int = 20
"""Maximum number of tokens to generate."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["konko_api_key"] = get_from_dict_or_env(
values, "konko_api_key", "KONKO_API_KEY"
)
try:
import konko
except ImportError:
raise ValueError(
"Could not import konko python package. "
"Please install it with `pip install konko`."
)
try:
values["client"] = konko.ChatCompletion
except AttributeError:
raise ValueError(
"`konko` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the konko package. Try upgrading it "
"with `pip install --upgrade konko`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Konko API."""
return {
"model": self.model,
"request_timeout": self.request_timeout,
"max_tokens": self.max_tokens,
"stream": self.streaming,
"n": self.n,
"temperature": self.temperature,
**self.model_kwargs,
}
@staticmethod
def get_available_models(
konko_api_key: Optional[str] = None,
openai_api_key: Optional[str] = None,
konko_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Konko API."""
# Try to retrieve the OpenAI API key if it's not passed as an argument
if not openai_api_key:
try:
openai_api_key = os.environ["OPENAI_API_KEY"]
except KeyError:
pass # It's okay if it's not set, we just won't use it
# Try to retrieve the Konko API key if it's not passed as an argument
if not konko_api_key:
try:
konko_api_key = os.environ["KONKO_API_KEY"]
except KeyError:
raise ValueError(
"Konko API key must be passed as keyword argument or "
"set in environment variable KONKO_API_KEY."
)
models_url = f"{konko_api_base}/models"
headers = {
"Authorization": f"Bearer {konko_api_key}",
}
if openai_api_key:
headers["X-OpenAI-Api-Key"] = openai_api_key
models_response = requests.get(models_url, headers=headers)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}"
)
return {model["id"] for model in models_response.json()["data"]}
def completion_with_retry(
self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any
) -> Any:
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
return {"token_usage": overall_token_usage, "model_name": self.model}
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class = AIMessageChunk
for chunk in self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
):
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
finish_reason = choice.get("finish_reason")
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=chunk)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs}
response = self.completion_with_retry(
messages=message_dicts, run_manager=run_manager, **params
)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: List[BaseMessage], stop: Optional[List[str]]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
generations = []
for res in response["choices"]:
message = convert_dict_to_message(res["message"])
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.get("finish_reason")),
)
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model}, **self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the konko client."""
return {**self._default_params}
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {
"model": self.model,
**super()._get_invocation_params(stop=stop),
**self._default_params,
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "konko-chat"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~momento_vector_index.py | from typing import (
TYPE_CHECKING,
Any,
Iterable,
List,
Optional,
Tuple,
Type,
TypeVar,
cast,
)
from uuid import uuid4
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_env
from langchain.vectorstores.utils import DistanceStrategy
VST = TypeVar("VST", bound="VectorStore")
if TYPE_CHECKING:
from momento import PreviewVectorIndexClient
class MomentoVectorIndex(VectorStore):
"""`Momento Vector Index` (MVI) vector store.
Momento Vector Index is a serverless vector index that can be used to store and
search vectors. To use you should have the ``momento`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import MomentoVectorIndex
from momento import (
CredentialProvider,
PreviewVectorIndexClient,
VectorIndexConfigurations,
)
vectorstore = MomentoVectorIndex(
embedding=OpenAIEmbeddings(),
client=PreviewVectorIndexClient(
VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_environment_variable(
"MOMENTO_API_KEY"
),
),
index_name="my-index",
)
"""
def __init__(
self,
embedding: Embeddings,
client: "PreviewVectorIndexClient",
index_name: str = "default",
distance_strategy: DistanceStrategy = DistanceStrategy.COSINE,
text_field: str = "text",
ensure_index_exists: bool = True,
**kwargs: Any,
):
"""Initialize a Vector Store backed by Momento Vector Index.
Args:
embedding (Embeddings): The embedding function to use.
client (PreviewVectorIndexClient): The Momento Vector Index client to use,
already configured with credentials for the service.
index_name (str, optional): The name of the index to store the documents in.
Defaults to "default".
distance_strategy (DistanceStrategy, optional): The distance strategy to
use. Defaults to DistanceStrategy.COSINE. If you select
DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared
Euclidean distance.
text_field (str, optional): The name of the metadata field to store the
original text in. Defaults to "text".
ensure_index_exists (bool, optional): Whether to ensure that the index
exists before adding documents to it. Defaults to True.
"""
try:
from momento import PreviewVectorIndexClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
self._client: PreviewVectorIndexClient = client
self._embedding = embedding
self.index_name = index_name
self.__validate_distance_strategy(distance_strategy)
self.distance_strategy = distance_strategy
self.text_field = text_field
self._ensure_index_exists = ensure_index_exists
@staticmethod
def __validate_distance_strategy(distance_strategy: DistanceStrategy) -> None:
if distance_strategy not in [
DistanceStrategy.COSINE,
DistanceStrategy.MAX_INNER_PRODUCT,
DistanceStrategy.EUCLIDEAN_DISTANCE,
]:
raise ValueError(f"Distance strategy {distance_strategy} not implemented.")
@property
def embeddings(self) -> Embeddings:
return self._embedding
def _create_index_if_not_exists(self, num_dimensions: int) -> bool:
"""Create index if it does not exist."""
from momento.requests.vector_index import SimilarityMetric
from momento.responses.vector_index import CreateIndex
similarity_metric = None
if self.distance_strategy == DistanceStrategy.COSINE:
similarity_metric = SimilarityMetric.COSINE_SIMILARITY
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
similarity_metric = SimilarityMetric.INNER_PRODUCT
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
similarity_metric = SimilarityMetric.EUCLIDEAN_SIMILARITY
else:
raise ValueError(
f"Distance strategy {self.distance_strategy} not implemented."
)
response = self._client.create_index(
self.index_name, num_dimensions, similarity_metric
)
if isinstance(response, CreateIndex.Success):
return True
elif isinstance(response, CreateIndex.IndexAlreadyExists):
return False
elif isinstance(response, CreateIndex.Error):
raise response.inner_exception
else:
raise Exception(f"Unexpected response: {response}")
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Iterable of strings to add to the vectorstore.
metadatas (Optional[List[dict]]): Optional list of metadatas associated with
the texts.
kwargs (Any): Other optional parameters. Specifically:
- ids (List[str], optional): List of ids to use for the texts.
Defaults to None, in which case uuids are generated.
Returns:
List[str]: List of ids from adding the texts into the vectorstore.
"""
from momento.requests.vector_index import Item
from momento.responses.vector_index import UpsertItemBatch
texts = list(texts)
if len(texts) == 0:
return []
if metadatas is not None:
for metadata, text in zip(metadatas, texts):
metadata[self.text_field] = text
else:
metadatas = [{self.text_field: text} for text in texts]
try:
embeddings = self._embedding.embed_documents(texts)
except NotImplementedError:
embeddings = [self._embedding.embed_query(x) for x in texts]
# Create index if it does not exist.
# We assume that if it does exist, then it was created with the desired number
# of dimensions and similarity metric.
if self._ensure_index_exists:
self._create_index_if_not_exists(len(embeddings[0]))
if "ids" in kwargs:
ids = kwargs["ids"]
if len(ids) != len(embeddings):
raise ValueError("Number of ids must match number of texts")
else:
ids = [str(uuid4()) for _ in range(len(embeddings))]
batch_size = 128
for i in range(0, len(embeddings), batch_size):
start = i
end = min(i + batch_size, len(embeddings))
items = [
Item(id=id, vector=vector, metadata=metadata)
for id, vector, metadata in zip(
ids[start:end],
embeddings[start:end],
metadatas[start:end],
)
]
response = self._client.upsert_item_batch(self.index_name, items)
if isinstance(response, UpsertItemBatch.Success):
pass
elif isinstance(response, UpsertItemBatch.Error):
raise response.inner_exception
else:
raise Exception(f"Unexpected response: {response}")
return ids
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID.
Args:
ids (List[str]): List of ids to delete.
kwargs (Any): Other optional parameters (unused)
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
from momento.responses.vector_index import DeleteItemBatch
if ids is None:
return True
response = self._client.delete_item_batch(self.index_name, ids)
return isinstance(response, DeleteItemBatch.Success)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
res = self.similarity_search_with_score(query=query, k=k, **kwargs)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar documents to the query string.
Args:
query (str): The query string to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
embedding = self._embedding.embed_query(query)
results = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return results
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
kwargs (Any): Vector Store specific search parameters. The following are
forwarded to the Momento Vector Index:
- top_k (int, optional): The number of results to return.
Returns:
List[Tuple[Document, float]]: A list of tuples of the form
(Document, score).
"""
from momento.requests.vector_index import ALL_METADATA
from momento.responses.vector_index import Search
if "top_k" in kwargs:
k = kwargs["k"]
response = self._client.search(
self.index_name, embedding, top_k=k, metadata_fields=ALL_METADATA
)
if not isinstance(response, Search.Success):
return []
results = []
for hit in response.hits:
text = cast(str, hit.metadata.pop(self.text_field))
doc = Document(page_content=text, metadata=hit.metadata)
pair = (doc, hit.distance)
results.append(pair)
return results
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
"""Search for similar documents to the query vector.
Args:
embedding (List[float]): The query vector to search for.
k (int, optional): The number of results to return. Defaults to 4.
Returns:
List[Document]: A list of documents that are similar to the query.
"""
results = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return [doc for doc, _ in results]
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VST:
"""Return the Vector Store initialized from texts and embeddings.
Args:
cls (Type[VST]): The Vector Store class to use to initialize
the Vector Store.
texts (List[str]): The texts to initialize the Vector Store with.
embedding (Embeddings): The embedding function to use.
metadatas (Optional[List[dict]], optional): The metadata associated with
the texts. Defaults to None.
kwargs (Any): Vector Store specific parameters. The following are forwarded
to the Vector Store constructor and required:
- index_name (str, optional): The name of the index to store the documents
in. Defaults to "default".
- text_field (str, optional): The name of the metadata field to store the
original text in. Defaults to "text".
- distance_strategy (DistanceStrategy, optional): The distance strategy to
use. Defaults to DistanceStrategy.COSINE. If you select
DistanceStrategy.EUCLIDEAN_DISTANCE, Momento uses the squared
Euclidean distance.
- ensure_index_exists (bool, optional): Whether to ensure that the index
exists before adding documents to it. Defaults to True.
Additionally you can either pass in a client or an API key
- client (PreviewVectorIndexClient): The Momento Vector Index client to use.
- api_key (Optional[str]): The configuration to use to initialize
the Vector Index with. Defaults to None. If None, the configuration
is initialized from the environment variable `MOMENTO_API_KEY`.
Returns:
VST: Momento Vector Index vector store initialized from texts and
embeddings.
"""
from momento import (
CredentialProvider,
PreviewVectorIndexClient,
VectorIndexConfigurations,
)
if "client" in kwargs:
client = kwargs.pop("client")
else:
supplied_api_key = kwargs.pop("api_key", None)
api_key = supplied_api_key or get_from_env("api_key", "MOMENTO_API_KEY")
client = PreviewVectorIndexClient(
configuration=VectorIndexConfigurations.Default.latest(),
credential_provider=CredentialProvider.from_string(api_key),
)
vector_db = cls(embedding=embedding, client=client, **kwargs) # type: ignore
vector_db.add_texts(texts=texts, metadatas=metadatas, **kwargs)
return vector_db
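# Hedged usage sketch (not part of the original module; assumes a MOMENTO_API_KEY
# environment variable, the `momento` and `openai` packages, and that the class
# is exported from langchain.vectorstores):
#
#     from langchain.embeddings import OpenAIEmbeddings
#     from langchain.vectorstores import MomentoVectorIndex
#
#     store = MomentoVectorIndex.from_texts(
#         ["harrison worked at kensho"],
#         OpenAIEmbeddings(),
#         index_name="my-index",
#     )
#     docs = store.similarity_search("Where did harrison work?", k=1)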
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~vectorstores~clickhouse.py | from __future__ import annotations
import json
import logging
from hashlib import sha1
from threading import Thread
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseSettings
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
logger = logging.getLogger()
def has_mul_sub_str(s: str, *args: Any) -> bool:
"""
Check if a string contains multiple substrings.
Args:
s: string to check.
*args: substrings to check.
Returns:
True if all substrings are in the string, False otherwise.
"""
for a in args:
if a not in s:
return False
return True
class ClickhouseSettings(BaseSettings):
"""`ClickHouse` client configuration.
Attribute:
host (str) : A URL to connect to the ClickHouse backend.
Defaults to 'localhost'.
port (int) : URL port to connect with HTTP. Defaults to 8123.
username (str) : Username to login. Defaults to None.
password (str) : Password to login. Defaults to None.
index_type (str): index type string.
index_param (list): index build parameter.
index_query_params(dict): index query parameters.
database (str) : Database name to find the table. Defaults to 'default'.
table (str) : Table name to operate on.
Defaults to 'vector_table'.
metric (str) : Metric to compute distance,
supported are ('angular', 'euclidean', 'manhattan', 'hamming',
'dot'). Defaults to 'angular'.
https://github.com/spotify/annoy/blob/main/src/annoymodule.cc#L149-L169
column_map (Dict) : Column type map to project column name onto langchain
semantics. Must have keys: `id`, `uuid`, `document`, `embedding`
and `metadata`, matching the columns of the table. For example:
.. code-block:: python
{
'id': 'text_id',
'uuid': 'global_unique_id'
'embedding': 'text_embedding',
'document': 'text_plain',
'metadata': 'metadata_dictionary_in_json',
}
Defaults to identity map.
"""
host: str = "localhost"
port: int = 8123
username: Optional[str] = None
password: Optional[str] = None
index_type: str = "annoy"
# Annoy supports L2Distance and cosineDistance.
index_param: Optional[Union[List, Dict]] = ["'L2Distance'", 100]
index_query_params: Dict[str, str] = {}
column_map: Dict[str, str] = {
"id": "id",
"uuid": "uuid",
"document": "document",
"embedding": "embedding",
"metadata": "metadata",
}
database: str = "default"
table: str = "langchain"
metric: str = "angular"
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Config:
env_file = ".env"
env_prefix = "clickhouse_"
env_file_encoding = "utf-8"
class Clickhouse(VectorStore):
"""`ClickHouse VectorSearch` vector store.
You need a `clickhouse-connect` python package, and a valid account
to connect to ClickHouse.
ClickHouse can not only search with simple vector indexes,
it also supports complex query with multiple conditions,
constraints and even sub-queries.
For more information, please visit
[ClickHouse official site](https://clickhouse.com/clickhouse)
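A hedged usage sketch (not part of the original docstring; assumes a local
ClickHouse server reachable on the default HTTP port and the
`clickhouse-connect` package installed):
.. code-block:: python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Clickhouse, ClickhouseSettings
settings = ClickhouseSettings(table="langchain_demo")
docsearch = Clickhouse.from_texts(
["harrison worked at kensho"], OpenAIEmbeddings(), config=settings
)
docs = docsearch.similarity_search("Where did harrison work?", k=1)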
"""
def __init__(
self,
embedding: Embeddings,
config: Optional[ClickhouseSettings] = None,
**kwargs: Any,
) -> None:
"""ClickHouse Wrapper to LangChain
embedding_function (Embeddings):
config (ClickHouseSettings): Configuration to ClickHouse Client
Other keyword arguments will pass into
[clickhouse-connect](https://docs.clickhouse.com/)
"""
try:
from clickhouse_connect import get_client
except ImportError:
raise ImportError(
"Could not import clickhouse connect python package. "
"Please install it with `pip install clickhouse-connect`."
)
try:
from tqdm import tqdm
self.pgbar = tqdm
except ImportError:
# Just in case if tqdm is not installed
self.pgbar = lambda x, **kwargs: x
super().__init__()
if config is not None:
self.config = config
else:
self.config = ClickhouseSettings()
assert self.config
assert self.config.host and self.config.port
assert (
self.config.column_map
and self.config.database
and self.config.table
and self.config.metric
)
for k in ["id", "embedding", "document", "metadata", "uuid"]:
assert k in self.config.column_map
assert self.config.metric in [
"angular",
"euclidean",
"manhattan",
"hamming",
"dot",
]
# initialize the schema
dim = len(embedding.embed_query("test"))
index_params = (
(
",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()])
if self.config.index_param
else ""
)
if isinstance(self.config.index_param, Dict)
else ",".join([str(p) for p in self.config.index_param])
if isinstance(self.config.index_param, List)
else self.config.index_param
)
self.schema = f"""\
CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}(
{self.config.column_map['id']} Nullable(String),
{self.config.column_map['document']} Nullable(String),
{self.config.column_map['embedding']} Array(Float32),
{self.config.column_map['metadata']} JSON,
{self.config.column_map['uuid']} UUID DEFAULT generateUUIDv4(),
CONSTRAINT cons_vec_len CHECK length({self.config.column_map['embedding']}) = {dim},
INDEX vec_idx {self.config.column_map['embedding']} TYPE \
{self.config.index_type}({index_params}) GRANULARITY 1000
) ENGINE = MergeTree ORDER BY uuid SETTINGS index_granularity = 8192\
"""
self.dim = dim
self.BS = "\\"
self.must_escape = ("\\", "'")
self.embedding_function = embedding
self.dist_order = "ASC" # Only support ConsingDistance and L2Distance
# Create a connection to clickhouse
self.client = get_client(
host=self.config.host,
port=self.config.port,
username=self.config.username,
password=self.config.password,
**kwargs,
)
# Enable JSON type
self.client.command("SET allow_experimental_object_type=1")
# Enable Annoy index
self.client.command("SET allow_experimental_annoy_index=1")
self.client.command(self.schema)
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def escape_str(self, value: str) -> str:
return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value)
def _build_insert_sql(self, transac: Iterable, column_names: Iterable[str]) -> str:
ks = ",".join(column_names)
_data = []
for n in transac:
n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n])
_data.append(f"({n})")
i_str = f"""
INSERT INTO TABLE
{self.config.database}.{self.config.table}({ks})
VALUES
{','.join(_data)}
"""
return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None:
_insert_query = self._build_insert_sql(transac, column_names)
self.client.command(_insert_query)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
batch_size: int = 32,
ids: Optional[Iterable[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Insert more texts through the embeddings and add to the VectorStore.
Args:
texts: Iterable of strings to add to the VectorStore.
ids: Optional list of ids to associate with the texts.
batch_size: Batch size of insertion
metadata: Optional column data to be inserted
Returns:
List of ids from adding the texts into the VectorStore.
"""
# Embed and create the documents
ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts]
colmap_ = self.config.column_map
transac = []
column_names = {
colmap_["id"]: ids,
colmap_["document"]: texts,
colmap_["embedding"]: self.embedding_function.embed_documents(list(texts)),
}
metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas)
assert len(set(colmap_) - set(column_names)) >= 0
keys, values = zip(*column_names.items())
try:
t = None
for v in self.pgbar(
zip(*values), desc="Inserting data...", total=len(metadatas)
):
assert (
len(v[keys.index(self.config.column_map["embedding"])]) == self.dim
)
transac.append(v)
if len(transac) == batch_size:
if t:
t.join()
t = Thread(target=self._insert, args=[transac, keys])
t.start()
transac = []
if len(transac) > 0:
if t:
t.join()
self._insert(transac, keys)
return [i for i in ids]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
config: Optional[ClickhouseSettings] = None,
text_ids: Optional[Iterable[str]] = None,
batch_size: int = 32,
**kwargs: Any,
) -> Clickhouse:
"""Create ClickHouse wrapper with existing texts
Args:
embedding (Embeddings): Function to extract text embeddings
texts (Iterable[str]): List or tuple of strings to be added
config (ClickhouseSettings, Optional): ClickHouse configuration
text_ids (Optional[Iterable], optional): IDs for the texts.
Defaults to None.
batch_size (int, optional): Batch size when transmitting data to ClickHouse.
Defaults to 32.
metadatas (List[dict], optional): Metadata for the texts. Defaults to None.
Other keyword arguments will pass into
[clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api)
Returns:
ClickHouse Index
"""
ctx = cls(embedding, config, **kwargs)
ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas)
return ctx
def __repr__(self) -> str:
"""Text representation for ClickHouse Vector Store, prints backends, username
and schemas. Easy to use with `str(ClickHouse())`
Returns:
repr: string to show connection info and data schema
"""
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ "
_repr += f"{self.config.host}:{self.config.port}\033[0m\n\n"
_repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n"
_repr += "-" * 51 + "\n"
for r in self.client.query(
f"DESC {self.config.database}.{self.config.table}"
).named_results():
_repr += (
f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n"
)
_repr += "-" * 51 + "\n"
return _repr
def _build_query_sql(
self, q_emb: List[float], topk: int, where_str: Optional[str] = None
) -> str:
q_emb_str = ",".join(map(str, q_emb))
if where_str:
where_str = f"PREWHERE {where_str}"
else:
where_str = ""
        settings_strs = []
        if self.config.index_query_params:
            for k in self.config.index_query_params:
                settings_strs.append(f"{k}={self.config.index_query_params[k]}")
        settings_clause = (
            f"SETTINGS {', '.join(settings_strs)}" if settings_strs else ""
        )
q_str = f"""
SELECT {self.config.column_map['document']},
{self.config.column_map['metadata']}, dist
FROM {self.config.database}.{self.config.table}
{where_str}
ORDER BY L2Distance({self.config.column_map['embedding']}, [{q_emb_str}])
AS dist {self.dist_order}
            LIMIT {topk} {settings_clause}
"""
return q_str
def similarity_search(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Document]:
"""Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                  of SQL injection. When dealing with metadatas, remember to
                  use `{self.metadata_column}.attribute` instead of `attribute`
                  alone. The default name for it is `metadata`.
Returns:
List[Document]: List of Documents
"""
return self.similarity_search_by_vector(
self.embedding_function.embed_query(query), k, where_str, **kwargs
)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
where_str: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search with ClickHouse by vectors
Args:
            embedding (List[float]): Embedding vector to search with
            k (int, optional): Top K neighbors to retrieve. Defaults to 4.
            where_str (Optional[str], optional): where condition string.
                Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                  of SQL injection. When dealing with metadatas, remember to
                  use `{self.metadata_column}.attribute` instead of `attribute`
                  alone. The default name for it is `metadata`.
Returns:
List[Document]: List of documents
"""
q_str = self._build_query_sql(embedding, k, where_str)
try:
return [
Document(
page_content=r[self.config.column_map["document"]],
metadata=r[self.config.column_map["metadata"]],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Perform a similarity search with ClickHouse
Args:
query (str): query string
k (int, optional): Top K neighbors to retrieve. Defaults to 4.
where_str (Optional[str], optional): where condition string.
Defaults to None.
            NOTE: Please do not let end users fill this in, and always be aware
                  of SQL injection. When dealing with metadatas, remember to
                  use `{self.metadata_column}.attribute` instead of `attribute`
                  alone. The default name for it is `metadata`.
        Returns:
            List[Tuple[Document, float]]: List of (Document, similarity distance)
"""
q_str = self._build_query_sql(
self.embedding_function.embed_query(query), k, where_str
)
try:
return [
(
Document(
page_content=r[self.config.column_map["document"]],
metadata=r[self.config.column_map["metadata"]],
),
r["dist"],
)
for r in self.client.query(q_str).named_results()
]
except Exception as e:
logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m")
return []
def drop(self) -> None:
"""
Helper function: Drop data
"""
self.client.command(
f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}"
)
@property
def metadata_column(self) -> str:
return self.config.column_map["metadata"]
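# Usage sketch (illustrative only): builds the store and runs a couple of queries.
# It assumes a ClickHouse server reachable at localhost:8123 with default credentials;
# FakeEmbeddings stands in for a real embedding model, and the `source` metadata
# filter in `where_str` is a hypothetical example.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings
    store = Clickhouse.from_texts(
        texts=["ClickHouse is a column-oriented DBMS", "LangChain composes LLM apps"],
        embedding=FakeEmbeddings(size=128),
        metadatas=[{"source": "db"}, {"source": "docs"}],
        config=ClickhouseSettings(host="localhost", port=8123),
    )
    print(store.similarity_search("column store", k=1)[0].page_content)
    # Metadata filtering is a raw SQL predicate, so treat it like any other SQL input.
    hits = store.similarity_search(
        "llm framework", k=1, where_str=f"{store.metadata_column}.source = 'docs'"
    )
    print(hits[0].metadata)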
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~ollama.py | import json
from typing import Any, Dict, Iterator, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import BaseLLM
from langchain.pydantic_v1 import Extra
from langchain.schema import LLMResult
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.output import GenerationChunk
def _stream_response_to_generation_chunk(
stream_response: str,
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
parsed_response = json.loads(stream_response)
generation_info = parsed_response if parsed_response.get("done") is True else None
return GenerationChunk(
text=parsed_response.get("response", ""), generation_info=generation_info
)
class _OllamaCommon(BaseLanguageModel):
base_url: str = "http://localhost:11434"
"""Base url the model is hosted under."""
model: str = "llama2"
"""Model name to use."""
mirostat: Optional[int]
"""Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
mirostat_eta: Optional[float]
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)"""
mirostat_tau: Optional[float]
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)"""
num_ctx: Optional[int]
"""Sets the size of the context window used to generate the
next token. (Default: 2048) """
num_gpu: Optional[int]
"""The number of GPUs to use. On macOS it defaults to 1 to
enable metal support, 0 to disable."""
num_thread: Optional[int]
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
repeat_last_n: Optional[int]
"""Sets how far back for the model to look back to prevent
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
repeat_penalty: Optional[float]
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
will penalize repetitions more strongly, while a lower value (e.g., 0.9)
will be more lenient. (Default: 1.1)"""
temperature: Optional[float]
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)"""
stop: Optional[List[str]]
"""Sets the stop tokens to use."""
tfs_z: Optional[float]
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)"""
top_k: Optional[int]
"""Reduces the probability of generating nonsense. A higher value (e.g. 100)
will give more diverse answers, while a lower value (e.g. 10)
will be more conservative. (Default: 40)"""
    top_p: Optional[float]
"""Works together with top-k. A higher value (e.g., 0.95) will lead
to more diverse text, while a lower value (e.g., 0.5) will
generate more focused and conservative text. (Default: 0.9)"""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {
"model": self.model,
"options": {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"stop": self.stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
def _create_stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> Iterator[str]:
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
stop = self.stop
elif stop is None:
stop = []
params = {**self._default_params, "stop": stop, **kwargs}
response = requests.post(
url=f"{self.base_url}/api/generate/",
headers={"Content-Type": "application/json"},
json={"prompt": prompt, **params},
stream=True,
)
response.encoding = "utf-8"
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"Ollama call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
return response.iter_lines(decode_unicode=True)
def _stream_with_aggregation(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
verbose: bool = False,
**kwargs: Any,
) -> GenerationChunk:
final_chunk: Optional[GenerationChunk] = None
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=verbose,
)
if final_chunk is None:
raise ValueError("No data received from Ollama stream.")
return final_chunk
class Ollama(BaseLLM, _OllamaCommon):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain.llms import Ollama
ollama = Ollama(model="llama2")
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "ollama-llm"
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to Ollama's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ollama("Tell me a joke.")
"""
# TODO: add caching here.
generations = []
for prompt in prompts:
final_chunk = super()._stream_with_aggregation(
prompt,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations)
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
for stream_resp in self._create_stream(prompt, stop, **kwargs):
if stream_resp:
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
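# Usage sketch (illustrative only): assumes a local Ollama server listening on
# http://localhost:11434 with the `llama2` model already pulled; prompts are arbitrary.
if __name__ == "__main__":
    llm = Ollama(model="llama2", temperature=0.1)
    print(llm("Name three column-oriented databases."))
    # Streaming goes through the same /api/generate endpoint, one chunk at a time.
    for chunk in llm.stream("Write a haiku about vectors."):
        print(chunk, end="", flush=True)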
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~arcgis_loader.py | """Document Loader for ArcGIS FeatureLayers."""
from __future__ import annotations
import json
import re
import warnings
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import arcgis
_NOT_PROVIDED = "(Not Provided)"
class ArcGISLoader(BaseLoader):
"""Load records from an ArcGIS FeatureLayer."""
def __init__(
self,
layer: Union[str, arcgis.features.FeatureLayer],
gis: Optional[arcgis.gis.GIS] = None,
where: str = "1=1",
out_fields: Optional[Union[List[str], str]] = None,
return_geometry: bool = False,
result_record_count: Optional[int] = None,
lyr_desc: Optional[str] = None,
**kwargs: Any,
):
try:
import arcgis
except ImportError as e:
raise ImportError(
"arcgis is required to use the ArcGIS Loader. "
"Install it with pip or conda."
) from e
try:
from bs4 import BeautifulSoup # type: ignore
self.BEAUTIFULSOUP = BeautifulSoup
except ImportError:
warnings.warn("BeautifulSoup not found. HTML will not be parsed.")
self.BEAUTIFULSOUP = None
self.gis = gis or arcgis.gis.GIS()
if isinstance(layer, str):
self.url = layer
self.layer = arcgis.features.FeatureLayer(layer, gis=gis)
else:
self.url = layer.url
self.layer = layer
self.layer_properties = self._get_layer_properties(lyr_desc)
self.where = where
if isinstance(out_fields, str):
self.out_fields = out_fields
elif out_fields is None:
self.out_fields = "*"
else:
self.out_fields = ",".join(out_fields)
self.return_geometry = return_geometry
self.result_record_count = result_record_count
self.return_all_records = not isinstance(result_record_count, int)
query_params = dict(
where=self.where,
out_fields=self.out_fields,
return_geometry=self.return_geometry,
return_all_records=self.return_all_records,
result_record_count=self.result_record_count,
)
query_params.update(kwargs)
self.query_params = query_params
def _get_layer_properties(self, lyr_desc: Optional[str] = None) -> dict:
"""Get the layer properties from the FeatureLayer."""
import arcgis
layer_number_pattern = re.compile(r"/\d+$")
props = self.layer.properties
if lyr_desc is None:
# retrieve description from the FeatureLayer if not provided
try:
if self.BEAUTIFULSOUP:
lyr_desc = self.BEAUTIFULSOUP(props["description"]).text
else:
lyr_desc = props["description"]
lyr_desc = lyr_desc or _NOT_PROVIDED
except KeyError:
lyr_desc = _NOT_PROVIDED
try:
item_id = props["serviceItemId"]
item = self.gis.content.get(item_id) or arcgis.features.FeatureLayer(
re.sub(layer_number_pattern, "", self.url),
)
try:
raw_desc = item.description
except AttributeError:
raw_desc = item.properties.description
if self.BEAUTIFULSOUP:
item_desc = self.BEAUTIFULSOUP(raw_desc).text
else:
item_desc = raw_desc
item_desc = item_desc or _NOT_PROVIDED
except KeyError:
item_desc = _NOT_PROVIDED
return {
"layer_description": lyr_desc,
"item_description": item_desc,
"layer_properties": props,
}
def lazy_load(self) -> Iterator[Document]:
"""Lazy load records from FeatureLayer."""
query_response = self.layer.query(**self.query_params)
features = (feature.as_dict for feature in query_response)
for feature in features:
attributes = feature["attributes"]
page_content = json.dumps(attributes)
metadata = {
"accessed": f"{datetime.now(timezone.utc).isoformat()}Z",
"name": self.layer_properties["layer_properties"]["name"],
"url": self.url,
"layer_description": self.layer_properties["layer_description"],
"item_description": self.layer_properties["item_description"],
"layer_properties": self.layer_properties["layer_properties"],
}
if self.return_geometry:
try:
metadata["geometry"] = feature["geometry"]
except KeyError:
warnings.warn(
"Geometry could not be retrieved from the feature layer."
)
yield Document(page_content=page_content, metadata=metadata)
def load(self) -> List[Document]:
"""Load all records from FeatureLayer."""
return list(self.lazy_load())
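# Usage sketch (illustrative only): requires the `arcgis` package and network access.
# The FeatureLayer URL below is a placeholder; substitute any layer you can reach.
if __name__ == "__main__":
    layer_url = (
        "https://services.arcgis.com/<org-id>/arcgis/rest/services"
        "/<service-name>/FeatureServer/0"  # hypothetical layer
    )
    loader = ArcGISLoader(layer_url, where="1=1", return_geometry=False)
    for doc in loader.lazy_load():
        print(doc.metadata["name"], len(doc.page_content))
        break  # only show the first record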
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~gooseai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""GooseAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
params = {**params, **kwargs}
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
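# Usage sketch (illustrative only): assumes GOOSEAI_API_KEY is set in the environment
# and the pre-1.0 `openai` package is installed; model name and prompt are arbitrary.
if __name__ == "__main__":
    llm = GooseAI(model_name="gpt-neo-20b", temperature=0.2, max_tokens=64)
    print(llm("Explain tokenization in one sentence."))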
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~zilliz.py | import warnings
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.zilliz import Zilliz
# TODO: Update to ZillizClient + Hybrid Search when available
class ZillizRetriever(BaseRetriever):
"""`Zilliz API` retriever."""
embedding_function: Embeddings
"""The underlying embedding function from which documents will be retrieved."""
collection_name: str = "LangChainCollection"
"""The name of the collection in Zilliz."""
connection_args: Optional[Dict[str, Any]] = None
"""The connection arguments for the Zilliz client."""
consistency_level: str = "Session"
"""The consistency level for the Zilliz client."""
search_params: Optional[dict] = None
"""The search parameters for the Zilliz client."""
store: Zilliz
"""The underlying Zilliz store."""
retriever: BaseRetriever
"""The underlying retriever."""
@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
values["store"] = Zilliz(
values["embedding_function"],
values["collection_name"],
values["connection_args"],
values["consistency_level"],
)
values["retriever"] = values["store"].as_retriever(
search_kwargs={"param": values["search_params"]}
)
return values
def add_texts(
self, texts: List[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Add text to the Zilliz store
Args:
texts (List[str]): The text
metadatas (List[dict]): Metadata dicts, must line up with existing store
"""
self.store.add_texts(texts, metadatas)
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
return self.retriever.get_relevant_documents(
query, run_manager=run_manager.get_child(), **kwargs
)
def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever:
"""Deprecated ZillizRetreiver.
Please use ZillizRetriever ('i' before 'e') instead.
Args:
*args:
**kwargs:
Returns:
ZillizRetriever
"""
warnings.warn(
"ZillizRetreiver will be deprecated in the future. "
"Please use ZillizRetriever ('i' before 'e') instead.",
DeprecationWarning,
)
return ZillizRetriever(*args, **kwargs)
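# Usage sketch (illustrative only): assumes a reachable Zilliz Cloud / Milvus endpoint;
# the URI and token are placeholders and FakeEmbeddings stands in for a real model.
# The pre-validator reads every field from the raw input, so all of them are passed
# explicitly here.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings
    retriever = ZillizRetriever(
        embedding_function=FakeEmbeddings(size=128),
        collection_name="LangChainCollection",
        connection_args={"uri": "https://<cluster>.zillizcloud.com", "token": "<token>"},
        consistency_level="Session",
        search_params=None,
    )
    retriever.add_texts(["Zilliz is a managed vector database service."])
    print(retriever.get_relevant_documents("vector database"))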
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~schema~runnable~passthrough.py | """Implementation of the RunnablePassthrough."""
from __future__ import annotations
import asyncio
import inspect
import threading
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
from langchain.pydantic_v1 import BaseModel, create_model
from langchain.schema.runnable.base import (
Other,
Runnable,
RunnableParallel,
RunnableSerializable,
)
from langchain.schema.runnable.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
get_executor_for_config,
)
from langchain.schema.runnable.utils import AddableDict, ConfigurableFieldSpec
from langchain.utils.aiter import atee, py_anext
from langchain.utils.iter import safetee
def identity(x: Other) -> Other:
"""An identity function"""
return x
async def aidentity(x: Other) -> Other:
"""An async identity function"""
return x
class RunnablePassthrough(RunnableSerializable[Other, Other]):
"""A runnable to passthrough inputs unchanged or with additional keys.
This runnable behaves almost like the identity function, except that it
can be configured to add additional keys to the output, if the input is a
dict.
The examples below demonstrate this runnable works using a few simple
chains. The chains rely on simple lambdas to make the examples easy to execute
and experiment with.
Examples:
.. code-block:: python
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
runnable = RunnableParallel(
origin=RunnablePassthrough(),
modified=lambda x: x+1
)
runnable.invoke(1) # {'origin': 1, 'modified': 2}
def fake_llm(prompt: str) -> str: # Fake LLM for the example
return "completion"
chain = RunnableLambda(fake_llm) | {
'original': RunnablePassthrough(), # Original LLM output
'parsed': lambda text: text[::-1] # Parsing logic
}
chain.invoke('hello') # {'original': 'completion', 'parsed': 'noitelpmoc'}
In some cases, it may be useful to pass the input through while adding some
keys to the output. In this case, you can use the `assign` method:
.. code-block:: python
from langchain.schema.runnable import RunnablePassthrough, RunnableParallel
def fake_llm(prompt: str) -> str: # Fake LLM for the example
return "completion"
runnable = {
'llm1': fake_llm,
'llm2': fake_llm,
}
| RunnablePassthrough.assign(
total_chars=lambda inputs: len(inputs['llm1'] + inputs['llm2'])
)
runnable.invoke('hello')
# {'llm1': 'completion', 'llm2': 'completion', 'total_chars': 20}
"""
input_type: Optional[Type[Other]] = None
func: Optional[
Union[Callable[[Other], None], Callable[[Other, RunnableConfig], None]]
] = None
afunc: Optional[
Union[
Callable[[Other], Awaitable[None]],
Callable[[Other, RunnableConfig], Awaitable[None]],
]
] = None
def __init__(
self,
func: Optional[
Union[
Union[Callable[[Other], None], Callable[[Other, RunnableConfig], None]],
Union[
Callable[[Other], Awaitable[None]],
Callable[[Other, RunnableConfig], Awaitable[None]],
],
]
] = None,
afunc: Optional[
Union[
Callable[[Other], Awaitable[None]],
Callable[[Other, RunnableConfig], Awaitable[None]],
]
] = None,
*,
input_type: Optional[Type[Other]] = None,
**kwargs: Any,
) -> None:
if inspect.iscoroutinefunction(func):
afunc = func
func = None
super().__init__(func=func, afunc=afunc, input_type=input_type, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
@property
def InputType(self) -> Any:
return self.input_type or Any
@property
def OutputType(self) -> Any:
return self.input_type or Any
@classmethod
def assign(
cls,
**kwargs: Union[
Runnable[Dict[str, Any], Any],
Callable[[Dict[str, Any]], Any],
Mapping[
str,
Union[Runnable[Dict[str, Any], Any], Callable[[Dict[str, Any]], Any]],
],
],
) -> RunnableAssign:
"""Merge the Dict input with the output produced by the mapping argument.
Args:
mapping: A mapping from keys to runnables or callables.
Returns:
A runnable that merges the Dict input with the output produced by the
mapping argument.
"""
return RunnableAssign(RunnableParallel(kwargs))
def invoke(
self, input: Other, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Other:
if self.func is not None:
call_func_with_variable_args(self.func, input, config or {}, **kwargs)
return self._call_with_config(identity, input, config)
async def ainvoke(
self,
input: Other,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Other:
if self.afunc is not None:
await acall_func_with_variable_args(
self.afunc, input, config or {}, **kwargs
)
elif self.func is not None:
call_func_with_variable_args(self.func, input, config or {}, **kwargs)
return await self._acall_with_config(aidentity, input, config)
def transform(
self,
input: Iterator[Other],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Other]:
if self.func is None:
for chunk in self._transform_stream_with_config(input, identity, config):
yield chunk
else:
final = None
for chunk in self._transform_stream_with_config(input, identity, config):
yield chunk
if final is None:
final = chunk
else:
final = final + chunk
if final is not None:
call_func_with_variable_args(self.func, final, config or {}, **kwargs)
async def atransform(
self,
input: AsyncIterator[Other],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Other]:
if self.afunc is None and self.func is None:
async for chunk in self._atransform_stream_with_config(
input, identity, config
):
yield chunk
else:
final = None
async for chunk in self._atransform_stream_with_config(
input, identity, config
):
yield chunk
if final is None:
final = chunk
else:
final = final + chunk
if final is not None:
config = config or {}
if self.afunc is not None:
await acall_func_with_variable_args(
self.afunc, final, config, **kwargs
)
elif self.func is not None:
call_func_with_variable_args(self.func, final, config, **kwargs)
def stream(
self,
input: Other,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Other]:
return self.transform(iter([input]), config, **kwargs)
async def astream(
self,
input: Other,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Other]:
async def input_aiter() -> AsyncIterator[Other]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
class RunnableAssign(RunnableSerializable[Dict[str, Any], Dict[str, Any]]):
"""
A runnable that assigns key-value pairs to Dict[str, Any] inputs.
"""
mapper: RunnableParallel[Dict[str, Any]]
def __init__(self, mapper: RunnableParallel[Dict[str, Any]], **kwargs: Any) -> None:
super().__init__(mapper=mapper, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
map_input_schema = self.mapper.get_input_schema(config)
if not map_input_schema.__custom_root_type__:
# ie. it's a dict
return map_input_schema
return super().get_input_schema(config)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
map_input_schema = self.mapper.get_input_schema(config)
map_output_schema = self.mapper.get_output_schema(config)
if (
not map_input_schema.__custom_root_type__
and not map_output_schema.__custom_root_type__
):
# ie. both are dicts
return create_model( # type: ignore[call-overload]
"RunnableAssignOutput",
**{
k: (v.type_, v.default)
for s in (map_input_schema, map_output_schema)
for k, v in s.__fields__.items()
},
)
return super().get_output_schema(config)
@property
def config_specs(self) -> Sequence[ConfigurableFieldSpec]:
return self.mapper.config_specs
def invoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
assert isinstance(
input, dict
), "The input to RunnablePassthrough.assign() must be a dict."
return {
**input,
**self.mapper.invoke(input, config, **kwargs),
}
async def ainvoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
assert isinstance(
input, dict
), "The input to RunnablePassthrough.assign() must be a dict."
return {
**input,
**await self.mapper.ainvoke(input, config, **kwargs),
}
def transform(
self,
input: Iterator[Dict[str, Any]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Dict[str, Any]]:
# collect mapper keys
mapper_keys = set(self.mapper.steps.keys())
# create two streams, one for the map and one for the passthrough
for_passthrough, for_map = safetee(input, 2, lock=threading.Lock())
# create map output stream
map_output = self.mapper.transform(for_map, config, **kwargs)
# get executor to start map output stream in background
with get_executor_for_config(config or {}) as executor:
# start map output stream
first_map_chunk_future = executor.submit(
next,
map_output, # type: ignore
None,
)
# consume passthrough stream
for chunk in for_passthrough:
assert isinstance(
chunk, dict
), "The input to RunnablePassthrough.assign() must be a dict."
# remove mapper keys from passthrough chunk, to be overwritten by map
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
)
if filtered:
yield filtered
# yield map output
yield cast(Dict[str, Any], first_map_chunk_future.result())
for chunk in map_output:
yield chunk
async def atransform(
self,
input: AsyncIterator[Dict[str, Any]],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Dict[str, Any]]:
# collect mapper keys
mapper_keys = set(self.mapper.steps.keys())
# create two streams, one for the map and one for the passthrough
for_passthrough, for_map = atee(input, 2, lock=asyncio.Lock())
# create map output stream
map_output = self.mapper.atransform(for_map, config, **kwargs)
# start map output stream
first_map_chunk_task: asyncio.Task = asyncio.create_task(
py_anext(map_output, None), # type: ignore[arg-type]
)
# consume passthrough stream
async for chunk in for_passthrough:
assert isinstance(
chunk, dict
), "The input to RunnablePassthrough.assign() must be a dict."
# remove mapper keys from passthrough chunk, to be overwritten by map output
filtered = AddableDict(
{k: v for k, v in chunk.items() if k not in mapper_keys}
)
if filtered:
yield filtered
# yield map output
yield await first_map_chunk_task
async for chunk in map_output:
yield chunk
def stream(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Dict[str, Any]]:
return self.transform(iter([input]), config, **kwargs)
async def astream(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Dict[str, Any]]:
async def input_aiter() -> AsyncIterator[Dict[str, Any]]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
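# Usage sketch (illustrative only): a self-contained demo of RunnablePassthrough.assign.
# The original dict keys pass through untouched while the assigned key is computed;
# when streaming, the passthrough keys arrive before the assigned ones.
if __name__ == "__main__":
    runnable = RunnablePassthrough.assign(total_chars=lambda d: len(d["text"]))
    print(runnable.invoke({"text": "hello world"}))
    # -> {'text': 'hello world', 'total_chars': 11}
    for chunk in runnable.stream({"text": "hello world"}):
        print(chunk)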
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~llm_rails.py | """ This file is for LLMRails Embedding """
import logging
import os
from typing import List, Optional
import requests
from langchain.pydantic_v1 import BaseModel, Extra
from langchain.schema.embeddings import Embeddings
class LLMRailsEmbeddings(BaseModel, Embeddings):
"""LLMRails embedding models.
To use, you should have the environment
variable ``LLM_RAILS_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Model can be one of ["embedding-english-v1","embedding-multi-v1"]
Example:
.. code-block:: python
from langchain.embeddings import LLMRailsEmbeddings
            embeddings = LLMRailsEmbeddings(
model="embedding-english-v1", api_key="my-api-key"
)
"""
model: str = "embedding-english-v1"
"""Model name to use."""
api_key: Optional[str] = None
"""LLMRails API key."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
api_key = self.api_key or os.environ.get("LLM_RAILS_API_KEY")
if api_key is None:
logging.warning("Can't find LLMRails credentials in environment.")
raise ValueError("LLM_RAILS_API_KEY is not set")
response = requests.post(
"https://api.llmrails.com/v1/embeddings",
headers={"X-API-KEY": api_key},
json={"input": texts, "model": self.model},
timeout=60,
)
return [item["embedding"] for item in response.json()["data"]]
def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
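# Usage sketch (illustrative only): assumes a valid key in LLM_RAILS_API_KEY
# (or passed via `api_key`); the sample sentences are arbitrary.
if __name__ == "__main__":
    embeddings = LLMRailsEmbeddings(model="embedding-english-v1")
    vectors = embeddings.embed_documents(["hello world", "goodbye world"])
    print(len(vectors), len(vectors[0]))
    print(embeddings.embed_query("hello again")[:5])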
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~combine_documents~map_reduce.py | """Combining documents by mapping a chain over them first, then combining results."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Type
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.pydantic_v1 import BaseModel, Extra, create_model, root_validator
from langchain.schema.runnable.config import RunnableConfig
class MapReduceDocumentsChain(BaseCombineDocumentsChain):
"""Combining documents by mapping a chain over them, then combining results.
We first call `llm_chain` on each document individually, passing in the
`page_content` and any other kwargs. This is the `map` step.
We then process the results of that `map` step in a `reduce` step. This should
likely be a ReduceDocumentsChain.
Example:
.. code-block:: python
from langchain.chains import (
StuffDocumentsChain,
LLMChain,
ReduceDocumentsChain,
MapReduceDocumentsChain,
)
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
# We now define how to combine these summaries
reduce_prompt = PromptTemplate.from_template(
"Combine these summaries: {context}"
)
reduce_llm_chain = LLMChain(llm=llm, prompt=reduce_prompt)
combine_documents_chain = StuffDocumentsChain(
llm_chain=reduce_llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
)
chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
)
# If we wanted to, we could also pass in collapse_documents_chain
# which is specifically aimed at collapsing documents BEFORE
# the final call.
prompt = PromptTemplate.from_template(
"Collapse this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
collapse_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_documents_chain,
)
chain = MapReduceDocumentsChain(
llm_chain=llm_chain,
reduce_documents_chain=reduce_documents_chain,
)
"""
llm_chain: LLMChain
"""Chain to apply to each document individually."""
reduce_documents_chain: BaseCombineDocumentsChain
"""Chain to use to reduce the results of applying `llm_chain` to each doc.
This typically either a ReduceDocumentChain or StuffDocumentChain."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
return_intermediate_steps: bool = False
"""Return the results of the map steps in the output."""
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
if self.return_intermediate_steps:
return create_model(
"MapReduceDocumentsOutput",
**{
self.output_key: (str, None),
"intermediate_steps": (List[str], None),
}, # type: ignore[call-overload]
)
return super().get_output_schema(config)
@property
def output_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_intermediate_steps:
_output_keys = _output_keys + ["intermediate_steps"]
return _output_keys
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_reduce_chain(cls, values: Dict) -> Dict:
"""For backwards compatibility."""
if "combine_document_chain" in values:
if "reduce_documents_chain" in values:
raise ValueError(
"Both `reduce_documents_chain` and `combine_document_chain` "
"cannot be provided at the same time. `combine_document_chain` "
"is deprecated, please only provide `reduce_documents_chain`"
)
combine_chain = values["combine_document_chain"]
collapse_chain = values.get("collapse_document_chain")
reduce_chain = ReduceDocumentsChain(
combine_documents_chain=combine_chain,
collapse_documents_chain=collapse_chain,
)
values["reduce_documents_chain"] = reduce_chain
del values["combine_document_chain"]
if "collapse_document_chain" in values:
del values["collapse_document_chain"]
return values
@root_validator(pre=True)
def get_return_intermediate_steps(cls, values: Dict) -> Dict:
"""For backwards compatibility."""
if "return_map_steps" in values:
values["return_intermediate_steps"] = values["return_map_steps"]
del values["return_map_steps"]
return values
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
@property
def collapse_document_chain(self) -> BaseCombineDocumentsChain:
"""Kept for backward compatibility."""
if isinstance(self.reduce_documents_chain, ReduceDocumentsChain):
if self.reduce_documents_chain.collapse_documents_chain:
return self.reduce_documents_chain.collapse_documents_chain
else:
return self.reduce_documents_chain.combine_documents_chain
else:
raise ValueError(
f"`reduce_documents_chain` is of type "
f"{type(self.reduce_documents_chain)} so it does not have "
f"this attribute."
)
@property
def combine_document_chain(self) -> BaseCombineDocumentsChain:
"""Kept for backward compatibility."""
if isinstance(self.reduce_documents_chain, ReduceDocumentsChain):
return self.reduce_documents_chain.combine_documents_chain
else:
raise ValueError(
f"`reduce_documents_chain` is of type "
f"{type(self.reduce_documents_chain)} so it does not have "
f"this attribute."
)
def combine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
map_results = self.llm_chain.apply(
# FYI - this is parallelized and so it is fast.
[{self.document_variable_name: d.page_content, **kwargs} for d in docs],
callbacks=callbacks,
)
question_result_key = self.llm_chain.output_key
result_docs = [
Document(page_content=r[question_result_key], metadata=docs[i].metadata)
# This uses metadata from the docs, and the textual results from `results`
for i, r in enumerate(map_results)
]
result, extra_return_dict = self.reduce_documents_chain.combine_docs(
result_docs, token_max=token_max, callbacks=callbacks, **kwargs
)
if self.return_intermediate_steps:
intermediate_steps = [r[question_result_key] for r in map_results]
extra_return_dict["intermediate_steps"] = intermediate_steps
return result, extra_return_dict
async def acombine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
map_results = await self.llm_chain.aapply(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs],
callbacks=callbacks,
)
question_result_key = self.llm_chain.output_key
result_docs = [
Document(page_content=r[question_result_key], metadata=docs[i].metadata)
# This uses metadata from the docs, and the textual results from `results`
for i, r in enumerate(map_results)
]
result, extra_return_dict = await self.reduce_documents_chain.acombine_docs(
result_docs, token_max=token_max, callbacks=callbacks, **kwargs
)
if self.return_intermediate_steps:
intermediate_steps = [r[question_result_key] for r in map_results]
extra_return_dict["intermediate_steps"] = intermediate_steps
return result, extra_return_dict
@property
def _chain_type(self) -> str:
return "map_reduce_documents_chain"
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_enums import (
BaseModerationActions,
)
if config:
action = config.get("action", BaseModerationActions.STOP)
if action not in [BaseModerationActions.STOP, BaseModerationActions.ALLOW]:
raise ValueError("Action can either be stop or allow")
return (
self._contains_pii(prompt_value=prompt_value, config=config)
if action == BaseModerationActions.STOP
else self._detect_pii(prompt_value=prompt_value, config=config)
)
else:
return self._contains_pii(prompt_value=prompt_value)
def _contains_pii(
self, prompt_value: str, config: Optional[Dict[str, Any]] = None
) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold.
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold", 0.5) if config else 0.5
pii_labels = config.get("labels", []) if config else []
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
            ModerationPiiError: If the prompt contains PII entities configured
                to stop processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold", 0.5) # type: ignore
pii_labels = config.get("labels", []) # type: ignore
mask_marker = config.get("mask_character", "*") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
prompt_value = (
prompt_value[:char_offset_begin]
+ mask_marker * (char_offset_end - char_offset_begin)
+ prompt_value[char_offset_end:]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
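# Usage sketch (illustrative only): assumes AWS credentials with Amazon Comprehend
# access and `boto3` installed; the sample text and redaction config are arbitrary.
if __name__ == "__main__":
    import boto3
    from langchain_experimental.comprehend_moderation.base_moderation_enums import (
        BaseModerationActions,
    )
    comprehend = boto3.client("comprehend", region_name="us-east-1")
    pii = ComprehendPII(client=comprehend)
    redacted = pii.validate(
        "My name is John Doe and my phone number is 555-0100.",
        config={
            "action": BaseModerationActions.ALLOW,  # redact instead of raising
            "threshold": 0.5,
            "mask_character": "*",
        },
    )
    print(redacted)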
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~indexes~graph.py | """Graph Index Creator."""
from typing import Optional, Type
from langchain.chains.llm import LLMChain
from langchain.graphs.networkx_graph import NetworkxEntityGraph, parse_triples
from langchain.indexes.prompts.knowledge_triplet_extraction import (
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.pydantic_v1 import BaseModel
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.prompt_template import BasePromptTemplate
class GraphIndexCreator(BaseModel):
"""Functionality to create graph index."""
llm: Optional[BaseLanguageModel] = None
graph_type: Type[NetworkxEntityGraph] = NetworkxEntityGraph
def from_text(
self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
) -> NetworkxEntityGraph:
"""Create graph index from text."""
if self.llm is None:
raise ValueError("llm should not be None")
graph = self.graph_type()
chain = LLMChain(llm=self.llm, prompt=prompt)
output = chain.predict(text=text)
knowledge = parse_triples(output)
for triple in knowledge:
graph.add_triple(triple)
return graph
async def afrom_text(
self, text: str, prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
) -> NetworkxEntityGraph:
"""Create graph index from text asynchronously."""
if self.llm is None:
raise ValueError("llm should not be None")
graph = self.graph_type()
chain = LLMChain(llm=self.llm, prompt=prompt)
output = await chain.apredict(text=text)
knowledge = parse_triples(output)
for triple in knowledge:
graph.add_triple(triple)
return graph
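# Usage sketch (illustrative only): uses FakeListLLM with a canned triple so it runs
# offline (networkx is still required); swap in a real LLM to extract triples from
# arbitrary text.
if __name__ == "__main__":
    from langchain.llms.fake import FakeListLLM
    fake_llm = FakeListLLM(responses=["(Paris, is the capital of, France)"])
    creator = GraphIndexCreator(llm=fake_llm)
    graph = creator.from_text("Paris is the capital of France.")
    print(graph.get_triples())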
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~output_parsers~react_json_single_input.py | import json
import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FINAL_ANSWER_ACTION = "Final Answer:"
class ReActJsonSingleInputOutputParser(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
                # No JSON action block found; fall through to Final Answer parsing.
raise ValueError("action not found")
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
raise OutputParserException(
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception:
if not includes_answer:
raise OutputParserException(f"Could not parse LLM output: {text}")
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~anyscale.py | """Anyscale Endpoints chat wrapper. Relies heavily on ChatOpenAI."""
from __future__ import annotations
import logging
import os
import sys
from typing import TYPE_CHECKING, Dict, Optional, Set
import requests
from langchain.adapters.openai import convert_message_to_dict
from langchain.chat_models.openai import (
ChatOpenAI,
_import_tiktoken,
)
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema.messages import BaseMessage
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
import tiktoken
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class ChatAnyscale(ChatOpenAI):
"""`Anyscale` Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``ANYSCALE_API_KEY`` set with your API key.
Alternatively, you can use the anyscale_api_key keyword argument.
Any parameters that are valid to be passed to the `openai.create` call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chat_models import ChatAnyscale
chat = ChatAnyscale(model_name="meta-llama/Llama-2-7b-chat-hf")
"""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anyscale-chat"
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anyscale_api_key": "ANYSCALE_API_KEY"}
anyscale_api_key: Optional[str] = None
"""AnyScale Endpoints API keys."""
model_name: str = Field(default=DEFAULT_MODEL, alias="model")
"""Model name to use."""
anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests,
leave blank if not using a proxy or service emulator."""
anyscale_proxy: Optional[str] = None
"""To support explicit proxy for Anyscale."""
available_models: Optional[Set[str]] = None
"""Available models from Anyscale API."""
@staticmethod
def get_available_models(
anyscale_api_key: Optional[str] = None,
anyscale_api_base: str = DEFAULT_API_BASE,
) -> Set[str]:
"""Get available models from Anyscale API."""
try:
anyscale_api_key = anyscale_api_key or os.environ["ANYSCALE_API_KEY"]
except KeyError as e:
raise ValueError(
"Anyscale API key must be passed as keyword argument or "
"set in environment variable ANYSCALE_API_KEY.",
) from e
models_url = f"{anyscale_api_base}/models"
models_response = requests.get(
models_url,
headers={
"Authorization": f"Bearer {anyscale_api_key}",
},
)
if models_response.status_code != 200:
raise ValueError(
f"Error getting models from {models_url}: "
f"{models_response.status_code}",
)
return {model["id"] for model in models_response.json()["data"]}
@root_validator(pre=True)
def validate_environment_override(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_API_BASE,
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"anyscale_proxy",
"ANYSCALE_PROXY",
default="",
)
try:
import openai
except ImportError as e:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`.",
) from e
try:
values["client"] = openai.ChatCompletion
except AttributeError as exc:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`.",
) from exc
if "model_name" not in values.keys():
values["model_name"] = DEFAULT_MODEL
model_name = values["model_name"]
available_models = cls.get_available_models(
values["openai_api_key"],
values["openai_api_base"],
)
if model_name not in available_models:
raise ValueError(
f"Model name {model_name} not found in available models: "
f"{available_models}.",
)
values["available_models"] = available_models
return values
def _get_encoding_model(self) -> tuple[str, tiktoken.Encoding]:
tiktoken_ = _import_tiktoken()
if self.tiktoken_model_name is not None:
model = self.tiktoken_model_name
else:
model = self.model_name
        # Anyscale-hosted models are not registered with tiktoken, so use the
        # gpt-3.5-turbo-0301 encoding as an approximation, falling back to cl100k_base.
try:
encoding = tiktoken_.encoding_for_model("gpt-3.5-turbo-0301")
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
encoding = tiktoken_.get_encoding(model)
return model, encoding
def get_num_tokens_from_messages(self, messages: list[BaseMessage]) -> int:
"""Calculate num tokens with tiktoken package.
Official documentation: https://github.com/openai/openai-cookbook/blob/
main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
if sys.version_info[1] <= 7:
return super().get_num_tokens_from_messages(messages)
model, encoding = self._get_encoding_model()
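        # Per-message overhead constants from OpenAI's token-counting recipe:
        # each message carries a few wrapper tokens, plus one more when a name is set.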
tokens_per_message = 3
tokens_per_name = 1
num_tokens = 0
messages_dict = [convert_message_to_dict(m) for m in messages]
for message in messages_dict:
num_tokens += tokens_per_message
for key, value in message.items():
# Cast str(value) in case the message value is not a string
# This occurs with function messages
num_tokens += len(encoding.encode(str(value)))
if key == "name":
num_tokens += tokens_per_name
# every reply is primed with <im_start>assistant
num_tokens += 3
return num_tokens
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~format_scratchpad~xml.py | from typing import List, Tuple
from langchain.schema.agent import AgentAction
def format_xml(
intermediate_steps: List[Tuple[AgentAction, str]],
) -> str:
"""Format the intermediate steps as XML.
Args:
intermediate_steps: The intermediate steps.
Returns:
The intermediate steps as XML.
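    Example:
        .. code-block:: python
            # Illustrative sketch; the tool name and observation are invented.
            steps = [
                (AgentAction(tool="calculator", tool_input="2+2", log=""), "4")
            ]
            format_xml(steps)
            # '<tool>calculator</tool><tool_input>2+2</tool_input><observation>4</observation>'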
"""
log = ""
for action, observation in intermediate_steps:
log += (
f"<tool>{action.tool}</tool><tool_input>{action.tool_input}"
f"</tool_input><observation>{observation}</observation>"
)
return log
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~autonomous_agents~baby_agi~task_prioritization.py | from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
class TaskPrioritizationChain(LLMChain):
"""Chain to prioritize tasks."""
@classmethod
def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_prioritization_template = (
"Ты - AI, задача которого - привести в порядок"
" и переприоритизировать следующие задачи: {task_names}."
" Учти конечную цель твоей команды: {objective}."
" Не удаляй ни одну из задач. Верни"
" результат в виде нумерованного списка, например:"
" #. Первая задача"
" #. Вторая задача"
" Начни список задач с номера {next_task_id}."
)
prompt = PromptTemplate(
template=task_prioritization_template,
input_variables=["task_names", "next_task_id", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
| [
"next_task_id",
"Ты - AI, задача которого - привести в порядок и переприоритизировать следующие задачи: {task_names}. Учти конечную цель твоей команды: {objective}. Не удаляй ни одну из задач. Верни результат в виде нумерованного списка, например: #. Первая задача #. Вторая задача Начни список задач с номера {next_task_id}.",
"task_names"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~docstore~in_memory.py | """Simple in memory docstore in the form of a dict."""
from typing import Dict, List, Optional, Union
from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
class InMemoryDocstore(Docstore, AddableMixin):
"""Simple in memory docstore in the form of a dict."""
def __init__(self, _dict: Optional[Dict[str, Document]] = None):
"""Initialize with dict."""
self._dict = _dict if _dict is not None else {}
def add(self, texts: Dict[str, Document]) -> None:
"""Add texts to in memory dictionary.
Args:
texts: dictionary of id -> document.
Returns:
None
"""
overlapping = set(texts).intersection(self._dict)
if overlapping:
raise ValueError(f"Tried to add ids that already exist: {overlapping}")
self._dict = {**self._dict, **texts}
def delete(self, ids: List) -> None:
"""Deleting IDs from in memory dictionary."""
overlapping = set(ids).intersection(self._dict)
if not overlapping:
raise ValueError(f"Tried to delete ids that does not exist: {ids}")
for _id in ids:
self._dict.pop(_id)
def search(self, search: str) -> Union[str, Document]:
"""Search via direct lookup.
Args:
search: id of a document to search for.
Returns:
Document if found, else error message.
"""
if search not in self._dict:
return f"ID {search} not found."
else:
return self._dict[search]
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~storage~in_memory.py | """In memory store that is not thread safe and has no eviction policy.
This is a simple implementation of the BaseStore using a dictionary that is useful
primarily for unit testing purposes.
"""
from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple
from langchain.schema import BaseStore
class InMemoryStore(BaseStore[str, Any]):
"""In-memory implementation of the BaseStore using a dictionary.
Attributes:
store (Dict[str, Any]): The underlying dictionary that stores
the key-value pairs.
Examples:
.. code-block:: python
from langchain.storage import InMemoryStore
store = InMemoryStore()
store.mset([('key1', 'value1'), ('key2', 'value2')])
store.mget(['key1', 'key2'])
# ['value1', 'value2']
store.mdelete(['key1'])
list(store.yield_keys())
# ['key2']
list(store.yield_keys(prefix='k'))
# ['key2']
"""
def __init__(self) -> None:
"""Initialize an empty store."""
self.store: Dict[str, Any] = {}
def mget(self, keys: Sequence[str]) -> List[Optional[Any]]:
"""Get the values associated with the given keys.
Args:
keys (Sequence[str]): A sequence of keys.
Returns:
A sequence of optional values associated with the keys.
If a key is not found, the corresponding value will be None.
"""
return [self.store.get(key) for key in keys]
def mset(self, key_value_pairs: Sequence[Tuple[str, Any]]) -> None:
"""Set the values for the given keys.
Args:
key_value_pairs (Sequence[Tuple[str, V]]): A sequence of key-value pairs.
Returns:
None
"""
for key, value in key_value_pairs:
self.store[key] = value
def mdelete(self, keys: Sequence[str]) -> None:
"""Delete the given keys and their associated values.
Args:
keys (Sequence[str]): A sequence of keys to delete.
"""
for key in keys:
self.store.pop(key, None)
def yield_keys(self, prefix: Optional[str] = None) -> Iterator[str]:
"""Get an iterator over keys that match the given prefix.
Args:
prefix (str, optional): The prefix to match. Defaults to None.
Returns:
Iterator[str]: An iterator over keys that match the given prefix.
"""
if prefix is None:
yield from self.store.keys()
else:
for key in self.store.keys():
if key.startswith(prefix):
yield key
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~agents~load_tools.py | # flake8: noqa
"""Tools provide access to various resources and services.
LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.
These integrations allow developers to create versatile applications that combine the
power of LLMs with the ability to access, interact with and manipulate external
resources.
When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain.schema.language_model import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain.utilities.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.golden_query.tool import GoldenQueryRun
from langchain.tools.pubmed.tool import PubmedQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.google_scholar.tool import GoogleScholarQueryRun
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
from langchain.tools.graphql.tool import BaseGraphQLTool
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
from langchain.tools.scenexplain.tool import SceneXplainTool
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.sleep.tool import SleepTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain.utilities.pubmed import PubMedAPIWrapper
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.graphql import GraphQLAPIWrapper
from langchain.utilities.searchapi import SearchApiAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.twilio import TwilioAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
def _get_python_repl() -> BaseTool:
return PythonREPLTool()
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
def _get_sleep() -> BaseTool:
return SleepTool()
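# Tools that require neither an LLM nor any extra configuration to construct.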
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"python_repl": _get_python_repl,
"requests": _get_tools_requests_get, # preserved for backwards compatibility
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
"sleep": _get_sleep,
}
def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain.from_llm(llm=llm).run,
coroutine=LLMMathChain.from_llm(llm=llm).arun,
)
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open-Meteo-API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News-API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB-API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
)
return Tool(
name="Podcast-API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_lambda_api(**kwargs: Any) -> BaseTool:
return Tool(
name=kwargs["awslambda_tool_name"],
description=kwargs["awslambda_tool_description"],
func=LambdaWrapper(**kwargs).run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_golden_query(**kwargs: Any) -> BaseTool:
return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
def _get_pubmed(**kwargs: Any) -> BaseTool:
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_scholar(**kwargs: Any) -> BaseTool:
return GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(**kwargs))
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_searchapi(**kwargs: Any) -> BaseTool:
return SearchAPIRun(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_searchapi_results_json(**kwargs: Any) -> BaseTool:
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_dalle_image_generator(**kwargs: Any) -> Tool:
return Tool(
"Dall-E-Image-Generator",
DallEAPIWrapper(**kwargs).run,
"A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.",
)
def _get_twilio(**kwargs: Any) -> BaseTool:
return Tool(
name="Text-Message",
description="Useful for when you need to send a text message to a provided phone number.",
func=TwilioAPIWrapper(**kwargs).run,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
def _get_scenexplain(**kwargs: Any) -> BaseTool:
return SceneXplainTool(**kwargs)
def _get_graphql_tool(**kwargs: Any) -> BaseTool:
graphql_endpoint = kwargs["graphql_endpoint"]
wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint)
return BaseGraphQLTool(graphql_wrapper=wrapper)
def _get_openweathermap(**kwargs: Any) -> BaseTool:
return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs))
def _get_dataforseo_api_search(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_dataforseo_api_search_json(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:
return ElevenLabsText2SpeechTool(**kwargs)
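# Tools that need an LLM plus the extra keyword arguments listed next to each factory.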
_EXTRA_LLM_TOOLS: Dict[
str,
Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]],
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
}
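# Tools that accept optional keyword arguments; only the listed keys are forwarded
# to the factory when they are passed to load_tools.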
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
"ddg-search": (_get_ddg_search, []),
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
"google-scholar": (
_get_google_scholar,
["top_k_results", "hl", "lr", "serp_api_key"],
),
"google-serper-results-json": (
_get_google_serper_results_json,
["serper_api_key", "aiosession"],
),
"searchapi": (_get_searchapi, ["searchapi_api_key", "aiosession"]),
"searchapi-results-json": (
_get_searchapi_results_json,
["searchapi_api_key", "aiosession"],
),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"dalle-image-generator": (_get_dalle_image_generator, ["openai_api_key"]),
"twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"arxiv": (
_get_arxiv,
["top_k_results", "load_max_docs", "load_all_available_meta"],
),
"golden-query": (_get_golden_query, ["golden_api_key"]),
"pubmed": (_get_pubmed, ["top_k_results"]),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
"awslambda": (
_get_lambda_api,
["awslambda_tool_name", "awslambda_tool_description", "function_name"],
),
"sceneXplain": (_get_scenexplain, []),
"graphql": (_get_graphql_tool, ["graphql_endpoint"]),
"openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]),
"dataforseo-api-search": (
_get_dataforseo_api_search,
["api_login", "api_password", "aiosession"],
),
"dataforseo-api-search-json": (
_get_dataforseo_api_search_json,
["api_login", "api_password", "aiosession"],
),
"eleven_labs_text2speech": (_get_eleven_labs_text2speech, ["eleven_api_key"]),
}
def _handle_callbacks(
callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks
) -> Callbacks:
if callback_manager is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
if callbacks is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks arguments."
)
return callback_manager
return callbacks
def load_huggingface_tool(
task_or_repo_id: str,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
remote: bool = False,
**kwargs: Any,
) -> BaseTool:
"""Loads a tool from the HuggingFace Hub.
Args:
task_or_repo_id: Task or model repo id.
model_repo_id: Optional model repo id.
token: Optional token.
remote: Optional remote. Defaults to False.
**kwargs:
Returns:
A tool.
"""
try:
from transformers import load_tool
except ImportError:
raise ImportError(
"HuggingFace tools require the libraries `transformers>=4.29.0`"
" and `huggingface_hub>=0.14.1` to be installed."
" Please install it with"
" `pip install --upgrade transformers huggingface_hub`."
)
hf_tool = load_tool(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
remote=remote,
**kwargs,
)
outputs = hf_tool.outputs
if set(outputs) != {"text"}:
raise NotImplementedError("Multimodal outputs not supported yet.")
inputs = hf_tool.inputs
if set(inputs) != {"text"}:
raise NotImplementedError("Multimodal inputs not supported yet.")
return Tool.from_function(
hf_tool.__call__, name=hf_tool.name, description=hf_tool.description
)
def load_tools(
tool_names: List[str],
llm: Optional[BaseLanguageModel] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
Please scope the permissions of each tools to the minimum required for the
application.
For example, if an application only needs to read from a database,
the database tool should not be given write permissions. Moreover
consider scoping the permissions to only allow accessing specific
tables and impose user-level quota for limiting resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
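    Example:
        .. code-block:: python
            # A minimal sketch: "llm-math" requires an LLM, "terminal" does not.
            # `llm` is assumed to be a language model you have already configured.
            tools = load_tools(["llm-math", "terminal"], llm=llm)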
"""
tools = []
callbacks = _handle_callbacks(
callback_manager=kwargs.get("callback_manager"), callbacks=callbacks
)
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chains~moderation.py | """Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.pydantic_v1 import root_validator
from langchain.utils import get_from_dict_or_env
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["client"] = openai.Moderation
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: dict) -> str:
if results["flagged"]:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
text = inputs[self.input_key]
results = self.client.create(text)
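        # Only a single text is submitted per call, so inspect the first (and only) result.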
output = self._moderate(text, results["results"][0])
return {self.output_key: output}
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~callbacks~comet_ml_callback.py | import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence
import langchain
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
import_spacy,
import_textstat,
)
from langchain.schema import AgentAction, AgentFinish, Generation, LLMResult
LANGCHAIN_MODEL_NAME = "langchain-model"
def import_comet_ml() -> Any:
"""Import comet_ml and raise an error if it is not installed."""
try:
import comet_ml # noqa: F401
except ImportError:
raise ImportError(
"To use the comet_ml callback manager you need to have the "
"`comet_ml` python package installed. Please install it with"
" `pip install comet_ml`"
)
return comet_ml
def _get_experiment(
workspace: Optional[str] = None, project_name: Optional[str] = None
) -> Any:
comet_ml = import_comet_ml()
experiment = comet_ml.Experiment( # type: ignore
workspace=workspace,
project_name=project_name,
)
return experiment
def _fetch_text_complexity_metrics(text: str) -> dict:
textstat = import_textstat()
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"text_standard": textstat.text_standard(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
return text_complexity_metrics
def _summarize_metrics_for_generated_outputs(metrics: Sequence) -> dict:
pd = import_pandas()
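    # describe() summarizes each metric column (count, mean, std, quartiles, min/max)
    # across all generated outputs.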
metrics_df = pd.DataFrame(metrics)
metrics_summary = metrics_df.describe()
return metrics_summary.to_dict()
class CometCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""Callback Handler that logs to Comet.
Parameters:
        task_type (str): The type of comet_ml task such as "inference",
            "testing" or "qc"
        workspace (str): The comet_ml workspace
        project_name (str): The comet_ml project name
        tags (list): Tags to add to the task
        name (str): Name of the comet_ml experiment
        visualizations (list): The spacy visualization styles (e.g. "dep", "ent")
            to log for generated outputs
        complexity_metrics (bool): Whether to log complexity metrics
        custom_metrics (Callable): A function that computes custom metrics for
            each generated output
        stream_logs (bool): Whether to stream callback actions to Comet
    This handler uses the associated callback method, formats the input of each
    callback function with metadata regarding the state of the LLM run, appends
    the response to the records for both the {method}_records and action_records
    lists, and then logs the response to Comet.
"""
def __init__(
self,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = None,
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
stream_logs: bool = True,
) -> None:
"""Initialize callback handler."""
self.comet_ml = import_comet_ml()
super().__init__()
self.task_type = task_type
self.workspace = workspace
self.project_name = project_name
self.tags = tags
self.visualizations = visualizations
self.complexity_metrics = complexity_metrics
self.custom_metrics = custom_metrics
self.stream_logs = stream_logs
self.temp_dir = tempfile.TemporaryDirectory()
self.experiment = _get_experiment(workspace, project_name)
self.experiment.log_other("Created from", "langchain")
if tags:
self.experiment.add_tags(tags)
self.name = name
if self.name:
self.experiment.set_name(self.name)
warning = (
"The comet_ml callback is currently in beta and is subject to change "
"based on updates to `langchain`. Please report any issues to "
"https://github.com/comet-ml/issue-tracking/issues with the tag "
"`langchain`."
)
self.comet_ml.LOGGER.warning(warning)
self.callback_columns: list = []
self.action_records: list = []
self.complexity_metrics = complexity_metrics
if self.visualizations:
spacy = import_spacy()
self.nlp = spacy.load("en_core_web_sm")
else:
self.nlp = None
def _init_resp(self) -> Dict:
return {k: None for k in self.callback_columns}
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
metadata = self._init_resp()
metadata.update({"action": "on_llm_start"})
metadata.update(flatten_dict(serialized))
metadata.update(self.get_custom_callback_meta())
for prompt in prompts:
prompt_resp = deepcopy(metadata)
prompt_resp["prompts"] = prompt
self.on_llm_start_records.append(prompt_resp)
self.action_records.append(prompt_resp)
if self.stream_logs:
self._log_stream(prompt, metadata, self.step)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
self.step += 1
self.llm_streams += 1
resp = self._init_resp()
resp.update({"action": "on_llm_new_token", "token": token})
resp.update(self.get_custom_callback_meta())
self.action_records.append(resp)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
metadata = self._init_resp()
metadata.update({"action": "on_llm_end"})
metadata.update(flatten_dict(response.llm_output or {}))
metadata.update(self.get_custom_callback_meta())
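        # Collect per-generation complexity and custom metrics so they can be
        # summarized and logged in one pass after the loop.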
output_complexity_metrics = []
output_custom_metrics = []
for prompt_idx, generations in enumerate(response.generations):
for gen_idx, generation in enumerate(generations):
text = generation.text
generation_resp = deepcopy(metadata)
generation_resp.update(flatten_dict(generation.dict()))
complexity_metrics = self._get_complexity_metrics(text)
if complexity_metrics:
output_complexity_metrics.append(complexity_metrics)
generation_resp.update(complexity_metrics)
custom_metrics = self._get_custom_metrics(
generation, prompt_idx, gen_idx
)
if custom_metrics:
output_custom_metrics.append(custom_metrics)
generation_resp.update(custom_metrics)
if self.stream_logs:
self._log_stream(text, metadata, self.step)
self.action_records.append(generation_resp)
self.on_llm_end_records.append(generation_resp)
self._log_text_metrics(output_complexity_metrics, step=self.step)
self._log_text_metrics(output_custom_metrics, step=self.step)
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
for chain_input_key, chain_input_val in inputs.items():
if isinstance(chain_input_val, str):
input_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_input_val, resp, self.step)
input_resp.update({chain_input_key: chain_input_val})
self.action_records.append(input_resp)
else:
self.comet_ml.LOGGER.warning(
f"Unexpected data format provided! "
f"Input Value for {chain_input_key} will not be logged"
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_chain_end"})
resp.update(self.get_custom_callback_meta())
for chain_output_key, chain_output_val in outputs.items():
if isinstance(chain_output_val, str):
output_resp = deepcopy(resp)
if self.stream_logs:
self._log_stream(chain_output_val, resp, self.step)
output_resp.update({chain_output_key: chain_output_val})
self.action_records.append(output_resp)
else:
self.comet_ml.LOGGER.warning(
f"Unexpected data format provided! "
f"Output Value for {chain_output_key} will not be logged"
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp = self._init_resp()
resp.update({"action": "on_tool_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(input_str, resp, self.step)
resp.update({"input_str": input_str})
self.action_records.append(resp)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp = self._init_resp()
resp.update({"action": "on_tool_end"})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp = self._init_resp()
resp.update({"action": "on_text"})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(text, resp, self.step)
resp.update({"text": text})
self.action_records.append(resp)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp = self._init_resp()
output = finish.return_values["output"]
log = finish.log
resp.update({"action": "on_agent_finish", "log": log})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(output, resp, self.step)
resp.update({"output": output})
self.action_records.append(resp)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
tool = action.tool
tool_input = str(action.tool_input)
log = action.log
resp = self._init_resp()
resp.update({"action": "on_agent_action", "log": log, "tool": tool})
resp.update(self.get_custom_callback_meta())
if self.stream_logs:
self._log_stream(tool_input, resp, self.step)
resp.update({"tool_input": tool_input})
self.action_records.append(resp)
def _get_complexity_metrics(self, text: str) -> dict:
"""Compute text complexity metrics using textstat.
Parameters:
text (str): The text to analyze.
Returns:
(dict): A dictionary containing the complexity metrics.
"""
resp = {}
if self.complexity_metrics:
text_complexity_metrics = _fetch_text_complexity_metrics(text)
resp.update(text_complexity_metrics)
return resp
def _get_custom_metrics(
self, generation: Generation, prompt_idx: int, gen_idx: int
) -> dict:
"""Compute Custom Metrics for an LLM Generated Output
Args:
generation (LLMResult): Output generation from an LLM
prompt_idx (int): List index of the input prompt
gen_idx (int): List index of the generated output
Returns:
dict: A dictionary containing the custom metrics.
"""
resp = {}
if self.custom_metrics:
custom_metrics = self.custom_metrics(generation, prompt_idx, gen_idx)
resp.update(custom_metrics)
return resp
def flush_tracker(
self,
langchain_asset: Any = None,
task_type: Optional[str] = "inference",
workspace: Optional[str] = None,
project_name: Optional[str] = "comet-langchain-demo",
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
finish: bool = False,
reset: bool = False,
) -> None:
"""Flush the tracker and setup the session.
Everything after this will be a new table.
Args:
            name: A name that identifies the session logged so far.
langchain_asset: The langchain asset to save.
finish: Whether to finish the run.
Returns:
None
"""
self._log_session(langchain_asset)
if langchain_asset:
try:
self._log_model(langchain_asset)
except Exception:
self.comet_ml.LOGGER.error(
"Failed to export agent or LLM to Comet",
exc_info=True,
extra={"show_traceback": True},
)
if finish:
self.experiment.end()
if reset:
self._reset(
task_type,
workspace,
project_name,
tags,
name,
visualizations,
complexity_metrics,
custom_metrics,
)
def _log_stream(self, prompt: str, metadata: dict, step: int) -> None:
self.experiment.log_text(prompt, metadata=metadata, step=step)
def _log_model(self, langchain_asset: Any) -> None:
model_parameters = self._get_llm_parameters(langchain_asset)
self.experiment.log_parameters(model_parameters, prefix="model")
langchain_asset_path = Path(self.temp_dir.name, "model.json")
model_name = self.name if self.name else LANGCHAIN_MODEL_NAME
try:
if hasattr(langchain_asset, "save"):
langchain_asset.save(langchain_asset_path)
self.experiment.log_model(model_name, str(langchain_asset_path))
except (ValueError, AttributeError, NotImplementedError) as e:
if hasattr(langchain_asset, "save_agent"):
langchain_asset.save_agent(langchain_asset_path)
self.experiment.log_model(model_name, str(langchain_asset_path))
else:
self.comet_ml.LOGGER.error(
f"{e}"
" Could not save Langchain Asset "
f"for {langchain_asset.__class__.__name__}"
)
def _log_session(self, langchain_asset: Optional[Any] = None) -> None:
try:
llm_session_df = self._create_session_analysis_dataframe(langchain_asset)
# Log the cleaned dataframe as a table
self.experiment.log_table("langchain-llm-session.csv", llm_session_df)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data to Comet",
exc_info=True,
extra={"show_traceback": True},
)
try:
metadata = {"gigachain_version": str(langchain.__version__)}
# Log the langchain low-level records as a JSON file directly
self.experiment.log_asset_data(
self.action_records, "langchain-action_records.json", metadata=metadata
)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log session data to Comet",
exc_info=True,
extra={"show_traceback": True},
)
try:
self._log_visualizations(llm_session_df)
except Exception:
self.comet_ml.LOGGER.warning(
"Failed to log visualizations to Comet",
exc_info=True,
extra={"show_traceback": True},
)
def _log_text_metrics(self, metrics: Sequence[dict], step: int) -> None:
if not metrics:
return
metrics_summary = _summarize_metrics_for_generated_outputs(metrics)
for key, value in metrics_summary.items():
self.experiment.log_metrics(value, prefix=key, step=step)
def _log_visualizations(self, session_df: Any) -> None:
if not (self.visualizations and self.nlp):
return
spacy = import_spacy()
prompts = session_df["prompts"].tolist()
outputs = session_df["text"].tolist()
for idx, (prompt, output) in enumerate(zip(prompts, outputs)):
doc = self.nlp(output)
sentence_spans = list(doc.sents)
for visualization in self.visualizations:
try:
html = spacy.displacy.render(
sentence_spans,
style=visualization,
options={"compact": True},
jupyter=False,
page=True,
)
self.experiment.log_asset_data(
html,
name=f"langchain-viz-{visualization}-{idx}.html",
metadata={"prompt": prompt},
step=idx,
)
except Exception as e:
self.comet_ml.LOGGER.warning(
e, exc_info=True, extra={"show_traceback": True}
)
return
def _reset(
self,
task_type: Optional[str] = None,
workspace: Optional[str] = None,
project_name: Optional[str] = None,
tags: Optional[Sequence] = None,
name: Optional[str] = None,
visualizations: Optional[List[str]] = None,
complexity_metrics: bool = False,
custom_metrics: Optional[Callable] = None,
) -> None:
_task_type = task_type if task_type else self.task_type
_workspace = workspace if workspace else self.workspace
_project_name = project_name if project_name else self.project_name
_tags = tags if tags else self.tags
_name = name if name else self.name
_visualizations = visualizations if visualizations else self.visualizations
_complexity_metrics = (
complexity_metrics if complexity_metrics else self.complexity_metrics
)
_custom_metrics = custom_metrics if custom_metrics else self.custom_metrics
self.__init__( # type: ignore
task_type=_task_type,
workspace=_workspace,
project_name=_project_name,
tags=_tags,
name=_name,
visualizations=_visualizations,
complexity_metrics=_complexity_metrics,
custom_metrics=_custom_metrics,
)
self.reset_callback_meta()
self.temp_dir = tempfile.TemporaryDirectory()
def _create_session_analysis_dataframe(self, langchain_asset: Any = None) -> dict:
pd = import_pandas()
llm_parameters = self._get_llm_parameters(langchain_asset)
num_generations_per_prompt = llm_parameters.get("n", 1)
llm_start_records_df = pd.DataFrame(self.on_llm_start_records)
# Repeat each input row based on the number of outputs generated per prompt
llm_start_records_df = llm_start_records_df.loc[
llm_start_records_df.index.repeat(num_generations_per_prompt)
].reset_index(drop=True)
llm_end_records_df = pd.DataFrame(self.on_llm_end_records)
llm_session_df = pd.merge(
llm_start_records_df,
llm_end_records_df,
left_index=True,
right_index=True,
suffixes=["_llm_start", "_llm_end"],
)
return llm_session_df
def _get_llm_parameters(self, langchain_asset: Any = None) -> dict:
if not langchain_asset:
return {}
try:
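            # Probe the common asset layouts: an agent wrapping an llm_chain,
            # a bare llm_chain, a raw llm, or the asset's own parameters.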
if hasattr(langchain_asset, "agent"):
llm_parameters = langchain_asset.agent.llm_chain.llm.dict()
elif hasattr(langchain_asset, "llm_chain"):
llm_parameters = langchain_asset.llm_chain.llm.dict()
elif hasattr(langchain_asset, "llm"):
llm_parameters = langchain_asset.llm.dict()
else:
llm_parameters = langchain_asset.dict()
except Exception:
return {}
return llm_parameters
| [
"n"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~arcee.py | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BaseRetriever
from langchain.utilities.arcee import ArceeWrapper, DALMFilter
from langchain.utils import get_from_dict_or_env
class ArceeRetriever(BaseRetriever):
"""Document retriever for Arcee's Domain Adapted Language Models (DALMs).
To use, set the ``ARCEE_API_KEY`` environment variable with your Arcee API key,
or pass ``arcee_api_key`` as a named parameter.
Example:
.. code-block:: python
from langchain.retrievers import ArceeRetriever
retriever = ArceeRetriever(
model="DALM-PubMed",
arcee_api_key="ARCEE-API-KEY"
)
documents = retriever.get_relevant_documents("AI-driven music therapy")
"""
_client: Optional[ArceeWrapper] = None #: :meta private:
"""Arcee client."""
arcee_api_key: str = ""
"""Arcee API Key"""
model: str
"""Arcee DALM name"""
arcee_api_url: str = "https://api.arcee.ai"
"""Arcee API URL"""
arcee_api_version: str = "v2"
"""Arcee API Version"""
arcee_app_url: str = "https://app.arcee.ai"
"""Arcee App URL"""
model_kwargs: Optional[Dict[str, Any]] = None
"""Keyword arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
underscore_attrs_are_private = True
def __init__(self, **data: Any) -> None:
"""Initializes private fields."""
super().__init__(**data)
self._client = ArceeWrapper(
arcee_api_key=self.arcee_api_key,
arcee_api_url=self.arcee_api_url,
arcee_api_version=self.arcee_api_version,
model_kwargs=self.model_kwargs,
model_name=self.model,
)
self._client.validate_model_training_status()
@root_validator()
def validate_environments(cls, values: Dict) -> Dict:
"""Validate Arcee environment variables."""
# validate env vars
values["arcee_api_key"] = get_from_dict_or_env(
values,
"arcee_api_key",
"ARCEE_API_KEY",
)
values["arcee_api_url"] = get_from_dict_or_env(
values,
"arcee_api_url",
"ARCEE_API_URL",
)
values["arcee_app_url"] = get_from_dict_or_env(
values,
"arcee_app_url",
"ARCEE_APP_URL",
)
values["arcee_api_version"] = get_from_dict_or_env(
values,
"arcee_api_version",
"ARCEE_API_VERSION",
)
# validate model kwargs
if values["model_kwargs"]:
kw = values["model_kwargs"]
# validate size
if kw.get("size") is not None:
if not kw.get("size") >= 0:
raise ValueError("`size` must not be negative.")
# validate filters
if kw.get("filters") is not None:
if not isinstance(kw.get("filters"), List):
raise ValueError("`filters` must be a list.")
for f in kw.get("filters"):
DALMFilter(**f)
return values
def _get_relevant_documents(
self, query: str, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
"""Retrieve {size} contexts with your retriever for a given query
Args:
query: Query to submit to the model
size: The max number of context results to retrieve.
Defaults to 3. (Can be less if filters are provided).
filters: Filters to apply to the context dataset.
"""
try:
if not self._client:
raise ValueError("Client is not initialized.")
return self._client.retrieve(query=query, **kwargs)
except Exception as e:
raise ValueError(f"Error while retrieving documents: {e}") from e
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~pal_chain~colored_object_prompt.py | # flake8: noqa
from langchain.prompts.prompt import PromptTemplate
template = (
"""
# Генерация кода Python3 для решения задач
# Q: На тумбочке находятся красный карандаш, фиолетовая кружка, бордовый брелок, розовый мишка, черная тарелка и синий мяч для снятия стресса. Какого цвета мяч для снятия стресса?
# Поместим объекты в словарь для быстрого поиска
objects = dict()
objects['pencil'] = 'red'
objects['mug'] = 'purple'
objects['keychain'] = 'burgundy'
objects['teddy bear'] = 'fuchsia'
objects['plate'] = 'black'
objects['stress ball'] = 'blue'
# Найдем цвет мяча для снятия стресса
stress_ball_color = objects['stress ball']
answer = stress_ball_color
# Q: На столе вы видите ряд объектов: фиолетовую скрепку, розовый мяч для снятия стресса, коричневый брелок, зеленый зарядный кабель, малиновый спиннер и бордовую ручку. Какого цвета объект сразу справа от мяча для снятия стресса?
# Поместим объекты в список для сохранения порядка
objects = []
objects += [('paperclip', 'purple')] * 1
objects += [('stress ball', 'pink')] * 1
objects += [('keychain', 'brown')] * 1
objects += [('scrunchiephone charger', 'green')] * 1
objects += [('fidget spinner', 'mauve')] * 1
objects += [('pen', 'burgundy')] * 1
# Найдем индекс мяча для снятия стресса
stress_ball_idx = None
for i, object in enumerate(objects):
if object[0] == 'stress ball':
stress_ball_idx = i
break
# Найдем объект сразу справа
direct_right = objects[i+1]
# Проверим цвет объекта справа
direct_right_color = direct_right[1]
answer = direct_right_color
# Q: На тумбочке вы видите следующие предметы, расположенные в ряд: бирюзовую тарелку, бордовый брелок, желтый зарядный кабель, оранжевую кружку, розовую тетрадь и серую чашку. Сколько неоранжевых предметов вы видите слева от бирюзового предмета?
# Поместим объекты в список для сохранения порядка
objects = []
objects += [('plate', 'teal')] * 1
objects += [('keychain', 'burgundy')] * 1
objects += [('scrunchiephone charger', 'yellow')] * 1
objects += [('mug', 'orange')] * 1
objects += [('notebook', 'pink')] * 1
objects += [('cup', 'grey')] * 1
# Найдем индекс бирюзового предмета
teal_idx = None
for i, object in enumerate(objects):
if object[1] == 'teal':
teal_idx = i
break
# Найдем неоранжевые предметы слева от бирюзового предмета
non_orange = [object for object in objects[:i] if object[1] != 'orange']
# Подсчитаем количество неоранжевых предметов
num_non_orange = len(non_orange)
answer = num_non_orange
# Q: {question}
""".strip()
+ "\n"
)
COLORED_OBJECT_PROMPT = PromptTemplate(input_variables=["question"], template=template)
| [
"question",
"# Генерация кода Python3 для решения задач\n# Q: На тумбочке находятся красный карандаш, фиолетовая кружка, бордовый брелок, розовый мишка, черная тарелка и синий мяч для снятия стресса. Какого цвета мяч для снятия стресса?\n# Поместим объекты в словарь для быстрого поиска\nobjects = dict()\nobjects['pencil'] = 'red'\nobjects['mug'] = 'purple'\nobjects['keychain'] = 'burgundy'\nobjects['teddy bear'] = 'fuchsia'\nobjects['plate'] = 'black'\nobjects['stress ball'] = 'blue'\n\n# Найдем цвет мяча для снятия стресса\nstress_ball_color = objects['stress ball']\nanswer = stress_ball_color\n\n\n# Q: На столе вы видите ряд объектов: фиолетовую скрепку, розовый мяч для снятия стресса, коричневый брелок, зеленый зарядный кабель, малиновый спиннер и бордовую ручку. Какого цвета объект сразу справа от мяча для снятия стресса?\n# Поместим объекты в список для сохранения порядка\nobjects = []\nobjects += [('paperclip', 'purple')] * 1\nobjects += [('stress ball', 'pink')] * 1\nobjects += [('keychain', 'brown')] * 1\nobjects += [('scrunchiephone charger', 'green')] * 1\nobjects += [('fidget spinner', 'mauve')] * 1\nobjects += [('pen', 'burgundy')] * 1\n\n# Найдем индекс мяча для снятия стресса\nstress_ball_idx = None\nfor i, object in enumerate(objects):\n if object[0] == 'stress ball':\n stress_ball_idx = i\n break\n\n# Найдем объект сразу справа\ndirect_right = objects[i+1]\n\n# Проверим цвет объекта справа\ndirect_right_color = direct_right[1]\nanswer = direct_right_color\n\n\n# Q: На тумбочке вы видите следующие предметы, расположенные в ряд: бирюзовую тарелку, бордовый брелок, желтый зарядный кабель, оранжевую кружку, розовую тетрадь и серую чашку. Сколько неоранжевых предметов вы видите слева от бирюзового предмета?\n# Поместим объекты в список для сохранения порядка\nobjects = []\nobjects += [('plate', 'teal')] * 1\nobjects += [('keychain', 'burgundy')] * 1\nobjects += [('scrunchiephone charger', 'yellow')] * 1\nobjects += [('mug', 'orange')] * 1\nobjects += [('notebook', 'pink')] * 1\nobjects += [('cup', 'grey')] * 1\n\n# Найдем индекс бирюзового предмета\nteal_idx = None\nfor i, object in enumerate(objects):\n if object[1] == 'teal':\n teal_idx = i\n break\n\n# Найдем неоранжевые предметы слева от бирюзового предмета\nnon_orange = [object for object in objects[:i] if object[1] != 'orange']\n\n# Подсчитаем количество неоранжевых предметов\nnum_non_orange = len(non_orange)\nanswer = num_non_orange\n\n\n# Q: {question}\n"
] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~searx_search.py | """Utility for using SearxNG meta search API.
SearxNG is a privacy-friendly free metasearch engine that aggregates results from
`multiple search engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and databases and
supports the `OpenSearch
<https://github.com/dewitt/opensearch/blob/master/opensearch-1-1-draft-6.md>`_
specification.
More details on the installation instructions `here. <../../integrations/searx.html>`_
For the search API refer to https://docs.searxng.org/dev/search_api.html
Quick Start
-----------
In order to use this utility you need to provide the searx host. This can be done
by passing the named parameter :attr:`searx_host <SearxSearchWrapper.searx_host>`
or exporting the environment variable SEARX_HOST.
Note: this is the only required parameter.
Then create a searx search instance like this:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
# when the host starts with `http` SSL is disabled and the connection
# is assumed to be on a private network
searx_host='http://self.hosted'
search = SearxSearchWrapper(searx_host=searx_host)
You can now use the ``search`` instance to query the searx API.
Searching
---------
Use the :meth:`run() <SearxSearchWrapper.run>` and
:meth:`results() <SearxSearchWrapper.results>` methods to query the searx API.
Other methods are available for convenience.
:class:`SearxResults` is a convenience wrapper around the raw json result.
Example usage of the ``run`` method to make a search:
.. code-block:: python
s.run(query="what is the best search engine?")
Engine Parameters
-----------------
You can pass any `accepted searx search API
<https://docs.searxng.org/dev/search_api.html>`_ parameters to the
:py:class:`SearxSearchWrapper` instance.
In the following example we are using the
:attr:`engines <SearxSearchWrapper.engines>` and the ``language`` parameters:
.. code-block:: python
# assuming the searx host is set as above or exported as an env variable
s = SearxSearchWrapper(engines=['google', 'bing'],
language='es')
Search Tips
-----------
Searx offers a special
`search syntax <https://docs.searxng.org/user/index.html#search-syntax>`_
that can also be used instead of passing engine parameters.
For example the following query:
.. code-block:: python
s = SearxSearchWrapper("langchain library", engines=['github'])
# can also be written as:
s = SearxSearchWrapper("langchain library !github")
# or even:
s = SearxSearchWrapper("langchain library !gh")
In some situations you might want to pass an extra string to the search query.
For example when the `run()` method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.
.. code-block:: python
    # select the github engine and pass the search suffix
    s = SearxSearchWrapper("langchain library", query_suffix="!gh")
    s = SearxSearchWrapper("langchain library")
    # or restrict results to github with the conventional google search syntax
    s.run("large language models", query_suffix="site:github.com")
*NOTE*: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.
See `SearxNG Configured Engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and
`SearxNG Search Syntax <https://docs.searxng.org/user/index.html#id1>`_
for more details.
Notes
-----
This wrapper is based on the SearxNG fork https://github.com/searxng/searxng which is
better maintained than the original Searx project and offers more features.
Public searxNG instances often use a rate limiter for API usage, so you might want to
use a self hosted instance and disable the rate limiter.
If you are self-hosting an instance you can customize the rate limiter for your
own network as described
`here <https://docs.searxng.org/src/searx.botdetection.html#limiter-src>`_.
For a list of public SearxNG instances see https://searx.space/
"""
import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain.pydantic_v1 import (
BaseModel,
Extra,
Field,
PrivateAttr,
root_validator,
validator,
)
from langchain.utils import get_from_dict_or_env
def _get_default_params() -> dict:
return {"language": "en", "format": "json"}
class SearxResults(dict):
"""Dict like wrapper around search api results."""
_data: str = ""
def __init__(self, data: str):
"""Take a raw result from Searx and make it into a dict like object."""
json_data = json.loads(data)
super().__init__(json_data)
self.__dict__ = self
def __str__(self) -> str:
"""Text representation of searx result."""
return self._data
@property
def results(self) -> Any:
"""Silence mypy for accessing this field.
:meta private:
"""
return self.get("results")
@property
def answers(self) -> Any:
"""Helper accessor on the json result."""
return self.get("answers")
class SearxSearchWrapper(BaseModel):
"""Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
``searx_host`` or exporting the environment variable ``SEARX_HOST``.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
``unsecure``. You can also pass the host url scheme as ``http`` to disable SSL.
Example:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
"""
_result: SearxResults = PrivateAttr()
searx_host: str = ""
unsecure: bool = False
params: dict = Field(default_factory=_get_default_params)
headers: Optional[dict] = None
engines: Optional[List[str]] = []
categories: Optional[List[str]] = []
query_suffix: Optional[str] = ""
k: int = 10
aiosession: Optional[Any] = None
@validator("unsecure")
def disable_ssl_warnings(cls, v: bool) -> bool:
"""Disable SSL warnings."""
if v:
# requests.urllib3.disable_warnings()
try:
import urllib3
urllib3.disable_warnings()
except ImportError as e:
print(e)
return v
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate that custom searx params are merged with default ones."""
user_params = values["params"]
default = _get_default_params()
values["params"] = {**default, **user_params}
engines = values.get("engines")
if engines:
values["params"]["engines"] = ",".join(engines)
categories = values.get("categories")
if categories:
values["params"]["categories"] = ",".join(categories)
searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
if not searx_host.startswith("http"):
print(
f"Warning: missing the url scheme on host \
! assuming secure https://{searx_host} "
)
searx_host = "https://" + searx_host
elif searx_host.startswith("http://"):
values["unsecure"] = True
cls.disable_ssl_warnings(True)
values["searx_host"] = searx_host
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _searx_api_query(self, params: dict) -> SearxResults:
"""Actual request to searx API."""
raw_result = requests.get(
self.searx_host,
headers=self.headers,
params=params,
verify=not self.unsecure,
)
# test if http result is ok
if not raw_result.ok:
raise ValueError("Searx API returned an error: ", raw_result.text)
res = SearxResults(raw_result.text)
self._result = res
return res
async def _asearx_api_query(self, params: dict) -> SearxResults:
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(
self.searx_host,
headers=self.headers,
params=params,
ssl=(lambda: False if self.unsecure else None)(),
) as response:
if not response.ok:
raise ValueError("Searx API returned an error: ", response.text)
result = SearxResults(await response.text())
self._result = result
else:
async with self.aiosession.get(
self.searx_host,
headers=self.headers,
params=params,
verify=not self.unsecure,
) as response:
if not response.ok:
raise ValueError("Searx API returned an error: ", response.text)
result = SearxResults(await response.text())
self._result = result
return result
def run(
self,
query: str,
engines: Optional[List[str]] = None,
categories: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> str:
"""Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
str: The result of the query.
Raises:
ValueError: If an error occurred with the query.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
if isinstance(categories, list) and len(categories) > 0:
params["categories"] = ",".join(categories)
res = self._searx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
else:
toret = "No good search result found"
return toret
async def arun(
self,
query: str,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
    ) -> str:
        """Asynchronous version of `run`."""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
res = await self._asearx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
else:
toret = "No good search result found"
return toret
def results(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
categories: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
num_results: Limit the number of results to return.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
Dict with the following keys:
{
snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
if isinstance(categories, list) and len(categories) > 0:
params["categories"] = ",".join(categories)
results = self._searx_api_query(params).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
async def aresults(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Asynchronously query with json results.
Uses aiohttp. See `results` for more info.
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
results = (await self._asearx_api_query(params)).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
| [] |
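A minimal end-to-end sketch of the wrapper above, assuming a self-hosted SearxNG instance with the JSON output format enabled is reachable at http://localhost:8888 (the host URL is a placeholder).

from langchain.utilities import SearxSearchWrapper

search = SearxSearchWrapper(searx_host="http://localhost:8888", k=5)
# Plain-text answer assembled from the top results
print(search.run("large language models", engines=["duckduckgo"]))
# Structured results with title/link/snippet metadata
for hit in search.results("large language models", num_results=3):
    print(hit["title"], "-", hit["link"])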
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~minimax.py | """Wrapper around Minimax chat models."""
import logging
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.minimax import MinimaxCommon
from langchain.llms.utils import enforce_stop_tokens
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatResult,
    HumanMessage,
)
logger = logging.getLogger(__name__)
def _parse_message(msg_type: str, text: str) -> Dict:
return {"sender_type": msg_type, "text": text}
def _parse_chat_history(history: List[BaseMessage]) -> List:
"""Parse a sequence of messages into history."""
chat_history = []
for message in history:
if isinstance(message, HumanMessage):
chat_history.append(_parse_message("USER", message.content))
if isinstance(message, AIMessage):
chat_history.append(_parse_message("BOT", message.content))
return chat_history
class MiniMaxChat(MinimaxCommon, BaseChatModel):
"""Wrapper around Minimax large language models.
To use, you should have the environment variable ``MINIMAX_GROUP_ID`` and
``MINIMAX_API_KEY`` set with your API token, or pass it as a named parameter to
the constructor.
Example:
.. code-block:: python
from langchain.chat_models import MiniMaxChat
llm = MiniMaxChat(model_name="abab5-chat")
"""
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
            messages: The history of the conversation as a list of messages.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if not messages:
raise ValueError(
"You should provide at least one message to start the chat!"
)
history = _parse_chat_history(messages)
payload = self._default_params
payload["messages"] = history
        text = self._client.post(payload)
        # Stop sequences are not enforced by the model parameters, so apply them here
        text = text if stop is None else enforce_stop_tokens(text, stop)
        message = AIMessage(content=text)
        return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
raise NotImplementedError(
"""Minimax AI doesn't support async requests at the moment."""
)
| [] |
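A minimal usage sketch for the chat wrapper above, assuming the MINIMAX_GROUP_ID and MINIMAX_API_KEY environment variables are set; the question text is arbitrary.

from langchain.chat_models import MiniMaxChat
from langchain.schema import HumanMessage

chat = MiniMaxChat()
reply = chat([HumanMessage(content="Give me one sentence about the sea.")])
print(reply.content)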
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~llms~replicate.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
            replicate = Replicate(
                model=(
                    "stability-ai/stable-diffusion:"
                    "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478"
                ),
                model_kwargs={"image_dimensions": "512x512"}
            )
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
| [] |
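A minimal usage sketch for the wrapper above, assuming REPLICATE_API_TOKEN is set; the model identifier below is a placeholder in the required "owner/name:version" form and must be replaced with a real model and version hash, and the accepted model_kwargs keys depend on the chosen model.

from langchain.llms import Replicate

llm = Replicate(
    model="replicate/some-llm:<version-hash>",  # hypothetical identifier
    model_kwargs={"temperature": 0.75},  # accepted keys depend on the model
)
print(llm("Say hello in one short sentence."))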
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~audio_speech_to_text.py | from __future__ import annotations
import json
import logging
import time
from typing import List, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import validator
from langchain.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiSpeechToTextTool(EdenaiTool):
"""Tool that queries the Eden AI Speech To Text API.
for api reference check edenai documentation:
https://app.edenai.run/bricks/speech/asynchronous-speech-to-text.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
edenai_api_key: Optional[str] = None
name = "edenai_speech_to_text"
    description = (
        "A wrapper around edenai Services speech to text. "
        "Useful for when you have to convert audio to text. "
        "Input should be a url to an audio file."
    )
is_async = True
language: Optional[str] = "en"
speakers: Optional[int]
profanity_filter: bool = False
custom_vocabulary: Optional[List[str]]
feature: str = "audio"
subfeature: str = "speech_to_text_async"
base_url = "https://api.edenai.run/v2/audio/speech_to_text_async/"
@validator("providers")
def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
"""
This tool has no feature to combine providers results.
Therefore we only allow one provider
"""
if len(v) > 1:
raise ValueError(
"Please select only one provider. "
"The feature to combine providers results is not available "
"for this tool."
)
return v
def _wait_processing(self, url: str) -> requests.Response:
for _ in range(10):
time.sleep(1)
audio_analysis_result = self._get_edenai(url)
temp = audio_analysis_result.json()
if temp["status"] == "finished":
if temp["results"][self.providers[0]]["error"] is not None:
raise Exception(
f"""EdenAI returned an unexpected response
{temp['results'][self.providers[0]]['error']}"""
)
else:
return audio_analysis_result
raise Exception("Edenai speech to text job id processing Timed out")
def _parse_response(self, response: dict) -> str:
return response["public_id"]
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
all_params = {
"file_url": query,
"language": self.language,
"speakers": self.speakers,
"profanity_filter": self.profanity_filter,
"custom_vocabulary": self.custom_vocabulary,
}
        # filter out None values so we don't send them to the API
query_params = {k: v for k, v in all_params.items() if v is not None}
job_id = self._call_eden_ai(query_params)
url = self.base_url + job_id
audio_analysis_result = self._wait_processing(url)
result = audio_analysis_result.text
formatted_text = json.loads(result)
return formatted_text["results"][self.providers[0]]["text"]
| [] |
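A minimal usage sketch for the tool above, assuming EDENAI_API_KEY is set; the provider name and the audio URL are illustrative placeholders.

from langchain.tools.edenai.audio_speech_to_text import EdenAiSpeechToTextTool

tool = EdenAiSpeechToTextTool(providers=["amazon"], language="en")
transcript = tool.run("https://example.com/sample-audio.mp3")  # placeholder URL
print(transcript)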
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~comprehend_moderation~base_moderation.py | import uuid
from typing import Any, Callable, Dict, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import AIMessage, HumanMessage
from langchain_experimental.comprehend_moderation.intent import ComprehendIntent
from langchain_experimental.comprehend_moderation.pii import ComprehendPII
from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxicity
class BaseModeration:
def __init__(
self,
client: Any,
config: Optional[Dict[str, Any]] = None,
moderation_callback: Optional[Any] = None,
unique_id: Optional[str] = None,
run_manager: Optional[CallbackManagerForChainRun] = None,
):
self.client = client
self.config = config
self.moderation_callback = moderation_callback
self.unique_id = unique_id
self.chat_message_index = 0
self.run_manager = run_manager
self.chain_id = str(uuid.uuid4())
def _convert_prompt_to_text(self, prompt: Any) -> str:
input_text = str()
if isinstance(prompt, StringPromptValue):
input_text = prompt.text
elif isinstance(prompt, str):
input_text = prompt
elif isinstance(prompt, ChatPromptValue):
"""
            We only check the last message in the message chain of a
            ChatPromptTemplate. The typical chronology is
            SystemMessage > HumanMessage > AIMessage and so on. However, since
            the chain is invoked on every chat turn, checking the last message
            is sufficient, assuming all previous messages have already been
            checked. Only HumanMessage and AIMessage will be checked. We can perhaps
loop through and take advantage of the additional_kwargs property in the
HumanMessage and AIMessage schema to mark messages that have been moderated.
However that means that this class could generate multiple text chunks
and moderate() logics would need to be updated. This also means some
complexity in re-constructing the prompt while keeping the messages in
sequence.
"""
message = prompt.messages[-1]
self.chat_message_index = len(prompt.messages) - 1
if isinstance(message, HumanMessage):
input_text = message.content
if isinstance(message, AIMessage):
input_text = message.content
else:
            raise ValueError(
                f"Invalid input type {type(prompt)}. "
                "Must be a PromptValue, str, or list of BaseMessages."
            )
return input_text
def _convert_text_to_prompt(self, prompt: Any, text: str) -> Any:
if isinstance(prompt, StringPromptValue):
return StringPromptValue(text=text)
elif isinstance(prompt, str):
return text
elif isinstance(prompt, ChatPromptValue):
# Copy the messages because we may need to mutate them.
# We don't want to mutate data we don't own.
messages = list(prompt.messages)
message = messages[self.chat_message_index]
if isinstance(message, HumanMessage):
messages[self.chat_message_index] = HumanMessage(
content=text,
example=message.example,
additional_kwargs=message.additional_kwargs,
)
if isinstance(message, AIMessage):
messages[self.chat_message_index] = AIMessage(
content=text,
example=message.example,
additional_kwargs=message.additional_kwargs,
)
return ChatPromptValue(messages=messages)
else:
            raise ValueError(
                f"Invalid input type {type(prompt)}. "
                "Must be a PromptValue, str, or list of BaseMessages."
            )
def _moderation_class(self, moderation_class: Any) -> Callable:
return moderation_class(
client=self.client,
callback=self.moderation_callback,
unique_id=self.unique_id,
chain_id=self.chain_id,
).validate
def _log_message_for_verbose(self, message: str) -> None:
if self.run_manager:
self.run_manager.on_text(message)
def moderate(self, prompt: Any) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( # noqa: E501
ModerationIntentionError,
ModerationPiiError,
ModerationToxicityError,
)
try:
# convert prompt to text
input_text = self._convert_prompt_to_text(prompt=prompt)
output_text = str()
# perform moderation
if self.config is None:
# In absence of config Action will default to STOP only
self._log_message_for_verbose("Running pii validation...\n")
pii_validate = self._moderation_class(moderation_class=ComprehendPII)
output_text = pii_validate(prompt_value=input_text)
self._log_message_for_verbose("Running toxicity validation...\n")
toxicity_validate = self._moderation_class(
moderation_class=ComprehendToxicity
)
output_text = toxicity_validate(prompt_value=output_text)
self._log_message_for_verbose("Running intent validation...\n")
intent_validate = self._moderation_class(
moderation_class=ComprehendIntent
)
output_text = intent_validate(prompt_value=output_text)
else:
filter_functions = {
"pii": ComprehendPII,
"toxicity": ComprehendToxicity,
"intent": ComprehendIntent,
}
filters = self.config["filters"]
for _filter in filters:
filter_name = f"{_filter}"
if filter_name in filter_functions:
self._log_message_for_verbose(
f"Running {filter_name} Validation...\n"
)
validation_fn = self._moderation_class(
moderation_class=filter_functions[filter_name]
)
input_text = input_text if not output_text else output_text
output_text = validation_fn(
prompt_value=input_text,
config=self.config[filter_name]
if filter_name in self.config
else None,
)
# convert text to prompt and return
return self._convert_text_to_prompt(prompt=prompt, text=output_text)
except ModerationPiiError as e:
self._log_message_for_verbose(f"Found PII content..stopping..\n{str(e)}\n")
raise e
except ModerationToxicityError as e:
self._log_message_for_verbose(
f"Found Toxic content..stopping..\n{str(e)}\n"
)
raise e
except ModerationIntentionError as e:
self._log_message_for_verbose(
f"Found Harmful intention..stopping..\n{str(e)}\n"
)
raise e
except Exception as e:
raise e
| [] |
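BaseModeration is normally driven by the Amazon Comprehend moderation chain rather than called directly, but a direct sketch helps show the flow; it assumes boto3 credentials with Comprehend access, and the region name and prompt text are placeholders.

import boto3
from langchain_experimental.comprehend_moderation.base_moderation import (
    BaseModeration,
)

comprehend_client = boto3.client("comprehend", region_name="us-east-1")
moderation = BaseModeration(client=comprehend_client)
# With config=None every filter runs with the default STOP action and raises
# a Moderation*Error if PII, toxicity, or harmful intent is detected.
safe_prompt = moderation.moderate("Tell me a joke about databases.")
print(safe_prompt)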
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~retrievers~time_weighted_retriever.py | import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
from langchain.schema.vectorstore import VectorStore
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetimes."""
return (time - ref_time).total_seconds() / 3600
class TimeWeightedVectorStoreRetriever(BaseRetriever):
"""Retriever that combines embedding similarity with
recency in retrieving values."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _document_get_date(self, field: str, document: Document) -> datetime.datetime:
"""Return the value of the date field of a document."""
if field in document.metadata:
if isinstance(document.metadata[field], float):
return datetime.datetime.fromtimestamp(document.metadata[field])
return document.metadata[field]
return datetime.datetime.now()
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
document.metadata["last_accessed_at"],
)
score = (1.0 - self.decay_rate) ** hours_passed
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
| [] |
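A minimal usage sketch of the retriever above, assuming OPENAI_API_KEY is set and using an in-memory FAISS index; the dimensionality of 1536 matches OpenAI's default embedding model and would change with a different embedder.

import faiss
from langchain.docstore import InMemoryDocstore
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import Document
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()
index = faiss.IndexFlatL2(1536)
vectorstore = FAISS(embeddings.embed_query, index, InMemoryDocstore({}), {})
retriever = TimeWeightedVectorStoreRetriever(
    vectorstore=vectorstore, decay_rate=0.01, k=1
)
retriever.add_documents([Document(page_content="hello world")])
print(retriever.get_relevant_documents("hello"))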
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~xml.py | """Loads XML files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredXMLLoader(UnstructuredFileLoader):
"""Load `XML` file using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredXMLLoader
loader = UnstructuredXMLLoader(
"example.xml", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-xml
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xml import partition_xml
return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
| [] |
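A minimal usage sketch for the loader above in "elements" mode, assuming the unstructured extras are installed; "example.xml" is a placeholder path.

from langchain.document_loaders import UnstructuredXMLLoader

loader = UnstructuredXMLLoader("example.xml", mode="elements")
docs = loader.load()
for doc in docs[:3]:
    # Each element carries its unstructured category (Title, NarrativeText, ...)
    print(doc.metadata.get("category"), "-", doc.page_content[:80])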
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_transformers~doctran_text_translate.py | from typing import Any, Optional, Sequence
from langchain.schema import BaseDocumentTransformer, Document
from langchain.utils import get_from_env
class DoctranTextTranslator(BaseDocumentTransformer):
"""Translate text documents using doctran.
Arguments:
openai_api_key: OpenAI API key. Can also be specified via environment variable
``OPENAI_API_KEY``.
language: The language to translate *to*.
Example:
.. code-block:: python
from langchain.document_transformers import DoctranTextTranslator
# Pass in openai_api_key or set env var OPENAI_API_KEY
qa_translator = DoctranTextTranslator(language="spanish")
translated_document = await qa_translator.atransform_documents(documents)
"""
def __init__(
self,
openai_api_key: Optional[str] = None,
language: str = "english",
openai_api_model: Optional[str] = None,
) -> None:
self.openai_api_key = openai_api_key or get_from_env(
"openai_api_key", "OPENAI_API_KEY"
)
self.openai_api_model = openai_api_model or get_from_env(
"openai_api_model", "OPENAI_API_MODEL"
)
self.language = language
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
raise NotImplementedError
async def atransform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Translates text documents using doctran."""
try:
from doctran import Doctran
doctran = Doctran(
openai_api_key=self.openai_api_key, openai_model=self.openai_api_model
)
except ImportError:
raise ImportError(
"Install doctran to use this parser. (pip install doctran)"
)
doctran_docs = [
doctran.parse(content=doc.page_content, metadata=doc.metadata)
for doc in documents
]
for i, doc in enumerate(doctran_docs):
doctran_docs[i] = await doc.translate(language=self.language).execute()
return [
Document(page_content=doc.transformed_content, metadata=doc.metadata)
for doc in doctran_docs
]
| [] |
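A minimal usage sketch for the transformer above, assuming OPENAI_API_KEY and OPENAI_API_MODEL are set (the class reads both from the environment) and that the doctran package is installed.

import asyncio
from langchain.document_transformers import DoctranTextTranslator
from langchain.schema import Document

async def main() -> None:
    translator = DoctranTextTranslator(language="spanish")
    docs = [Document(page_content="The weather is nice today.")]
    translated = await translator.atransform_documents(docs)
    print(translated[0].page_content)

asyncio.run(main())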
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~excel.py | """Loads Microsoft Excel files."""
from typing import Any, List
from langchain.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredExcelLoader(UnstructuredFileLoader):
"""Load Microsoft Excel files using `Unstructured`.
Like other
    Unstructured loaders, UnstructuredExcelLoader can be used in both
    "single" and "elements" mode. If you use the loader in "elements"
    mode, each sheet in the Excel file will be an Unstructured Table
    element, and an HTML representation of the table will be available
    in the "text_as_html" key in the document metadata.
Examples
--------
from langchain.document_loaders.excel import UnstructuredExcelLoader
    loader = UnstructuredExcelLoader("stanley-cups.xlsx", mode="elements")
docs = loader.load()
"""
def __init__(
self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
):
"""
Args:
file_path: The path to the Microsoft Excel file.
mode: The mode to use when partitioning the file. See unstructured docs
for more info. Optional. Defaults to "single".
**unstructured_kwargs: Keyword arguments to pass to unstructured.
"""
validate_unstructured_version(min_unstructured_version="0.6.7")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.xlsx import partition_xlsx
return partition_xlsx(filename=self.file_path, **self.unstructured_kwargs)
| [] |
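A minimal usage sketch for the loader above, assuming the unstructured extras are installed; "stanley-cups.xlsx" is a placeholder path to a real workbook.

from langchain.document_loaders.excel import UnstructuredExcelLoader

loader = UnstructuredExcelLoader("stanley-cups.xlsx", mode="elements")
docs = loader.load()
# In "elements" mode each sheet also carries an HTML rendering of its table.
print(docs[0].metadata.get("text_as_html", "")[:200])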
2024-01-10 | ai-forever/gigachain | libs~external_tools~translator.py | """Tool for automatically translating LangChain library prompts using the OpenAI API."""
import ast
import os
import time
import openai
import tiktoken
IGNORED_DIRS = {
"venv",
".venv",
"hub",
".github",
".mypy_cache",
".ruff_cache",
"build",
".git",
"__pycache__",
"tests",
"venv_clear",
"docs",
"external_tools",
}
enc = tiktoken.get_encoding("cl100k_base")
ALREADY_PROCESSED_STORE = "libs/external_tools/translator_processed.txt"
def translate_to_russian(text):
# Restart after pause in case of openai.error.RateLimitError
try:
messages = [
{
"role": "system",
"content": """Я отправлю тебе программу на python или текстовый файл. В этой программе или тексте могут быть запросы (промпты) к большой языковой модели. Нужно перевести их на русский.
В промпте могут содержаться плейсхолдеры в фигурных скобках, например {question} или {answer}. Это нормально, их нужно сохранить без перевода.
Всю остальную программу нужно оставить как есть. Если в программе нет промптов, то нужно просто переписать её полностью. Больше ничего не надо выводить - только код с перевеёнными промптами.
Не переводи комментарии в коде, не переводи docstring! Не переводи строки, которые не похожи на запросы или части запросов, например названия полей, имена ключей в словарях и тому подобное. Если ты не уверен, что это промпт, то лучше вообще не переводи.
Если в файле нет ни одного промпта, верни "NO" без каких-либо пояснений. Общайся на `ты`, а не на `вы`, например `сделай`, а не `сделайте`. В промптах обращение к сети обязательно должно быть на "ты".
Ты должен вернуть полный код программы, которую тебе прислали без сокращений или дополнительных пояснений или своих комментариев. Сразу пиши код.
Не пиши в начале фразу "Код программы" и тому подобное. Начинай сразу с кода, первым словом в твоем ответе должна сразу быть программа""", # noqa: E501
},
{"role": "user", "content": text},
]
# Use tiktoken to check text size
if len(enc.encode(text)) > 3500:
with open(ALREADY_PROCESSED_STORE, "a", encoding="utf-8") as f:
f.write("File is too big:\n")
return text
completion = openai.ChatCompletion.create(
model="gpt-4", messages=messages, temperature=0.0, max_tokens=4500
)
translated_text = completion["choices"][0]["message"]["content"]
if translated_text.startswith("NO"):
return text
return translated_text
except Exception as ex:
print("Exception occurred: ", ex)
print("Rate limit error. Restarting in (30s)...")
time.sleep(30)
return translate_to_russian(text)
def is_russian(s):
return any(["а" <= ch <= "я" for ch in s.lower()])
ERROR_PHRASES = {
"error",
"exception",
"failed",
"cannot",
"unable",
"not found",
"invalid",
"unexpected",
"could not",
"please report",
"stop sequences found ",
"select ",
}
def is_not_error_message(s):
return all(phrase not in s.lower() for phrase in ERROR_PHRASES)
def set_parent(node):
for child in ast.iter_child_nodes(node):
child.parent = node
set_parent(child)
def process_file(file_path):
try:
# Check file is not processed yet. Create it if not exists
if not os.path.exists(ALREADY_PROCESSED_STORE):
with open(ALREADY_PROCESSED_STORE, "w", encoding="utf-8") as f:
f.write("")
with open(ALREADY_PROCESSED_STORE, "r", encoding="utf-8") as f:
processed = f.read().splitlines()
if file_path in processed:
return False
with open(file_path, "r", encoding="utf-8") as f:
source = f.read()
if (file_path.endswith("stuff_prompt.py")) and (
"prompt" in source
or file_path.endswith(".txt")
or "template = " in source.lower()
or "PREFIX = " in source
):
print(f"Found file: {file_path}")
print(f"Source: {source}\n\n")
print("Do you see prompts here? (y/n)")
answer = input()
if answer.lower() != "n":
translated = translate_to_russian(source)
if not translated.endswith("\n"):
translated += "\n"
with open(file_path, "w", encoding="utf-8") as f:
f.write(translated)
# Save file to processed list
with open(ALREADY_PROCESSED_STORE, "a", encoding="utf-8") as f:
f.write(file_path + "\n")
pass
else:
with open(ALREADY_PROCESSED_STORE, "a", encoding="utf-8") as f:
f.write("No prompts:" + "\n" + file_path + "\n")
return True
except UnicodeDecodeError:
pass
return False
def main(directory):
total = 0
for root, dirs, files in os.walk(directory):
        # Skip directories that should not be processed
dirs[:] = [d for d in dirs if d not in IGNORED_DIRS]
for file in files:
if file.endswith(".py") or file.endswith(".txt"):
if process_file(os.path.join(root, file)):
total += 1
print(f"Total files: {total}")
if __name__ == "__main__":
main(".")
| [
"Я отправлю тебе программу на python или текстовый файл. В этой программе или тексте могут быть запросы (промпты) к большой языковой модели. Нужно перевести их на русский.\n В промпте могут содержаться плейсхолдеры в фигурных скобках, например {question} или {answer}. Это нормально, их нужно сохранить без перевода.\n Всю остальную программу нужно оставить как есть. Если в программе нет промптов, то нужно просто переписать её полностью. Больше ничего не надо выводить - только код с перевеёнными промптами.\n Не переводи комментарии в коде, не переводи docstring! Не переводи строки, которые не похожи на запросы или части запросов, например названия полей, имена ключей в словарях и тому подобное. Если ты не уверен, что это промпт, то лучше вообще не переводи.\n Если в файле нет ни одного промпта, верни \"NO\" без каких-либо пояснений. Общайся на `ты`, а не на `вы`, например `сделай`, а не `сделайте`. В промптах обращение к сети обязательно должно быть на \"ты\".\n Ты должен вернуть полный код программы, которую тебе прислали без сокращений или дополнительных пояснений или своих комментариев. Сразу пиши код.\n Не пиши в начале фразу \"Код программы\" и тому подобное. Начинай сразу с кода, первым словом в твоем ответе должна сразу быть программа"
] |
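A minimal sketch of driving the utility above from another script, assuming the module is importable as `translator`, an OpenAI key is available to the openai client, and the file path below is a placeholder; process_file() prompts interactively before rewriting anything.

import openai
import translator  # assumes this module is on the import path

openai.api_key = "sk-..."  # placeholder key
translator.process_file("libs/langchain/langchain/chains/example_prompt.py")
translator.main(".")  # sweep the whole repository tree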
2024-01-10 | ai-forever/gigachain | libs~langchain~tests~integration_tests~storage~test_upstash_redis.py | """Implement integration tests for Redis storage."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain.storage.upstash_redis import UpstashRedisStore
if TYPE_CHECKING:
from upstash_redis import Redis
pytest.importorskip("upstash_redis")
URL = "<UPSTASH_REDIS_REST_URL>"
TOKEN = "<UPSTASH_REDIS_REST_TOKEN>"
@pytest.fixture
def redis_client() -> Redis:
"""Yield redis client."""
from upstash_redis import Redis
# This fixture flushes the database!
client = Redis(url=URL, token=TOKEN)
try:
client.ping()
except Exception:
pytest.skip("Ping request failed. Verify that credentials are correct.")
client.flushdb()
return client
def test_mget(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None)
keys = ["key1", "key2"]
redis_client.mset({"key1": "value1", "key2": "value2"})
result = store.mget(keys)
assert result == ["value1", "value2"]
def test_mset(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None)
key_value_pairs = [("key1", "value1"), ("key2", "value2")]
store.mset(key_value_pairs)
result = redis_client.mget("key1", "key2")
assert result == ["value1", "value2"]
def test_mdelete(redis_client: Redis) -> None:
"""Test that deletion works as expected."""
store = UpstashRedisStore(client=redis_client, ttl=None)
keys = ["key1", "key2"]
redis_client.mset({"key1": "value1", "key2": "value2"})
store.mdelete(keys)
result = redis_client.mget(*keys)
assert result == [None, None]
def test_yield_keys(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None)
redis_client.mset({"key1": "value2", "key2": "value2"})
assert sorted(store.yield_keys()) == ["key1", "key2"]
assert sorted(store.yield_keys(prefix="key*")) == ["key1", "key2"]
assert sorted(store.yield_keys(prefix="lang*")) == []
def test_namespace(redis_client: Redis) -> None:
store = UpstashRedisStore(client=redis_client, ttl=None, namespace="meow")
key_value_pairs = [("key1", "value1"), ("key2", "value2")]
store.mset(key_value_pairs)
cursor, all_keys = redis_client.scan(0)
while cursor != 0:
cursor, keys = redis_client.scan(cursor)
if len(keys) != 0:
all_keys.extend(keys)
assert sorted(all_keys) == [
"meow/key1",
"meow/key2",
]
store.mdelete(["key1"])
cursor, all_keys = redis_client.scan(0, match="*")
while cursor != 0:
cursor, keys = redis_client.scan(cursor, match="*")
if len(keys) != 0:
all_keys.extend(keys)
assert sorted(all_keys) == [
"meow/key2",
]
assert list(store.yield_keys()) == ["key2"]
assert list(store.yield_keys(prefix="key*")) == ["key2"]
assert list(store.yield_keys(prefix="key1")) == []
| [] |
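A minimal usage sketch of the store exercised by the tests above, assuming an Upstash Redis database; the REST URL and token are placeholders for real credentials.

from upstash_redis import Redis
from langchain.storage.upstash_redis import UpstashRedisStore

client = Redis(url="https://<your-db>.upstash.io", token="<token>")
store = UpstashRedisStore(client=client, ttl=3600, namespace="demo")
store.mset([("greeting", "hello"), ("farewell", "bye")])
print(store.mget(["greeting", "farewell"]))
print(sorted(store.yield_keys(prefix="gree*")))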
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~embeddings~ollama.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.pydantic_v1 import BaseModel, Extra
from langchain.schema.embeddings import Embeddings
class OllamaEmbeddings(BaseModel, Embeddings):
"""Ollama locally runs large language models.
To use, follow the instructions at https://ollama.ai/.
Example:
.. code-block:: python
from langchain.embeddings import OllamaEmbeddings
ollama_emb = OllamaEmbeddings(
model="llama:7b",
)
r1 = ollama_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
r2 = ollama_emb.embed_query(
"What is the second letter of Greek alphabet"
)
"""
base_url: str = "http://localhost:11434"
"""Base url the model is hosted under."""
model: str = "llama2"
"""Model name to use."""
embed_instruction: str = "passage: "
"""Instruction used to embed documents."""
query_instruction: str = "query: "
"""Instruction used to embed the query."""
mirostat: Optional[int]
"""Enable Mirostat sampling for controlling perplexity.
(default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)"""
mirostat_eta: Optional[float]
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: 0.1)"""
mirostat_tau: Optional[float]
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: 5.0)"""
num_ctx: Optional[int]
"""Sets the size of the context window used to generate the
next token. (Default: 2048) """
num_gpu: Optional[int]
"""The number of GPUs to use. On macOS it defaults to 1 to
enable metal support, 0 to disable."""
num_thread: Optional[int]
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
repeat_last_n: Optional[int]
"""Sets how far back for the model to look back to prevent
repetition. (Default: 64, 0 = disabled, -1 = num_ctx)"""
repeat_penalty: Optional[float]
"""Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
will penalize repetitions more strongly, while a lower value (e.g., 0.9)
will be more lenient. (Default: 1.1)"""
temperature: Optional[float]
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: 0.8)"""
stop: Optional[List[str]]
"""Sets the stop tokens to use."""
tfs_z: Optional[float]
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., 2.0) will reduce the
impact more, while a value of 1.0 disables this setting. (default: 1)"""
top_k: Optional[int]
"""Reduces the probability of generating nonsense. A higher value (e.g. 100)
will give more diverse answers, while a lower value (e.g. 10)
will be more conservative. (Default: 40)"""
    top_p: Optional[float]
"""Works together with top-k. A higher value (e.g., 0.95) will lead
to more diverse text, while a lower value (e.g., 0.5) will
generate more focused and conservative text. (Default: 0.9)"""
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Ollama."""
return {
"model": self.model,
"options": {
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"stop": self.stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
}
model_kwargs: Optional[dict] = None
"""Other model keyword args"""
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
    def _process_emb_response(self, input: str) -> List[float]:
        """Request an embedding for ``input`` and process the API response.
        Args:
            input: The text to embed.
        Returns:
            The embedding as a list of floats.
        """
headers = {
"Content-Type": "application/json",
}
try:
res = requests.post(
f"{self.base_url}/api/embeddings",
headers=headers,
json={"model": self.model, "prompt": input, **self._default_params},
)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
if res.status_code != 200:
raise ValueError(
"Error raised by inference API HTTP code: %s, %s"
% (res.status_code, res.text)
)
try:
t = res.json()
return t["embedding"]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {res.text}"
)
def _embed(self, input: List[str]) -> List[List[float]]:
embeddings_list: List[List[float]] = []
for prompt in input:
embeddings = self._process_emb_response(prompt)
embeddings_list.append(embeddings)
return embeddings_list
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a Ollama deployed embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
instruction_pairs = [f"{self.embed_instruction}{text}" for text in texts]
embeddings = self._embed(instruction_pairs)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a Ollama deployed embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
instruction_pair = f"{self.query_instruction}{text}"
embedding = self._embed([instruction_pair])[0]
return embedding
| [] |
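A minimal usage sketch for the embeddings class above, assuming a local Ollama server is running on the default port with the llama2 model already pulled.

from langchain.embeddings import OllamaEmbeddings

ollama_emb = OllamaEmbeddings(model="llama2")
doc_vectors = ollama_emb.embed_documents(["first passage", "second passage"])
query_vector = ollama_emb.embed_query("which passage comes first?")
print(len(doc_vectors), len(query_vector))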
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~chat_models~fireworks.py | from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Type,
Union,
)
from langchain.adapters.openai import convert_message_to_dict
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import create_base_retry_decorator
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
from langchain.schema.output import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain.utils.env import get_from_dict_or_env
def _convert_delta_to_message_chunk(
_dict: Any, default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
"""Convert a delta response to a message chunk."""
role = _dict.role
content = _dict.content or ""
additional_kwargs: Dict = {}
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content, additional_kwargs=additional_kwargs)
elif role == "system" or default_class == SystemMessageChunk:
return SystemMessageChunk(content=content)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict.name)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
def convert_dict_to_message(_dict: Any) -> BaseMessage:
"""Convert a dict response to a message."""
role = _dict.role
content = _dict.content or ""
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
content = _dict.content
additional_kwargs: Dict = {}
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=content)
elif role == "function":
return FunctionMessage(content=content, name=_dict.name)
else:
return ChatMessage(content=content, role=role)
class ChatFireworks(BaseChatModel):
"""Fireworks Chat models."""
model: str = "accounts/fireworks/models/llama-v2-7b-chat"
model_kwargs: dict = Field(
default_factory=lambda: {
"temperature": 0.7,
"max_tokens": 512,
"top_p": 1,
}.copy()
)
fireworks_api_key: Optional[str] = None
max_retries: int = 20
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key in environment."""
try:
import fireworks.client
except ImportError as e:
raise ImportError(
"Could not import fireworks-ai python package. "
"Please install it with `pip install fireworks-ai`."
) from e
fireworks_api_key = get_from_dict_or_env(
values, "fireworks_api_key", "FIREWORKS_API_KEY"
)
fireworks.client.api_key = fireworks_api_key
return values
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "fireworks-chat"
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = self._create_message_dicts(messages)
params = {
"model": self.model,
"messages": message_dicts,
**self.model_kwargs,
}
response = completion_with_retry(
self, run_manager=run_manager, stop=stop, **params
)
return self._create_chat_result(response)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = self._create_message_dicts(messages)
params = {
"model": self.model,
"messages": message_dicts,
**self.model_kwargs,
}
response = await acompletion_with_retry(
self, run_manager=run_manager, stop=stop, **params
)
return self._create_chat_result(response)
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
if llm_outputs[0] is None:
return {}
return llm_outputs[0]
def _create_chat_result(self, response: Any) -> ChatResult:
generations = []
for res in response.choices:
message = convert_dict_to_message(res.message)
gen = ChatGeneration(
message=message,
generation_info=dict(finish_reason=res.finish_reason),
)
generations.append(gen)
llm_output = {"model": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _create_message_dicts(
self, messages: List[BaseMessage]
) -> List[Dict[str, Any]]:
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts = self._create_message_dicts(messages)
default_chunk_class = AIMessageChunk
params = {
"model": self.model,
"messages": message_dicts,
"stream": True,
**self.model_kwargs,
}
for chunk in completion_with_retry(
self, run_manager=run_manager, stop=stop, **params
):
choice = chunk.choices[0]
chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class)
finish_reason = choice.finish_reason
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
run_manager.on_llm_new_token(chunk.content, chunk=chunk)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts = self._create_message_dicts(messages)
default_chunk_class = AIMessageChunk
params = {
"model": self.model,
"messages": message_dicts,
"stream": True,
**self.model_kwargs,
}
async for chunk in await acompletion_with_retry_streaming(
self, run_manager=run_manager, stop=stop, **params
):
choice = chunk.choices[0]
chunk = _convert_delta_to_message_chunk(choice.delta, default_chunk_class)
finish_reason = choice.finish_reason
generation_info = (
dict(finish_reason=finish_reason) if finish_reason is not None else None
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk, generation_info=generation_info)
if run_manager:
await run_manager.on_llm_new_token(token=chunk.content, chunk=chunk)
def completion_with_retry(
llm: ChatFireworks,
*,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.ChatCompletion.create(
**kwargs,
)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: ChatFireworks,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return await fireworks.client.ChatCompletion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
async def acompletion_with_retry_streaming(
llm: ChatFireworks,
*,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call for streaming."""
import fireworks.client
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return fireworks.client.ChatCompletion.acreate(
**kwargs,
)
return await _completion_with_retry(**kwargs)
def _create_retry_decorator(
llm: ChatFireworks,
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
"""Define retry mechanism."""
import fireworks.client
errors = [
fireworks.client.error.RateLimitError,
fireworks.client.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
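if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Assumes the
    # FIREWORKS_API_KEY environment variable is set and that the default
    # Llama 2 chat model is available on your Fireworks account.
    chat = ChatFireworks(
        model="accounts/fireworks/models/llama-v2-7b-chat",
        model_kwargs={"temperature": 0.2, "max_tokens": 256, "top_p": 1},
    )
    messages = [
        SystemMessage(content="You are a concise assistant."),
        HumanMessage(content="Name three uses of text embeddings."),
    ]
    # Single-shot generation; token-by-token output is available through
    # chat.stream(messages), which drives the _stream method above.
    print(chat(messages).content)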
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~airbyte.py | from typing import Any, Callable, Iterator, List, Mapping, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils.utils import guard_import
RecordHandler = Callable[[Any, Optional[str]], Document]
class AirbyteCDKLoader(BaseLoader):
"""Load with an `Airbyte` source connector implemented using the `CDK`."""
def __init__(
self,
config: Mapping[str, Any],
source_class: Any,
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
source_class: The source connector class.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
from airbyte_cdk.models.airbyte_protocol import AirbyteRecordMessage
from airbyte_cdk.sources.embedded.base_integration import (
BaseEmbeddedIntegration,
)
from airbyte_cdk.sources.embedded.runner import CDKRunner
class CDKIntegration(BaseEmbeddedIntegration):
"""A wrapper around the CDK integration."""
def _handle_record(
self, record: AirbyteRecordMessage, id: Optional[str]
) -> Document:
if record_handler:
return record_handler(record, id)
return Document(page_content="", metadata=record.data)
self._integration = CDKIntegration(
config=config,
runner=CDKRunner(source=source_class(), name=source_class.__name__),
)
self._stream_name = stream_name
self._state = state
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
return self._integration._load_data(
stream_name=self._stream_name, state=self._state
)
@property
def last_state(self) -> Any:
return self._integration.last_state
class AirbyteHubspotLoader(AirbyteCDKLoader):
"""Load from `Hubspot` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_hubspot", pip_name="airbyte-source-hubspot"
).SourceHubspot
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteStripeLoader(AirbyteCDKLoader):
"""Load from `Stripe` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_stripe", pip_name="airbyte-source-stripe"
).SourceStripe
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteTypeformLoader(AirbyteCDKLoader):
"""Load from `Typeform` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_typeform", pip_name="airbyte-source-typeform"
).SourceTypeform
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteZendeskSupportLoader(AirbyteCDKLoader):
"""Load from `Zendesk Support` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_zendesk_support", pip_name="airbyte-source-zendesk-support"
).SourceZendeskSupport
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteShopifyLoader(AirbyteCDKLoader):
"""Load from `Shopify` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_shopify", pip_name="airbyte-source-shopify"
).SourceShopify
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteSalesforceLoader(AirbyteCDKLoader):
"""Load from `Salesforce` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_salesforce", pip_name="airbyte-source-salesforce"
).SourceSalesforce
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteGongLoader(AirbyteCDKLoader):
"""Load from `Gong` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_gong", pip_name="airbyte-source-gong"
).SourceGong
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
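if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Assumes
    # `airbyte-source-stripe` is installed; the config keys follow the Airbyte
    # Stripe source spec and the placeholder credentials must be replaced.
    config = {
        "client_secret": "<stripe-api-key>",
        "account_id": "<stripe-account-id>",
        "start_date": "2023-01-01T00:00:00Z",
    }
    def to_document(record: Any, id: Optional[str]) -> Document:
        # Mirror the default _handle_record behaviour, but keep a readable
        # string of the record payload as the page content.
        return Document(page_content=str(record.data), metadata=dict(record.data))
    loader = AirbyteStripeLoader(
        config=config, stream_name="invoices", record_handler=to_document
    )
    docs = loader.load()
    print(f"Loaded {len(docs)} documents; last state: {loader.last_state}")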
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~utilities~github.py | """Util that calls GitHub."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from github.Issue import Issue
class GitHubAPIWrapper(BaseModel):
"""Wrapper for GitHub API."""
github: Any #: :meta private:
github_repo_instance: Any #: :meta private:
github_repository: Optional[str] = None
github_app_id: Optional[str] = None
github_app_private_key: Optional[str] = None
github_branch: Optional[str] = None
github_base_branch: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
github_repository = get_from_dict_or_env(
values, "github_repository", "GITHUB_REPOSITORY"
)
github_app_id = get_from_dict_or_env(values, "github_app_id", "GITHUB_APP_ID")
github_app_private_key = get_from_dict_or_env(
values, "github_app_private_key", "GITHUB_APP_PRIVATE_KEY"
)
github_branch = get_from_dict_or_env(
values, "github_branch", "GITHUB_BRANCH", default="master"
)
github_base_branch = get_from_dict_or_env(
values, "github_base_branch", "GITHUB_BASE_BRANCH", default="master"
)
try:
from github import Auth, GithubIntegration
except ImportError:
raise ImportError(
"PyGithub is not installed. "
"Please install it with `pip install PyGithub`"
)
with open(github_app_private_key, "r") as f:
private_key = f.read()
auth = Auth.AppAuth(
github_app_id,
private_key,
)
gi = GithubIntegration(auth=auth)
installation = gi.get_installations()[0]
# create a GitHub instance:
g = installation.get_github_for_installation()
values["github"] = g
values["github_repo_instance"] = g.get_repo(github_repository)
values["github_repository"] = github_repository
values["github_app_id"] = github_app_id
values["github_app_private_key"] = github_app_private_key
values["github_branch"] = github_branch
values["github_base_branch"] = github_base_branch
return values
def parse_issues(self, issues: List[Issue]) -> List[dict]:
"""
Extracts title and number from each Issue and puts them in a dictionary
Parameters:
issues(List[Issue]): A list of Github Issue objects
Returns:
            List[dict]: A list of dictionaries, each with an issue's title and number
"""
parsed = []
for issue in issues:
title = issue.title
number = issue.number
parsed.append({"title": title, "number": number})
return parsed
def get_issues(self) -> str:
"""
Fetches all open issues from the repo
Returns:
str: A plaintext report containing the number of issues
and each issue's title and number.
"""
issues = self.github_repo_instance.get_issues(state="open")
if issues.totalCount > 0:
parsed_issues = self.parse_issues(issues)
parsed_issues_str = (
"Found " + str(len(parsed_issues)) + " issues:\n" + str(parsed_issues)
)
return parsed_issues_str
else:
return "No open issues available"
def get_issue(self, issue_number: int) -> Dict[str, Any]:
"""
Fetches a specific issue and its first 10 comments
Parameters:
issue_number(int): The number for the github issue
Returns:
            dict: A dictionary containing the issue's title,
body, and comments as a string
"""
issue = self.github_repo_instance.get_issue(number=issue_number)
page = 0
comments: List[dict] = []
while len(comments) <= 10:
comments_page = issue.get_comments().get_page(page)
if len(comments_page) == 0:
break
for comment in comments_page:
comments.append({"body": comment.body, "user": comment.user.login})
page += 1
return {
"title": issue.title,
"body": issue.body,
"comments": str(comments),
}
def create_pull_request(self, pr_query: str) -> str:
"""
Makes a pull request from the bot's branch to the base branch
Parameters:
pr_query(str): a string which contains the PR title
and the PR body. The title is the first line
in the string, and the body are the rest of the string.
For example, "Updated README\nmade changes to add info"
Returns:
str: A success or failure message
"""
if self.github_base_branch == self.github_branch:
return """Cannot make a pull request because
commits are already in the master branch"""
else:
try:
title = pr_query.split("\n")[0]
body = pr_query[len(title) + 2 :]
pr = self.github_repo_instance.create_pull(
title=title,
body=body,
head=self.github_branch,
base=self.github_base_branch,
)
return f"Successfully created PR number {pr.number}"
except Exception as e:
return "Unable to make pull request due to error:\n" + str(e)
def comment_on_issue(self, comment_query: str) -> str:
"""
Adds a comment to a github issue
Parameters:
comment_query(str): a string which contains the issue number,
two newlines, and the comment.
for example: "1\n\nWorking on it now"
adds the comment "working on it now" to issue 1
Returns:
str: A success or failure message
"""
issue_number = int(comment_query.split("\n\n")[0])
comment = comment_query[len(str(issue_number)) + 2 :]
try:
issue = self.github_repo_instance.get_issue(number=issue_number)
issue.create_comment(comment)
return "Commented on issue " + str(issue_number)
except Exception as e:
return "Unable to make comment due to error:\n" + str(e)
def create_file(self, file_query: str) -> str:
"""
Creates a new file on the Github repo
Parameters:
file_query(str): a string which contains the file path
and the file contents. The file path is the first line
in the string, and the contents are the rest of the string.
For example, "hello_world.md\n# Hello World!"
Returns:
str: A success or failure message
"""
file_path = file_query.split("\n")[0]
file_contents = file_query[len(file_path) + 2 :]
try:
exists = self.github_repo_instance.get_contents(file_path)
if exists is None:
self.github_repo_instance.create_file(
path=file_path,
message="Create " + file_path,
content=file_contents,
branch=self.github_branch,
)
return "Created file " + file_path
else:
return f"File already exists at {file_path}. Use update_file instead"
except Exception as e:
return "Unable to make file due to error:\n" + str(e)
def read_file(self, file_path: str) -> str:
"""
Reads a file from the github repo
Parameters:
file_path(str): the file path
Returns:
str: The file decoded as a string
"""
file = self.github_repo_instance.get_contents(file_path)
return file.decoded_content.decode("utf-8")
def update_file(self, file_query: str) -> str:
"""
Updates a file with new content.
Parameters:
file_query(str): Contains the file path and the file contents.
The old file contents is wrapped in OLD <<<< and >>>> OLD
The new file contents is wrapped in NEW <<<< and >>>> NEW
For example:
/test/hello.txt
OLD <<<<
Hello Earth!
>>>> OLD
NEW <<<<
Hello Mars!
>>>> NEW
Returns:
A success or failure message
"""
try:
file_path = file_query.split("\n")[0]
old_file_contents = (
file_query.split("OLD <<<<")[1].split(">>>> OLD")[0].strip()
)
new_file_contents = (
file_query.split("NEW <<<<")[1].split(">>>> NEW")[0].strip()
)
file_content = self.read_file(file_path)
updated_file_content = file_content.replace(
old_file_contents, new_file_contents
)
if file_content == updated_file_content:
return (
"File content was not updated because old content was not found."
"It may be helpful to use the read_file action to get "
"the current file contents."
)
self.github_repo_instance.update_file(
path=file_path,
message="Update " + file_path,
content=updated_file_content,
branch=self.github_branch,
sha=self.github_repo_instance.get_contents(file_path).sha,
)
return "Updated file " + file_path
except Exception as e:
return "Unable to update file due to error:\n" + str(e)
def delete_file(self, file_path: str) -> str:
"""
Deletes a file from the repo
Parameters:
file_path(str): Where the file is
Returns:
str: Success or failure message
"""
try:
file = self.github_repo_instance.get_contents(file_path)
self.github_repo_instance.delete_file(
path=file_path,
message="Delete " + file_path,
branch=self.github_branch,
sha=file.sha,
)
return "Deleted file " + file_path
except Exception as e:
return "Unable to delete file due to error:\n" + str(e)
def run(self, mode: str, query: str) -> str:
if mode == "get_issues":
return self.get_issues()
elif mode == "get_issue":
return json.dumps(self.get_issue(int(query)))
elif mode == "comment_on_issue":
return self.comment_on_issue(query)
elif mode == "create_file":
return self.create_file(query)
elif mode == "create_pull_request":
return self.create_pull_request(query)
elif mode == "read_file":
return self.read_file(query)
elif mode == "update_file":
return self.update_file(query)
elif mode == "delete_file":
return self.delete_file(query)
else:
raise ValueError("Invalid mode" + mode)
| [] |
2024-01-10 | ai-forever/gigachain | libs~experimental~langchain_experimental~autonomous_agents~baby_agi~baby_agi.py | """BabyAGI agent."""
from collections import deque
from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.vectorstore import VectorStore
from langchain_experimental.autonomous_agents.baby_agi.task_creation import (
TaskCreationChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_execution import (
TaskExecutionChain,
)
from langchain_experimental.autonomous_agents.baby_agi.task_prioritization import (
TaskPrioritizationChain,
)
from langchain_experimental.pydantic_v1 import BaseModel, Field
# This class has a metaclass conflict: both `Chain` and `BaseModel` define a metaclass
# to use, and the two metaclasses attempt to define the same functions but
# in mutually-incompatible ways. It isn't clear how to resolve this,
# and this code predates mypy beginning to perform that check.
#
# Mypy errors:
# ```
# Definition of "__repr_str__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__repr_name__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__rich_repr__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Definition of "__pretty__" in base class "Representation" is
# incompatible with definition in base class "BaseModel" [misc]
# Metaclass conflict: the metaclass of a derived class must be
# a (non-strict) subclass of the metaclasses of all its bases [misc]
# ```
#
# TODO: look into refactoring this class in a way that avoids the mypy type errors
class BabyAGI(Chain, BaseModel): # type: ignore[misc]
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: Chain = Field(...)
task_prioritization_chain: Chain = Field(...)
execution_chain: Chain = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict) -> None:
self.task_list.append(task)
def print_task_list(self) -> None:
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict) -> None:
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str) -> None:
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
def get_next_task(
self, result: str, task_description: str, objective: str, **kwargs: Any
) -> List[Dict]:
"""Get the next task."""
task_names = [t["task_name"] for t in self.task_list]
incomplete_tasks = ", ".join(task_names)
response = self.task_creation_chain.run(
result=result,
task_description=task_description,
incomplete_tasks=incomplete_tasks,
objective=objective,
**kwargs,
)
new_tasks = response.split("\n")
return [
{"task_name": task_name} for task_name in new_tasks if task_name.strip()
]
def prioritize_tasks(
self, this_task_id: int, objective: str, **kwargs: Any
) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in list(self.task_list)]
next_task_id = int(this_task_id) + 1
response = self.task_prioritization_chain.run(
task_names=", ".join(task_names),
next_task_id=str(next_task_id),
objective=objective,
**kwargs,
)
new_tasks = response.split("\n")
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append(
{"task_id": task_id, "task_name": task_name}
)
return prioritized_task_list
def _get_top_tasks(self, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = self.vectorstore.similarity_search(query, k=k)
if not results:
return []
return [str(item.metadata["task"]) for item in results]
def execute_task(self, objective: str, task: str, k: int = 5, **kwargs: Any) -> str:
"""Execute a task."""
context = self._get_top_tasks(query=objective, k=k)
return self.execution_chain.run(
objective=objective, context="\n".join(context), task=task, **kwargs
)
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the agent."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
objective = inputs["objective"]
first_task = inputs.get("first_task", "Make a todo list")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = self.execute_task(
objective, task["task_name"], callbacks=_run_manager.get_child()
)
this_task_id = int(task["task_id"])
self.print_task_result(result)
# Step 3: Store the result in Pinecone
result_id = f"result_{task['task_id']}_{num_iters}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = self.get_next_task(
result,
task["task_name"],
objective,
callbacks=_run_manager.get_child(),
)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(
self.prioritize_tasks(
this_task_id, objective, callbacks=_run_manager.get_child()
)
)
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print(
"\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
)
break
return {}
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
verbose: bool = False,
task_execution_chain: Optional[Chain] = None,
**kwargs: Any,
) -> "BabyAGI":
"""Initialize the BabyAGI Controller."""
task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
task_prioritization_chain = TaskPrioritizationChain.from_llm(
llm, verbose=verbose
)
if task_execution_chain is None:
execution_chain: Chain = TaskExecutionChain.from_llm(llm, verbose=verbose)
else:
execution_chain = task_execution_chain
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=execution_chain,
vectorstore=vectorstore,
**kwargs,
)
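if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Assumes an
    # OpenAI API key is configured and `faiss-cpu` is installed; any LangChain
    # LLM / VectorStore pair could be substituted.
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.llms import OpenAI
    from langchain.vectorstores import FAISS
    # Seed the store with one placeholder result so _get_top_tasks has a
    # "task" metadata entry to return before the first real result is stored.
    vectorstore = FAISS.from_texts(
        ["placeholder"], OpenAIEmbeddings(), metadatas=[{"task": "placeholder"}]
    )
    baby_agi = BabyAGI.from_llm(
        llm=OpenAI(temperature=0),
        vectorstore=vectorstore,
        max_iterations=3,
    )
    baby_agi({"objective": "Write a weather report for San Francisco today"})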
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~tools~edenai~image_explicitcontent.py | from __future__ import annotations
import logging
from typing import Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class EdenAiExplicitImageTool(EdenaiTool):
"""Tool that queries the Eden AI Explicit image detection.
for api reference check edenai documentation:
https://docs.edenai.co/reference/image_explicit_content_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name = "edenai_image_explicit_content_detection"
description = (
"A wrapper around edenai Services Explicit image detection. "
"""Useful for when you have to extract Explicit Content from images.
it detects adult only content in images,
that is generally inappropriate for people under
the age of 18 and includes nudity, sexual activity,
pornography, violence, gore content, etc."""
"Input should be the string url of the image ."
)
combine_available = True
feature = "image"
subfeature = "explicit_content"
def _parse_json(self, json_data: dict) -> str:
result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
for idx, found_obj in enumerate(json_data["items"]):
label = found_obj["label"].lower()
likelihood = found_obj["likelihood"]
result_str += f"{idx}: {label} likelihood {likelihood},\n"
return result_str[:-2]
def _parse_response(self, json_data: list) -> str:
if len(json_data) == 1:
result = self._parse_json(json_data[0])
else:
for entry in json_data:
if entry.get("provider") == "eden-ai":
result = self._parse_json(entry)
return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
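if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Assumes
    # EDENAI_API_KEY is set in the environment; the provider list and image
    # URL below are placeholders to adapt to your account.
    tool = EdenAiExplicitImageTool(providers=["amazon"])
    print(tool.run("https://example.com/sample-image.jpg"))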
| [] |
2024-01-10 | ai-forever/gigachain | libs~langchain~langchain~document_loaders~xorbits.py | from typing import Any
from langchain.document_loaders.dataframe import BaseDataFrameLoader
class XorbitsLoader(BaseDataFrameLoader):
"""Load `Xorbits` DataFrame."""
def __init__(self, data_frame: Any, page_content_column: str = "text"):
"""Initialize with dataframe object.
Requirements:
Must have xorbits installed. You can install with `pip install xorbits`.
Args:
data_frame: Xorbits DataFrame object.
page_content_column: Name of the column containing the page content.
Defaults to "text".
"""
try:
import xorbits.pandas as pd
except ImportError as e:
raise ImportError(
"Cannot import xorbits, please install with 'pip install xorbits'."
) from e
if not isinstance(data_frame, pd.DataFrame):
raise ValueError(
f"Expected data_frame to be a xorbits.pandas.DataFrame, \
got {type(data_frame)}"
)
super().__init__(data_frame, page_content_column=page_content_column)
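if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). Assumes the
    # `xorbits` package is installed.
    import xorbits.pandas as xpd
    frame = xpd.DataFrame(
        {"text": ["first document", "second document"], "author": ["a", "b"]}
    )
    loader = XorbitsLoader(frame, page_content_column="text")
    for doc in loader.load():
        # Remaining columns (here, "author") end up in the document metadata.
        print(doc.page_content, doc.metadata)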
| [] |