""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that AWS credentials to and python package exists in environment.""" try: import boto3 try: if values["credentials_profile_name"] is not None: session = boto3.Session( profile_name=values["credentials_profile_name"] ) else: # use default credentials session = boto3.Session() values["client"] = session.client( "sagemaker-runtime", region_name=values["region_name"] ) except Exception as e: raise ValueError( "Could not load credentials to authenticate with AWS client. " "Please check that credentials in the specified " "profile name are valid." ) from e except ImportError: raise ValueError( "Could not import boto3 python package. " "Please it install it with `pip install boto3`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_name": self.endpoint_name}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "sagemaker_endpoint" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Sagemaker inference endpoint. Args:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\llms\\sagemaker_endpoint.html"
9897ea20cd19-4
"""Call out to Sagemaker inference endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(prompt, _model_kwargs) content_type = self.content_handler.content_type accepts = self.content_handler.accepts # send request try: response = self.client.invoke_endpoint( EndpointName=self.endpoint_name, Body=body, ContentType=content_type, Accept=accepts, **_endpoint_kwargs, ) except Exception as e: raise ValueError(f"Error raised by inference endpoint: {e}") text = self.content_handler.transform_output(response["Body"]) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to the sagemaker endpoint. text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\llms\\sagemaker_endpoint.html"
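
The `_call` method above delegates all request/response serialization to the content handler. Below is a minimal sketch of a handler for a JSON-in/JSON-out endpoint; it assumes the ``ContentHandlerBase`` interface defined earlier in this module (not shown in this excerpt), and the endpoint name and response shape are illustrative placeholders rather than a guaranteed contract.

.. code-block:: python

    import json
    from typing import Dict

    from langchain.llms.sagemaker_endpoint import ContentHandlerBase, SagemakerEndpoint

    class JSONContentHandler(ContentHandlerBase):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Serialize the prompt and model parameters into the request body.
            return json.dumps({"inputs": prompt, "parameters": model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            # `output` is the streaming Body of the InvokeEndpoint response.
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    se = SagemakerEndpoint(
        endpoint_name="my-endpoint",  # hypothetical endpoint name
        region_name="us-west-2",
        credentials_profile_name=None,  # fall back to the default session credentials
        content_handler=JSONContentHandler(),
    )
    print(se("Tell me a joke."))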

Source code for langchain.llms.self_hosted
(https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted.html)

"""Run model inference on self-hosted remote hardware."""
import importlib.util
import logging
import pickle
from typing import Any, Callable, List, Mapping, Optional

from pydantic import BaseModel, Extra

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens

logger = logging.getLogger()


def _generate_text(
    pipeline: Any,
    prompt: str,
    *args: Any,
    stop: Optional[List[str]] = None,
    **kwargs: Any,
) -> str:
    """Inference function to send to the remote hardware.

    Accepts a pipeline callable (or, more likely, a key pointing to the model
    on the cluster's object store) and returns text predictions for each document
    in the batch.
    """
    text = pipeline(prompt, *args, **kwargs)
    if stop is not None:
        text = enforce_stop_tokens(text, stop)
    return text


def _send_pipeline_to_device(pipeline: Any, device: int) -> Any:
    """Send a pipeline to a device on the cluster."""
    if isinstance(pipeline, str):
        with open(pipeline, "rb") as f:
            pipeline = pickle.load(f)

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )
        pipeline.device = torch.device(device)
        pipeline.model = pipeline.model.to(pipeline.device)
    return pipeline


class SelfHostedPipeline(LLM, BaseModel):
    """Run model inference on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified by IP address and SSH credentials
    (such as on-prem, or another cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example for custom pipeline and inference functions:
        .. code-block:: python

            from langchain.llms import SelfHostedPipeline
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            def load_pipeline():
                tokenizer = AutoTokenizer.from_pretrained("gpt2")
                model = AutoModelForCausalLM.from_pretrained("gpt2")
                return pipeline(
                    "text-generation", model=model, tokenizer=tokenizer,
                    max_new_tokens=10
                )

            def inference_fn(pipeline, prompt, stop=None):
                return pipeline(prompt)[0]["generated_text"]

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            llm = SelfHostedPipeline(
                model_load_fn=load_pipeline,
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
                inference_fn=inference_fn,
            )

    Example for <2GB model (can be serialized and sent directly to the server):
        .. code-block:: python

            from langchain.llms import SelfHostedPipeline
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            my_model = ...
            llm = SelfHostedPipeline.from_pipeline(
                pipeline=my_model,
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )

    Example passing model path for larger models:
        .. code-block:: python

            from langchain.llms import SelfHostedPipeline
            import runhouse as rh
            import pickle
            from transformers import pipeline

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            generator = pipeline(model="gpt2")
            rh.blob(pickle.dumps(generator), path="models/pipeline.pkl"
                    ).save().to(gpu, path="models")
            llm = SelfHostedPipeline.from_pipeline(
                pipeline="models/pipeline.pkl",
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    """

    pipeline_ref: Any  #: :meta private:
    client: Any  #: :meta private:
    inference_fn: Callable = _generate_text  #: :meta private:
    """Inference function to send to the remote hardware."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_load_fn: Callable
    """Function to load the model remotely on the server."""
    load_fn_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model load function."""
    model_reqs: List[str] = ["./", "torch"]
    """Requirements to install on hardware to inference the model."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def __init__(self, **kwargs: Any):
        """Init the pipeline with an auxiliary function.

        The load function must be in global scope to be imported
        and run on the server, i.e. in a module and not a REPL or closure.
        Then, initialize the remote inference function.
        """
        super().__init__(**kwargs)
        try:
            import runhouse as rh
        except ImportError:
            raise ValueError(
                "Could not import runhouse python package. "
                "Please install it with `pip install runhouse`."
            )

        remote_load_fn = rh.function(fn=self.model_load_fn).to(
            self.hardware, reqs=self.model_reqs
        )
        _load_fn_kwargs = self.load_fn_kwargs or {}
        self.pipeline_ref = remote_load_fn.remote(**_load_fn_kwargs)

        self.client = rh.function(fn=self.inference_fn).to(
            self.hardware, reqs=self.model_reqs
        )

    @classmethod
    def from_pipeline(
        cls,
        pipeline: Any,
        hardware: Any,
        model_reqs: Optional[List[str]] = None,
        device: int = 0,
        **kwargs: Any,
    ) -> LLM:
        """Init the SelfHostedPipeline from a pipeline object or string."""
        if not isinstance(pipeline, str):
            logger.warning(
                "Serializing pipeline to send to remote hardware. "
                "Note, it can be quite slow "
                "to serialize and send large models with each execution. "
                "Consider sending the pipeline "
                "to the cluster and passing the path to the pipeline instead."
            )

        load_fn_kwargs = {"pipeline": pipeline, "device": device}
        return cls(
            load_fn_kwargs=load_fn_kwargs,
            model_load_fn=_send_pipeline_to_device,
            hardware=hardware,
            model_reqs=["transformers", "torch"] + (model_reqs or []),
            **kwargs,
        )

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"hardware": self.hardware},
        }

    @property
    def _llm_type(self) -> str:
        return "self_hosted_llm"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop)

Source code for langchain.llms.self_hosted_hugging_face
(https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/self_hosted_hugging_face.html)

"""Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware."""
import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional

from pydantic import BaseModel, Extra

from langchain.llms.self_hosted import SelfHostedPipeline
from langchain.llms.utils import enforce_stop_tokens

DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation")

logger = logging.getLogger()


def _generate_text(
    pipeline: Any,
    prompt: str,
    *args: Any,
    stop: Optional[List[str]] = None,
    **kwargs: Any,
) -> str:
    """Inference function to send to the remote hardware.

    Accepts a Hugging Face pipeline (or more likely,
    a key pointing to such a pipeline on the cluster's object store)
    and returns generated text.
    """
    response = pipeline(prompt, *args, **kwargs)
    if pipeline.task == "text-generation":
        # Text generation return includes the starter text.
        text = response[0]["generated_text"][len(prompt) :]
    elif pipeline.task == "text2text-generation":
        text = response[0]["generated_text"]
    else:
        raise ValueError(
            f"Got invalid task {pipeline.task}, "
            f"currently only {VALID_TASKS} are supported"
        )
    if stop is not None:
        text = enforce_stop_tokens(text, stop)
    return text


def _load_transformer(
    model_id: str = DEFAULT_MODEL_ID,
    task: str = DEFAULT_TASK,
    device: int = 0,
    model_kwargs: Optional[dict] = None,
) -> Any:
    """Inference function to send to the remote hardware.

    Accepts a huggingface model_id and returns a pipeline for the task.
    """
    from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
    from transformers import pipeline as hf_pipeline

    _model_kwargs = model_kwargs or {}
    tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)

    try:
        if task == "text-generation":
            model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
        elif task == "text2text-generation":
            model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
        else:
            raise ValueError(
                f"Got invalid task {task}, "
                f"currently only {VALID_TASKS} are supported"
            )
    except ImportError as e:
        raise ValueError(
            f"Could not load the {task} model due to missing dependencies."
        ) from e

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )

    pipeline = hf_pipeline(
        task=task,
        model=model,
        tokenizer=tokenizer,
        device=device,
        model_kwargs=_model_kwargs,
    )
    if pipeline.task not in VALID_TASKS:
        raise ValueError(
            f"Got invalid task {pipeline.task}, "
            f"currently only {VALID_TASKS} are supported"
        )
    return pipeline


class SelfHostedHuggingFaceLLM(SelfHostedPipeline, BaseModel):
    """Wrapper around HuggingFace Pipeline API to run on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified by IP address and SSH credentials
    (such as on-prem, or another cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Only supports `text-generation` and `text2text-generation` for now.

    Example using from_model_id:
        .. code-block:: python

            from langchain.llms import SelfHostedHuggingFaceLLM
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceLLM(
                model_id="google/flan-t5-large", task="text2text-generation",
                hardware=gpu
            )

    Example passing a function that generates a pipeline (because the pipeline
    is not serializable):
        .. code-block:: python

            from langchain.llms import SelfHostedHuggingFaceLLM
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            def get_pipeline():
                model_id = "gpt2"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                pipe = pipeline(
                    "text-generation", model=model, tokenizer=tokenizer
                )
                return pipe

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceLLM(
                model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
    """

    model_id: str = DEFAULT_MODEL_ID
    """Hugging Face model_id to load the model."""
    task: str = DEFAULT_TASK
    """Hugging Face task (either "text-generation" or "text2text-generation")."""
    device: int = 0
    """Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_reqs: List[str] = ["./", "transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    model_load_fn: Callable = _load_transformer
    """Function to load the model remotely on the server."""
    inference_fn: Callable = _generate_text  #: :meta private:
    """Inference function to send to the remote hardware."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def __init__(self, **kwargs: Any):
        """Construct the pipeline remotely using an auxiliary function.

        The load function needs to be importable to be imported
        and run on the server, i.e. in a module and not a REPL or closure.
        Then, initialize the remote inference function.
        """
        load_fn_kwargs = {
            "model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
            "task": kwargs.get("task", DEFAULT_TASK),
            "device": kwargs.get("device", 0),
            "model_kwargs": kwargs.get("model_kwargs", None),
        }
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_id": self.model_id},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        return "selfhosted_huggingface_pipeline"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop)
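
The prompt-stripping in ``_generate_text`` exists because Hugging Face ``text-generation`` pipelines echo the prompt at the start of ``generated_text``, while ``text2text-generation`` pipelines return only the new text. A small local illustration (the continuation is whatever the model happens to sample):

.. code-block:: python

    from transformers import pipeline

    pipe = pipeline("text-generation", model="gpt2")
    prompt = "The capital of France is"
    out = pipe(prompt, max_new_tokens=5)
    # out looks like [{"generated_text": "The capital of France is ..."}]:
    # the full string starts with the prompt, so slicing off len(prompt)
    # characters keeps only the newly generated continuation.
    completion = out[0]["generated_text"][len(prompt):]
    print(completion)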

Source code for langchain.llms.stochasticai
(https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/stochasticai.html)

"""Wrapper around StochasticAI APIs."""
import logging
import time
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import BaseModel, Extra, Field, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class StochasticAI(LLM, BaseModel):
    """Wrapper around StochasticAI large language models.

    To use, you should have the environment variable ``STOCHASTICAI_API_KEY``
    set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import StochasticAI
            stochasticai = StochasticAI(api_url="")
    """

    api_url: str = ""
    """URL of the deployed model endpoint to call."""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not
    explicitly specified."""

    stochasticai_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}

        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        stochasticai_api_key = get_from_dict_or_env(
            values, "stochasticai_api_key", "STOCHASTICAI_API_KEY"
        )
        values["stochasticai_api_key"] = stochasticai_api_key
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"endpoint_url": self.api_url},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "stochasticai"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call out to StochasticAI's complete endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = stochasticai("Tell me a joke.")
        """
        params = self.model_kwargs or {}
        response_post = requests.post(
            url=self.api_url,
            json={"prompt": prompt, "params": params},
            headers={
                "apiKey": f"{self.stochasticai_api_key}",
                "Accept": "application/json",
                "Content-Type": "application/json",
            },
        )
        response_post.raise_for_status()
        response_post_json = response_post.json()
        completed = False
        while not completed:
            response_get = requests.get(
                url=response_post_json["data"]["responseUrl"],
                headers={
                    "apiKey": f"{self.stochasticai_api_key}",
                    "Accept": "application/json",
                    "Content-Type": "application/json",
                },
            )
            response_get.raise_for_status()
            response_get_json = response_get.json()["data"]
            text = response_get_json.get("completion")
            completed = text is not None
            time.sleep(0.5)
        text = text[0]
        if stop is not None:
            # I believe this is required since the stop tokens
            # are not enforced by the model parameters
            text = enforce_stop_tokens(text, stop)
        return text
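
A short usage sketch. The endpoint URL below is a hypothetical placeholder (StochasticAI endpoints are deployment-specific), and note that any keyword argument that is not a declared field, such as ``max_tokens`` here, is swept into ``model_kwargs`` by the ``build_extra`` validator above.

.. code-block:: python

    import os

    os.environ["STOCHASTICAI_API_KEY"] = "..."  # placeholder key

    llm = StochasticAI(
        api_url="https://api.example.com/v1/modelApi/submit/flan-t5",  # hypothetical URL
        max_tokens=64,  # not a declared field: moved into model_kwargs with a warning
    )
    print(llm("Tell me a joke."))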

Source code for langchain.llms.writer
(https://langchain.readthedocs.io/en/latest/_modules/langchain/llms/writer.html)

"""Wrapper around Writer APIs."""
from typing import Any, Dict, List, Mapping, Optional

import requests
from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env


class Writer(LLM, BaseModel):
    """Wrapper around Writer large language models.

    To use, you should have the environment variable ``WRITER_API_KEY``
    set with your API key.

    Example:
        .. code-block:: python

            from langchain import Writer
            writer = Writer(model_id="palmyra-base")
    """

    model_id: str = "palmyra-base"
    """Model name to use."""

    tokens_to_generate: int = 24
    """Max number of tokens to generate."""

    logprobs: bool = False
    """Whether to return log probabilities."""

    temperature: float = 1.0
    """What sampling temperature to use."""

    length: int = 256
    """The maximum number of tokens to generate in the completion."""

    top_p: float = 1.0
    """Total probability mass of tokens to consider at each step."""

    top_k: int = 1
    """The number of highest probability vocabulary tokens to
    keep for top-k-filtering."""

    repetition_penalty: float = 1.0
    """Penalizes repeated tokens according to frequency."""

    random_seed: int = 0
    """The model generates random results.
    Changing the random seed alone will produce a different response
    with similar characteristics. It is possible to reproduce results
    by fixing the random seed (assuming all other hyperparameters
    are also fixed)."""

    beam_search_diversity_rate: float = 1.0
    """Only applies to beam search, i.e. when the beam width is >1.
    A higher value encourages beam search to return a more diverse
    set of candidates."""

    beam_width: Optional[int] = None
    """The number of concurrent candidates to keep track of
    during beam search."""

    length_penalty: float = 1.0
    """Only applies to beam search, i.e. when the beam width is >1.
    Larger values penalize long candidates more heavily, thus preferring
    shorter candidates."""

    writer_api_key: Optional[str] = None

    stop: Optional[List[str]] = None
    """Sequences when completion generation will stop."""

    base_url: Optional[str] = None
    """Base url to use, if None decides based on model name."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key exists in environment."""
        writer_api_key = get_from_dict_or_env(
            values, "writer_api_key", "WRITER_API_KEY"
        )
        values["writer_api_key"] = writer_api_key
        return values

    @property
    def _default_params(self) -> Mapping[str, Any]:
        """Get the default parameters for calling Writer API."""
        return {
            "tokens_to_generate": self.tokens_to_generate,
            "stop": self.stop,
            "logprobs": self.logprobs,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "repetition_penalty": self.repetition_penalty,
            "random_seed": self.random_seed,
            "beam_search_diversity_rate": self.beam_search_diversity_rate,
            "beam_width": self.beam_width,
            "length_penalty": self.length_penalty,
        }

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_id": self.model_id}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "writer"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call out to Writer's complete endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = writer("Tell me a joke.")
        """
        if self.base_url is not None:
            base_url = self.base_url
        else:
            base_url = (
                f"https://api.llm.writer.com/v1/models/{self.model_id}/completions"
            )
        response = requests.post(
            url=base_url,
            headers={
                "Authorization": f"Bearer {self.writer_api_key}",
                "Content-Type": "application/json",
                "Accept": "application/json",
            },
            json={"prompt": prompt, **self._default_params},
        )
        text = response.text
        if stop is not None:
            # I believe this is required since the stop tokens
            # are not enforced by the model parameters
            text = enforce_stop_tokens(text, stop)
        return text
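
A usage sketch, assuming ``WRITER_API_KEY`` is set in the environment. Note that ``_call`` above returns the raw response body (``response.text``) rather than parsing a completion field out of a JSON payload.

.. code-block:: python

    import os

    os.environ["WRITER_API_KEY"] = "..."  # placeholder key

    writer = Writer(
        model_id="palmyra-base",
        tokens_to_generate=50,
        temperature=0.7,
        stop=["\n\n"],
    )
    print(writer("Write a tagline for an ice cream shop."))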

Source code for langchain.prompts.base
(https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/base.html)

"""BasePrompt schema definition."""
from __future__ import annotations

import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Mapping, Optional, Union

import yaml
from pydantic import BaseModel, Extra, Field, root_validator

from langchain.formatting import formatter
from langchain.schema import BaseMessage, BaseOutputParser, HumanMessage, PromptValue


def jinja2_formatter(template: str, **kwargs: Any) -> str:
    """Format a template using jinja2."""
    try:
        from jinja2 import Template
    except ImportError:
        raise ValueError(
            "jinja2 not installed, which is needed to use the jinja2_formatter. "
            "Please install it with `pip install jinja2`."
        )

    return Template(template).render(**kwargs)


DEFAULT_FORMATTER_MAPPING: Dict[str, Callable] = {
    "f-string": formatter.format,
    "jinja2": jinja2_formatter,
}


def check_valid_template(
    template: str, template_format: str, input_variables: List[str]
) -> None:
    """Check that template string is valid."""
    if template_format not in DEFAULT_FORMATTER_MAPPING:
        valid_formats = list(DEFAULT_FORMATTER_MAPPING)
        raise ValueError(
            f"Invalid template format. Got `{template_format}`;"
            f" should be one of {valid_formats}"
        )
    dummy_inputs = {input_variable: "foo" for input_variable in input_variables}
    try:
        formatter_func = DEFAULT_FORMATTER_MAPPING[template_format]
        formatter_func(template, **dummy_inputs)
    except KeyError as e:
        raise ValueError(
            "Invalid prompt schema; check for mismatched or missing input parameters. "
            + str(e)
        )


class StringPromptValue(PromptValue):
    text: str

    def to_string(self) -> str:
        """Return prompt as string."""
        return self.text

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return [HumanMessage(content=self.text)]


class BasePromptTemplate(BaseModel, ABC):
    """Base class for all prompt templates, returning a prompt."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""
    output_parser: Optional[BaseOutputParser] = None
    """How to parse the output of calling an LLM on this formatted prompt."""
    partial_variables: Mapping[str, Union[str, Callable[[], str]]] = Field(
        default_factory=dict
    )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @abstractmethod
    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""

    @root_validator()
    def validate_variable_names(cls, values: Dict) -> Dict:
        """Validate variable names do not include restricted names."""
        if "stop" in values["input_variables"]:
            raise ValueError(
                "Cannot have an input variable named 'stop', as it is used internally,"
                " please rename."
            )
        if "stop" in values["partial_variables"]:
            raise ValueError(
                "Cannot have a partial variable named 'stop', as it is used "
                "internally, please rename."
            )

        overall = set(values["input_variables"]).intersection(
            values["partial_variables"]
        )
        if overall:
            raise ValueError(
                f"Found overlapping input and partial variables: {overall}"
            )
        return values

    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
        """Return a partial of the prompt template."""
        prompt_dict = self.__dict__.copy()
        prompt_dict["input_variables"] = list(
            set(self.input_variables).difference(kwargs)
        )
        prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs}
        return type(self)(**prompt_dict)

    def _merge_partial_and_user_variables(self, **kwargs: Any) -> Dict[str, Any]:
        # Get partial params:
        partial_kwargs = {
            k: v if isinstance(v, str) else v()
            for k, v in self.partial_variables.items()
        }
        return {**partial_kwargs, **kwargs}

    @abstractmethod
    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """

    @property
    @abstractmethod
    def _prompt_type(self) -> str:
        """Return the prompt type key."""

    def dict(self, **kwargs: Any) -> Dict:
        """Return dictionary representation of prompt."""
        prompt_dict = super().dict(**kwargs)
        prompt_dict["_type"] = self._prompt_type
        return prompt_dict

    def save(self, file_path: Union[Path, str]) -> None:
        """Save the prompt.

        Args:
            file_path: Path to directory to save prompt to.

        Example:
        .. code-block:: python

            prompt.save(file_path="path/prompt.yaml")
        """
        if self.partial_variables:
            raise ValueError("Cannot save prompt with partial variables.")

        # Convert file to Path object.
        if isinstance(file_path, str):
            save_path = Path(file_path)
        else:
            save_path = file_path

        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)

        # Fetch dictionary to save
        prompt_dict = self.dict()

        if save_path.suffix == ".json":
            with open(file_path, "w") as f:
                json.dump(prompt_dict, f, indent=4)
        elif save_path.suffix == ".yaml":
            with open(file_path, "w") as f:
                yaml.dump(prompt_dict, f, default_flow_style=False)
        else:
            raise ValueError(f"{save_path} must be json or yaml")


class StringPromptTemplate(BasePromptTemplate, ABC):
    """String prompt should expose the format method, returning a prompt."""

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Create Chat Messages."""
        return StringPromptValue(text=self.format(**kwargs))
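
A short sketch of ``partial`` in use, with ``PromptTemplate`` (defined later in this document) as the concrete subclass. String values are substituted as-is; callables are invoked at format time by ``_merge_partial_and_user_variables``.

.. code-block:: python

    from langchain.prompts import PromptTemplate

    prompt = PromptTemplate(
        input_variables=["subject", "tone"],
        template="Write a {tone} note about {subject}.",
    )
    # Pin `tone` ahead of time; the partial now only expects `subject`.
    formal = prompt.partial(tone="formal")
    print(formal.format(subject="the quarterly report"))

    # Callables are resolved each time format() is called.
    casual = prompt.partial(tone=lambda: "casual")
    print(casual.format(subject="lunch plans"))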

Source code for langchain.prompts.chat
(https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/chat.html)

"""Chat prompt template."""
from __future__ import annotations

from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Callable, List, Sequence, Tuple, Type, Union

from pydantic import BaseModel, Field

from langchain.memory.buffer import get_buffer_string
from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatMessage,
    HumanMessage,
    PromptValue,
    SystemMessage,
)


class BaseMessagePromptTemplate(BaseModel, ABC):
    @abstractmethod
    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """To messages."""

    @property
    @abstractmethod
    def input_variables(self) -> List[str]:
        """Input variables for this prompt template."""


class MessagesPlaceholder(BaseMessagePromptTemplate):
    """Prompt template that assumes variable is already list of messages."""

    variable_name: str

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        """To a BaseMessage."""
        value = kwargs[self.variable_name]
        if not isinstance(value, list):
            raise ValueError(
                f"variable {self.variable_name} should be a list of base messages, "
                f"got {value}"
            )
        for v in value:
            if not isinstance(v, BaseMessage):
                raise ValueError(
                    f"variable {self.variable_name} should be a list of base messages,"
                    f" got {value}"
                )
        return value

    @property
    def input_variables(self) -> List[str]:
        """Input variables for this prompt template."""
        return [self.variable_name]


class BaseStringMessagePromptTemplate(BaseMessagePromptTemplate, ABC):
    prompt: StringPromptTemplate
    additional_kwargs: dict = Field(default_factory=dict)

    @classmethod
    def from_template(cls, template: str, **kwargs: Any) -> BaseMessagePromptTemplate:
        prompt = PromptTemplate.from_template(template)
        return cls(prompt=prompt, **kwargs)

    @abstractmethod
    def format(self, **kwargs: Any) -> BaseMessage:
        """To a BaseMessage."""

    def format_messages(self, **kwargs: Any) -> List[BaseMessage]:
        return [self.format(**kwargs)]

    @property
    def input_variables(self) -> List[str]:
        return self.prompt.input_variables


class ChatMessagePromptTemplate(BaseStringMessagePromptTemplate):
    role: str

    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return ChatMessage(
            content=text, role=self.role, additional_kwargs=self.additional_kwargs
        )


class HumanMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return HumanMessage(content=text, additional_kwargs=self.additional_kwargs)


class AIMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return AIMessage(content=text, additional_kwargs=self.additional_kwargs)


class SystemMessagePromptTemplate(BaseStringMessagePromptTemplate):
    def format(self, **kwargs: Any) -> BaseMessage:
        text = self.prompt.format(**kwargs)
        return SystemMessage(content=text, additional_kwargs=self.additional_kwargs)


class ChatPromptValue(PromptValue):
    messages: List[BaseMessage]

    def to_string(self) -> str:
        """Return prompt as string."""
        return get_buffer_string(self.messages)

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return self.messages


class ChatPromptTemplate(BasePromptTemplate, ABC):
    input_variables: List[str]
    messages: List[Union[BaseMessagePromptTemplate, BaseMessage]]

    @classmethod
    def from_role_strings(
        cls, string_messages: List[Tuple[str, str]]
    ) -> ChatPromptTemplate:
        messages = [
            ChatMessagePromptTemplate(
                prompt=PromptTemplate.from_template(template), role=role
            )
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_strings(
        cls, string_messages: List[Tuple[Type[BaseMessagePromptTemplate], str]]
    ) -> ChatPromptTemplate:
        messages = [
            role(prompt=PromptTemplate.from_template(template))
            for role, template in string_messages
        ]
        return cls.from_messages(messages)

    @classmethod
    def from_messages(
        cls, messages: Sequence[Union[BaseMessagePromptTemplate, BaseMessage]]
    ) -> ChatPromptTemplate:
        input_vars = set()
        for message in messages:
            if isinstance(message, BaseMessagePromptTemplate):
                input_vars.update(message.input_variables)
        return cls(input_variables=list(input_vars), messages=messages)

    def format(self, **kwargs: Any) -> str:
        return self.format_prompt(**kwargs).to_string()

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        result = []
        for message_template in self.messages:
            if isinstance(message_template, BaseMessage):
                result.extend([message_template])
            elif isinstance(message_template, BaseMessagePromptTemplate):
                rel_params = {
                    k: v
                    for k, v in kwargs.items()
                    if k in message_template.input_variables
                }
                message = message_template.format_messages(**rel_params)
                result.extend(message)
            else:
                raise ValueError(f"Unexpected input: {message_template}")
        return ChatPromptValue(messages=result)

    def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate:
        raise NotImplementedError

    @property
    def _prompt_type(self) -> str:
        raise NotImplementedError

    def save(self, file_path: Union[Path, str]) -> None:
        raise NotImplementedError
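
A usage sketch tying the pieces together: ``from_messages`` collects the input variables declared by each message template, and ``format_prompt`` routes only the relevant variables to each one.

.. code-block:: python

    from langchain.prompts.chat import (
        ChatPromptTemplate,
        HumanMessagePromptTemplate,
        SystemMessagePromptTemplate,
    )

    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(
            "You are a helpful assistant that translates "
            "{input_language} to {output_language}."
        ),
        HumanMessagePromptTemplate.from_template("{text}"),
    ])
    prompt_value = chat_prompt.format_prompt(
        input_language="English",
        output_language="French",
        text="I love programming.",
    )
    print(prompt_value.to_messages())  # [SystemMessage(...), HumanMessage(...)]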

Source code for langchain.prompts.few_shot
(https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot.html)

"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.prompts.base import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
    check_valid_template,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


class FewShotPromptTemplate(StringPromptTemplate, BaseModel):
    """Prompt template that contains few shot examples."""

    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""

    example_selector: Optional[BaseExampleSelector] = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""

    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""

    suffix: str
    """A prompt template string to put after the examples."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""

    prefix: str = ""
    """A prompt template string to put before the examples."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
        examples = values.get("examples", None)
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )

        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )

        return values

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix and input variables are consistent."""
        if values["validate_template"]:
            check_valid_template(
                values["prefix"] + values["suffix"],
                values["template_format"],
                values["input_variables"] + list(values["partial_variables"]),
            )
        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def _get_examples(self, **kwargs: Any) -> List[dict]:
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        # Format the examples.
        example_strings = [
            self.example_prompt.format(**example) for example in examples
        ]
        # Create the overall template.
        pieces = [self.prefix, *example_strings, self.suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot"

    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)
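
A usage sketch: the prefix, formatted examples, and suffix are joined by ``example_separator``, then the result is formatted with the remaining input variables.

.. code-block:: python

    from langchain.prompts import FewShotPromptTemplate, PromptTemplate

    example_prompt = PromptTemplate(
        input_variables=["word", "antonym"],
        template="Word: {word}\nAntonym: {antonym}",
    )
    few_shot = FewShotPromptTemplate(
        examples=[
            {"word": "happy", "antonym": "sad"},
            {"word": "tall", "antonym": "short"},
        ],
        example_prompt=example_prompt,
        prefix="Give the antonym of every input.",
        suffix="Word: {input}\nAntonym:",
        input_variables=["input"],
    )
    print(few_shot.format(input="big"))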

Source code for langchain.prompts.few_shot_with_templates
(https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/few_shot_with_templates.html)

"""Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.prompts.base import (
    DEFAULT_FORMATTER_MAPPING,
    StringPromptTemplate,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate


class FewShotPromptWithTemplates(StringPromptTemplate, BaseModel):
    """Prompt template that contains few shot examples."""

    examples: Optional[List[dict]] = None
    """Examples to format into the prompt.
    Either this or example_selector should be provided."""

    example_selector: Optional[BaseExampleSelector] = None
    """ExampleSelector to choose the examples to format into the prompt.
    Either this or examples should be provided."""

    example_prompt: PromptTemplate
    """PromptTemplate used to format an individual example."""

    suffix: StringPromptTemplate
    """A PromptTemplate to put after the examples."""

    input_variables: List[str]
    """A list of the names of the variables the prompt template expects."""

    example_separator: str = "\n\n"
    """String separator used to join the prefix, the examples, and suffix."""

    prefix: Optional[StringPromptTemplate] = None
    """A PromptTemplate to put before the examples."""

    template_format: str = "f-string"
    """The format of the prompt template. Options are: 'f-string', 'jinja2'."""

    validate_template: bool = True
    """Whether or not to try validating the template."""

    @root_validator(pre=True)
    def check_examples_and_selector(cls, values: Dict) -> Dict:
        """Check that one and only one of examples/example_selector are provided."""
        examples = values.get("examples", None)
        example_selector = values.get("example_selector", None)
        if examples and example_selector:
            raise ValueError(
                "Only one of 'examples' and 'example_selector' should be provided"
            )

        if examples is None and example_selector is None:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )

        return values

    @root_validator()
    def template_is_valid(cls, values: Dict) -> Dict:
        """Check that prefix, suffix and input variables are consistent."""
        if values["validate_template"]:
            input_variables = values["input_variables"]
            expected_input_variables = set(values["suffix"].input_variables)
            expected_input_variables |= set(values["partial_variables"])
            if values["prefix"] is not None:
                expected_input_variables |= set(values["prefix"].input_variables)
            missing_vars = expected_input_variables.difference(input_variables)
            if missing_vars:
                raise ValueError(
                    f"Got input_variables={input_variables}, but based on "
                    f"prefix/suffix expected {expected_input_variables}"
                )
        return values

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    def _get_examples(self, **kwargs: Any) -> List[dict]:
        if self.examples is not None:
            return self.examples
        elif self.example_selector is not None:
            return self.example_selector.select_examples(kwargs)
        else:
            raise ValueError(
                "One of 'examples' and 'example_selector' should be provided"
            )

    def format(self, **kwargs: Any) -> str:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Example:

        .. code-block:: python

            prompt.format(variable1="foo")
        """
        kwargs = self._merge_partial_and_user_variables(**kwargs)
        # Get the examples to use.
        examples = self._get_examples(**kwargs)
        # Format the examples.
        example_strings = [
            self.example_prompt.format(**example) for example in examples
        ]
        # Create the overall prefix.
        if self.prefix is None:
            prefix = ""
        else:
            prefix_kwargs = {
                k: v for k, v in kwargs.items() if k in self.prefix.input_variables
            }
            for k in prefix_kwargs.keys():
                kwargs.pop(k)
            prefix = self.prefix.format(**prefix_kwargs)
        # Create the overall suffix.
        suffix_kwargs = {
            k: v for k, v in kwargs.items() if k in self.suffix.input_variables
        }
        for k in suffix_kwargs.keys():
            kwargs.pop(k)
        suffix = self.suffix.format(
            **suffix_kwargs,
        )

        pieces = [prefix, *example_strings, suffix]
        template = self.example_separator.join([piece for piece in pieces if piece])
        # Format the template with the input variables.
        return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "few_shot_with_templates"

    def dict(self, **kwargs: Any) -> Dict:
        """Return a dictionary of the prompt."""
        if self.example_selector:
            raise ValueError("Saving an example selector is not currently supported")
        return super().dict(**kwargs)
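
Unlike ``FewShotPromptTemplate``, the prefix and suffix here are themselves templates, each formatted with only the variables it declares. A short sketch:

.. code-block:: python

    from langchain.prompts import PromptTemplate
    from langchain.prompts.few_shot_with_templates import FewShotPromptWithTemplates

    example_prompt = PromptTemplate(
        input_variables=["q", "a"], template="Q: {q}\nA: {a}"
    )
    prefix = PromptTemplate(
        input_variables=["topic"], template="Answer questions about {topic}."
    )
    suffix = PromptTemplate(input_variables=["question"], template="Q: {question}\nA:")

    prompt = FewShotPromptWithTemplates(
        examples=[{"q": "2+2?", "a": "4"}],
        example_prompt=example_prompt,
        prefix=prefix,
        suffix=suffix,
        input_variables=["topic", "question"],
    )
    print(prompt.format(topic="arithmetic", question="3+5?"))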

Source code for langchain.prompts.loading
(https://langchain.readthedocs.io/en/latest/_modules/langchain/prompts/loading.html)

"""Load prompts from disk."""
import importlib
import json
import logging
from pathlib import Path
from typing import Union

import yaml

from langchain.output_parsers.regex import RegexParser
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.utilities.loading import try_load_from_hub

URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__file__)


def load_prompt_from_config(config: dict) -> BasePromptTemplate:
    """Load prompt from Config Dict."""
    if "_type" not in config:
        logger.warning("No `_type` key found, defaulting to `prompt`.")
    config_type = config.pop("_type", "prompt")

    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} prompt not supported")

    prompt_loader = type_to_loader_dict[config_type]
    return prompt_loader(config)


def _load_template(var_name: str, config: dict) -> dict:
    """Load template from disk if applicable."""
    # Check if template_path exists in config.
    if f"{var_name}_path" in config:
        # If it does, make sure template variable doesn't also exist.
        if var_name in config:
            raise ValueError(
                f"Both `{var_name}_path` and `{var_name}` cannot be provided."
            )
        # Pop the template path from the config.
        template_path = Path(config.pop(f"{var_name}_path"))
        # Load the template.
        if template_path.suffix == ".txt":
            with open(template_path) as f:
                template = f.read()
        else:
            raise ValueError(
                f"Got unsupported template file type {template_path.suffix}; "
                "only .txt is supported."
            )
        # Set the template variable to the extracted variable.
        config[var_name] = template
    return config


def _load_examples(config: dict) -> dict:
    """Load examples if necessary."""
    if isinstance(config["examples"], list):
        pass
    elif isinstance(config["examples"], str):
        with open(config["examples"]) as f:
            if config["examples"].endswith(".json"):
                examples = json.load(f)
            elif config["examples"].endswith((".yaml", ".yml")):
                examples = yaml.safe_load(f)
            else:
                raise ValueError(
                    "Invalid file format. Only json or yaml formats are supported."
                )
        config["examples"] = examples
    else:
        raise ValueError("Invalid examples format. Only list or string are supported.")
    return config


def _load_output_parser(config: dict) -> dict:
    """Load output parser."""
    if "output_parsers" in config:
        if config["output_parsers"] is not None:
            _config = config["output_parsers"]
            output_parser_type = _config["_type"]
            if output_parser_type == "regex_parser":
                output_parser = RegexParser(**_config)
            else:
                raise ValueError(f"Unsupported output parser {output_parser_type}")
            config["output_parsers"] = output_parser
    return config


def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
    """Load the few shot prompt from the config."""
    # Load the suffix and prefix templates.
    config = _load_template("suffix", config)
    config = _load_template("prefix", config)
    # Load the example prompt.
    if "example_prompt_path" in config:
        if "example_prompt" in config:
            raise ValueError(
                "Only one of example_prompt and example_prompt_path should "
                "be specified."
            )
        config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
    else:
        config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
    # Load the examples.
    config = _load_examples(config)
    config = _load_output_parser(config)
    return FewShotPromptTemplate(**config)


def _load_prompt(config: dict) -> PromptTemplate:
    """Load the prompt template from config."""
    # Load the template from disk if necessary.
    config = _load_template("template", config)
    config = _load_output_parser(config)
    return PromptTemplate(**config)


def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
    """Unified method for loading a prompt from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
    ):
        return hub_result
    else:
        return _load_prompt_from_file(path)


def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
    """Load prompt from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    elif file_path.suffix == ".py":
        spec = importlib.util.spec_from_loader(
            "prompt", loader=None, origin=str(file_path)
        )
        if spec is None:
            raise ValueError("could not load spec")
        helper = importlib.util.module_from_spec(spec)
        with open(file_path, "rb") as f:
            exec(f.read(), helper.__dict__)
        if not isinstance(helper.PROMPT, BasePromptTemplate):
            raise ValueError("Did not get object of type BasePromptTemplate.")
        return helper.PROMPT
    else:
        raise ValueError(f"Got unsupported file type {file_path.suffix}")
    # Load the prompt from the config now.
    return load_prompt_from_config(config)


type_to_loader_dict = {
    "prompt": _load_prompt,
    "few_shot": _load_few_shot_prompt,
    # "few_shot_with_templates": _load_few_shot_with_templates_prompt,
}
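
A minimal illustration of the config format ``_load_prompt`` expects; the file name is arbitrary:

.. code-block:: python

    import json

    from langchain.prompts.loading import load_prompt

    config = {
        "_type": "prompt",
        "input_variables": ["adjective"],
        "template": "Tell me a {adjective} joke.",
    }
    with open("simple_prompt.json", "w") as f:
        json.dump(config, f)

    # load_prompt falls back to the local filesystem for non-hub paths.
    prompt = load_prompt("simple_prompt.json")
    print(prompt.format(adjective="funny"))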
d8a0025fab43-0
Source code for langchain.prompts.prompt """Prompt schema definition.""" from __future__ import annotations from pathlib import Path from string import Formatter from typing import Any, Dict, List, Union from pydantic import BaseModel, Extra, root_validator from langchain.prompts.base import ( DEFAULT_FORMATTER_MAPPING, StringPromptTemplate, check_valid_template, ) [docs]class PromptTemplate(StringPromptTemplate, BaseModel): """Schema to represent a prompt for an LLM. Example: .. code-block:: python from langchain import PromptTemplate prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") """ input_variables: List[str] """A list of the names of the variables the prompt template expects.""" template: str """The prompt template.""" template_format: str = "f-string" """The format of the prompt template. Options are: 'f-string', 'jinja2'.""" validate_template: bool = True """Whether or not to try validating the template.""" @property def _prompt_type(self) -> str: """Return the prompt type key.""" return "prompt" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] def format(self, **kwargs: Any) -> str: """Format the prompt with the inputs. Args: kwargs: Any arguments to be passed to the prompt template. Returns: A formatted string. Example: .. code-block:: python prompt.format(variable1="foo") """ kwargs = self._merge_partial_and_user_variables(**kwargs)
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\prompts\\prompt.html"
d8a0025fab43-1
""" kwargs = self._merge_partial_and_user_variables(**kwargs) return DEFAULT_FORMATTER_MAPPING[self.template_format](self.template, **kwargs) @root_validator() def template_is_valid(cls, values: Dict) -> Dict: """Check that template and input variables are consistent.""" if values["validate_template"]: all_inputs = values["input_variables"] + list(values["partial_variables"]) check_valid_template( values["template"], values["template_format"], all_inputs ) return values [docs] @classmethod def from_examples( cls, examples: List[str], suffix: str, input_variables: List[str], example_separator: str = "\n\n", prefix: str = "", ) -> PromptTemplate: """Take examples in list format with prefix and suffix to create a prompt. Intended be used as a way to dynamically create a prompt from examples. Args: examples: List of examples to use in the prompt. suffix: String to go after the list of examples. Should generally set up the user's input. input_variables: A list of variable names the final prompt template will expect. example_separator: The separator to use in between examples. Defaults to two new line characters. prefix: String that should go before any examples. Generally includes examples. Default to an empty string. Returns: The final prompt generated. """ template = example_separator.join([prefix, *examples, suffix]) return cls(input_variables=input_variables, template=template) [docs] @classmethod def from_file( cls, template_file: Union[str, Path], input_variables: List[str] ) -> PromptTemplate:
        """Load a prompt from a file.

        Args:
            template_file: The path to the file containing the prompt template.
            input_variables: A list of variable names the final prompt template
                will expect.

        Returns:
            The prompt loaded from the file.
        """
        with open(str(template_file), "r") as f:
            template = f.read()
        return cls(input_variables=input_variables, template=template)

[docs]    @classmethod
    def from_template(cls, template: str) -> PromptTemplate:
        """Load a prompt template from a template."""
        input_variables = {
            v for _, v, _, _ in Formatter().parse(template) if v is not None
        }
        return cls(input_variables=list(sorted(input_variables)), template=template)


# For backwards compatibility.
Prompt = PromptTemplate
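A short usage sketch of the class above; ``from_template`` infers ``input_variables`` from the f-string placeholders via ``Formatter().parse``:

.. code-block:: python

    from langchain import PromptTemplate

    prompt = PromptTemplate.from_template("Say {foo} to {bar}")
    # input_variables were inferred as ["bar", "foo"]
    print(prompt.format(foo="hello", bar="world"))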
Source code for langchain.prompts.example_selector.length_based """Select examples based on length.""" import re from typing import Callable, Dict, List from pydantic import BaseModel, validator from langchain.prompts.example_selector.base import BaseExampleSelector from langchain.prompts.prompt import PromptTemplate def _get_length_based(text: str) -> int: return len(re.split("\n| ", text)) [docs]class LengthBasedExampleSelector(BaseExampleSelector, BaseModel): """Select examples based on length.""" examples: List[dict] """A list of the examples that the prompt template expects.""" example_prompt: PromptTemplate """Prompt template used to format the examples.""" get_text_length: Callable[[str], int] = _get_length_based """Function to measure prompt length. Defaults to word count.""" max_length: int = 2048 """Max length for the prompt, beyond which examples are cut.""" example_text_lengths: List[int] = [] #: :meta private: [docs] def add_example(self, example: Dict[str, str]) -> None: """Add new example to list.""" self.examples.append(example) string_example = self.example_prompt.format(**example) self.example_text_lengths.append(self.get_text_length(string_example)) @validator("example_text_lengths", always=True) def calculate_example_text_lengths(cls, v: List[int], values: Dict) -> List[int]: """Calculate text lengths if they don't exist.""" # Check if text lengths were passed in if v: return v # If they were not, calculate them example_prompt = values["example_prompt"] get_text_length = values["get_text_length"]
        string_examples = [example_prompt.format(**eg) for eg in values["examples"]]
        return [get_text_length(eg) for eg in string_examples]

[docs]    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on the input lengths."""
        inputs = " ".join(input_variables.values())
        remaining_length = self.max_length - self.get_text_length(inputs)
        i = 0
        examples = []
        while remaining_length > 0 and i < len(self.examples):
            new_length = remaining_length - self.example_text_lengths[i]
            if new_length < 0:
                break
            else:
                examples.append(self.examples[i])
                remaining_length = new_length
            i += 1
        return examples
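A minimal sketch of the selector above; the word-count default (``_get_length_based``) decides how many examples still fit under ``max_length``:

.. code-block:: python

    from langchain.prompts import PromptTemplate
    from langchain.prompts.example_selector import LengthBasedExampleSelector

    example_prompt = PromptTemplate(
        input_variables=["input", "output"],
        template="Input: {input}\nOutput: {output}",
    )
    selector = LengthBasedExampleSelector(
        examples=[
            {"input": "happy", "output": "sad"},
            {"input": "tall", "output": "short"},
        ],
        example_prompt=example_prompt,
        max_length=10,  # measured in words by the default get_text_length
    )
    # Longer inputs leave less room, so fewer examples come back.
    print(selector.select_examples({"adjective": "big"}))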
Source code for langchain.prompts.example_selector.semantic_similarity

"""Example selector that selects examples based on SemanticSimilarity."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore


def sorted_values(values: Dict[str, str]) -> List[Any]:
    """Return a list of values in dict sorted by key."""
    return [values[val] for val in sorted(values)]


[docs]class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
    """Example selector that selects examples based on SemanticSimilarity."""

    vectorstore: VectorStore
    """VectorStore that contains information about examples."""
    k: int = 4
    """Number of examples to select."""
    example_keys: Optional[List[str]] = None
    """Optional keys to filter examples to."""
    input_keys: Optional[List[str]] = None
    """Optional keys to filter input to. If provided, the search is based on
    the input variables instead of all variables."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

[docs]    def add_example(self, example: Dict[str, str]) -> str:
        """Add new example to vectorstore."""
        if self.input_keys:
            string_example = " ".join(
                sorted_values({key: example[key] for key in self.input_keys})
            )
        else:
            string_example = " ".join(sorted_values(example))
        ids = self.vectorstore.add_texts([string_example], metadatas=[example])
        return ids[0]
[docs]    def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
        """Select which examples to use based on semantic similarity."""
        # Get the docs with the highest similarity.
        if self.input_keys:
            input_variables = {key: input_variables[key] for key in self.input_keys}
        query = " ".join(sorted_values(input_variables))
        example_docs = self.vectorstore.similarity_search(query, k=self.k)
        # Get the examples from the metadata.
        # This assumes that examples are stored in metadata.
        examples = [dict(e.metadata) for e in example_docs]
        # If example keys are provided, filter examples to those keys.
        if self.example_keys:
            examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
        return examples

[docs]    @classmethod
    def from_examples(
        cls,
        examples: List[dict],
        embeddings: Embeddings,
        vectorstore_cls: Type[VectorStore],
        k: int = 4,
        input_keys: Optional[List[str]] = None,
        **vectorstore_cls_kwargs: Any,
    ) -> SemanticSimilarityExampleSelector:
        """Create k-shot example selector using example list and embeddings.

        Reshuffles examples dynamically based on query similarity.

        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select
            input_keys: If provided, the search is based on the input variables
instead of all variables. vectorstore_cls_kwargs: optional kwargs containing url for vector store Returns: The ExampleSelector instantiated, backed by a vector store. """ if input_keys: string_examples = [ " ".join(sorted_values({k: eg[k] for k in input_keys})) for eg in examples ] else: string_examples = [" ".join(sorted_values(eg)) for eg in examples] vectorstore = vectorstore_cls.from_texts( string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs ) return cls(vectorstore=vectorstore, k=k, input_keys=input_keys) [docs]class MaxMarginalRelevanceExampleSelector(SemanticSimilarityExampleSelector, BaseModel): """ExampleSelector that selects examples based on Max Marginal Relevance. This was shown to improve performance in this paper: https://arxiv.org/pdf/2211.13892.pdf """ fetch_k: int = 20 """Number of examples to fetch to rerank.""" [docs] def select_examples(self, input_variables: Dict[str, str]) -> List[dict]: """Select which examples to use based on semantic similarity.""" # Get the docs with the highest similarity. if self.input_keys: input_variables = {key: input_variables[key] for key in self.input_keys} query = " ".join(sorted_values(input_variables)) example_docs = self.vectorstore.max_marginal_relevance_search( query, k=self.k, fetch_k=self.fetch_k ) # Get the examples from the metadata. # This assumes that examples are stored in metadata. examples = [dict(e.metadata) for e in example_docs]
        # If example keys are provided, filter examples to those keys.
        if self.example_keys:
            examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
        return examples

[docs]    @classmethod
    def from_examples(
        cls,
        examples: List[dict],
        embeddings: Embeddings,
        vectorstore_cls: Type[VectorStore],
        k: int = 4,
        input_keys: Optional[List[str]] = None,
        fetch_k: int = 20,
        **vectorstore_cls_kwargs: Any,
    ) -> MaxMarginalRelevanceExampleSelector:
        """Create k-shot example selector using example list and embeddings.

        Reshuffles examples dynamically based on query similarity.

        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select
            input_keys: If provided, the search is based on the input variables
                instead of all variables.
            vectorstore_cls_kwargs: optional kwargs containing url for vector store

        Returns:
            The ExampleSelector instantiated, backed by a vector store.
        """
        if input_keys:
            string_examples = [
                " ".join(sorted_values({k: eg[k] for k in input_keys}))
                for eg in examples
            ]
        else:
            string_examples = [" ".join(sorted_values(eg)) for eg in examples]
        vectorstore = vectorstore_cls.from_texts(
            string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
        )
        return cls(vectorstore=vectorstore, k=k, fetch_k=fetch_k, input_keys=input_keys)
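A sketch of using the selectors above. It assumes ``faiss-cpu`` is installed and an OpenAI API key is set; any ``Embeddings``/``VectorStore`` pair would work the same way:

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.prompts.example_selector import SemanticSimilarityExampleSelector
    from langchain.vectorstores import FAISS

    selector = SemanticSimilarityExampleSelector.from_examples(
        examples=[
            {"input": "happy", "output": "sad"},
            {"input": "windy", "output": "calm"},
        ],
        embeddings=OpenAIEmbeddings(),
        vectorstore_cls=FAISS,
        k=1,
    )
    # Returns the stored example whose embedding is closest to the input.
    print(selector.select_examples({"input": "joyful"}))

``MaxMarginalRelevanceExampleSelector.from_examples`` is a drop-in replacement that additionally takes ``fetch_k`` and reranks the fetched candidates for diversity.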
Source code for langchain.utilities.searx_search

"""Utility for using SearxNG meta search API.

SearxNG is a privacy-friendly free metasearch engine that aggregates results from
`multiple search engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and databases and
supports the `OpenSearch
<https://github.com/dewitt/opensearch/blob/master/opensearch-1-1-draft-6.md>`_
specification.

More details on the installation instructions `here. <../../ecosystem/searx.html>`_

For the search API refer to https://docs.searxng.org/dev/search_api.html

Quick Start
-----------

In order to use this tool you need to provide the searx host. This can be done
by passing the named parameter :attr:`searx_host <SearxSearchWrapper.searx_host>`
or exporting the environment variable SEARX_HOST.

Note: this is the only required parameter.

Then create a searx search instance like this:

.. code-block:: python

    from langchain.utilities import SearxSearchWrapper

    # when the host starts with `http` SSL is disabled and the connection
    # is assumed to be on a private network
    searx_host='http://self.hosted'

    search = SearxSearchWrapper(searx_host=searx_host)

You can now use the ``search`` instance to query the searx API.

Searching
---------

Use the :meth:`run() <SearxSearchWrapper.run>` and
:meth:`results() <SearxSearchWrapper.results>` methods to query the searx API.
Other methods are available for convenience.
:class:`SearxResults` is a convenience wrapper around the raw json result.

Example usage of the ``run`` method to make a search:

.. code-block:: python

    s.run(query="what is the best search engine?")

Engine Parameters
-----------------

You can pass any `accepted searx search API
<https://docs.searxng.org/dev/search_api.html>`_ parameters to the
:py:class:`SearxSearchWrapper` instance.

In the following example we are using the
:attr:`engines <SearxSearchWrapper.engines>` and the ``language`` parameters:

.. code-block:: python

    # assuming the searx host is set as above or exported as an env variable
    s = SearxSearchWrapper(engines=['google', 'bing'], language='es')

Search Tips
-----------

Searx offers a special
`search syntax <https://docs.searxng.org/user/index.html#search-syntax>`_
that can also be used instead of passing engine parameters.

For example the following query:

.. code-block:: python

    s = SearxSearchWrapper("langchain library", engines=['github'])

    # can also be written as:
    s = SearxSearchWrapper("langchain library !github")

    # or even:
    s = SearxSearchWrapper("langchain library !gh")

In some situations you might want to pass an extra string to the search query.
For example when the `run()` method is called by an agent. The search suffix can
also be used as a way to pass extra parameters to searx or the underlying search
engines.

.. code-block:: python

    # select the github engine and pass the search suffix
    s = SearxSearchWrapper("langchain library", query_suffix="!gh")


    s = SearxSearchWrapper("langchain library")
    # or use the conventional google search syntax to restrict results to github
    s.run("large language models", query_suffix="site:github.com")

*NOTE*: A search suffix can be defined on both the instance and the method level.
The resulting query will be the concatenation of the two with the former taking
precedence.

See `SearxNG Configured Engines
<https://docs.searxng.org/admin/engines/configured_engines.html>`_ and
`SearxNG Search Syntax <https://docs.searxng.org/user/index.html#id1>`_
for more details.

Notes
-----

This wrapper is based on the SearxNG fork https://github.com/searxng/searxng
which is better maintained than the original Searx project and offers more features.

Public SearxNG instances often use a rate limiter for API usage, so you might want to
use a self-hosted instance and disable the rate limiter.

If you are self-hosting an instance you can customize the rate limiter for your
own network as described
`here <https://github.com/searxng/searxng/pull/2129>`_.

For a list of public SearxNG instances see https://searx.space/
"""

import json
from typing import Any, Dict, List, Optional

import requests
from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator, validator

from langchain.utils import get_from_dict_or_env


def _get_default_params() -> dict:
    return {"language": "en", "format": "json"}
return {"language": "en", "format": "json"} [docs]class SearxResults(dict): """Dict like wrapper around search api results.""" _data = "" def __init__(self, data: str): """Take a raw result from Searx and make it into a dict like object.""" json_data = json.loads(data) super().__init__(json_data) self.__dict__ = self def __str__(self) -> str: """Text representation of searx result.""" return self._data @property def results(self) -> Any: """Silence mypy for accessing this field. :meta private: """ return self.get("results") @property def answers(self) -> Any: """Helper accessor on the json result.""" return self.get("answers") [docs]class SearxSearchWrapper(BaseModel): """Wrapper for Searx API. To use you need to provide the searx host by passing the named parameter ``searx_host`` or exporting the environment variable ``SEARX_HOST``. In some situations you might want to disable SSL verification, for example if you are running searx locally. You can do this by passing the named parameter ``unsecure``. You can also pass the host url scheme as ``http`` to disable SSL. Example: .. code-block:: python from langchain.utilities import SearxSearchWrapper searx = SearxSearchWrapper(searx_host="http://localhost:8888") Example with SSL disabled: .. code-block:: python from langchain.utilities import SearxSearchWrapper
        .. code-block:: python

            from langchain.utilities import SearxSearchWrapper
            # note the unsecure parameter is not needed if you pass the url scheme as
            # http
            searx = SearxSearchWrapper(searx_host="http://localhost:8888",
                                       unsecure=True)

    """

    _result: SearxResults = PrivateAttr()
    searx_host: str = ""
    unsecure: bool = False
    params: dict = Field(default_factory=_get_default_params)
    headers: Optional[dict] = None
    engines: Optional[List[str]] = []
    query_suffix: Optional[str] = ""
    k: int = 10

    @validator("unsecure")
    def disable_ssl_warnings(cls, v: bool) -> bool:
        """Disable SSL warnings."""
        if v:
            # requests.urllib3.disable_warnings()
            try:
                import urllib3

                urllib3.disable_warnings()
            except ImportError as e:
                print(e)
        return v

    @root_validator()
    def validate_params(cls, values: Dict) -> Dict:
        """Validate that custom searx params are merged with default ones."""
        user_params = values["params"]
        default = _get_default_params()
        values["params"] = {**default, **user_params}

        engines = values.get("engines")
        if engines:
            values["params"]["engines"] = ",".join(engines)

        searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
        if not searx_host.startswith("http"):
print( f"Warning: missing the url scheme on host \ ! assuming secure https://{searx_host} " ) searx_host = "https://" + searx_host elif searx_host.startswith("http://"): values["unsecure"] = True cls.disable_ssl_warnings(True) values["searx_host"] = searx_host return values class Config: """Configuration for this pydantic object.""" extra = Extra.forbid def _searx_api_query(self, params: dict) -> SearxResults: """Actual request to searx API.""" raw_result = requests.get( self.searx_host, headers=self.headers, params=params, verify=not self.unsecure, ) # test if http result is ok if not raw_result.ok: raise ValueError("Searx API returned an error: ", raw_result.text) res = SearxResults(raw_result.text) self._result = res return res [docs] def run( self, query: str, engines: Optional[List[str]] = None, query_suffix: Optional[str] = "", **kwargs: Any, ) -> str: """Run query through Searx API and parse results. You can pass any other params to the searx query API. Args: query: The query to search for. query_suffix: Extra suffix appended to the query. engines: List of engines to use for the query. **kwargs: extra parameters to pass to the searx API. Example: This will make a query to the qwant engine:
            .. code-block:: python

                from langchain.utilities import SearxSearchWrapper
                searx = SearxSearchWrapper(searx_host="http://my.searx.host")
                searx.run("what is the weather in France ?", engines=["qwant"])

                # the same result can be achieved using the `!` syntax of searx
                # to select the engine using `query_suffix`
                searx.run("what is the weather in France ?", query_suffix="!qwant")
        """
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}

        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix

        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix

        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)

        res = self._searx_api_query(params)

        if len(res.answers) > 0:
            toret = res.answers[0]

        # only return the content of the results list
        elif len(res.results) > 0:
            toret = "\n\n".join([r.get("content", "") for r in res.results[: self.k]])
        else:
            toret = "No good search result found"

        return toret

[docs]    def results(
        self,
        query: str,
        num_results: int,
        engines: Optional[List[str]] = None,
        query_suffix: Optional[str] = "",
        **kwargs: Any,
    ) -> List[Dict]:
        """Run query through Searx API and returns the results with metadata.

        Args:
            query: The query to search for.
            query_suffix: Extra suffix appended to the query.
            num_results: Limit the number of results to return.
            engines: List of engines to use for the query.
            **kwargs: extra parameters to pass to the searx API.

        Returns:
            A list of dicts with the following keys:

            {
                snippet: The description of the result.
                title: The title of the result.
                link: The link to the result.
                engines: The engines used for the result.
                category: Searx category of the result.
            }

        """
        _params = {
            "q": query,
        }
        params = {**self.params, **_params, **kwargs}
        if self.query_suffix and len(self.query_suffix) > 0:
            params["q"] += " " + self.query_suffix
        if isinstance(query_suffix, str) and len(query_suffix) > 0:
            params["q"] += " " + query_suffix
        if isinstance(engines, list) and len(engines) > 0:
            params["engines"] = ",".join(engines)
        results = self._searx_api_query(params).results[:num_results]
        if len(results) == 0:
            return [{"Result": "No good Search Result was found"}]

        return [
            {
                "snippet": result.get("content", ""),
                "title": result["title"],
"snippet": result.get("content", ""), "title": result["title"], "link": result["url"], "engines": result["engines"], "category": result["category"], } for result in results ] By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.utilities.serpapi """Chain that calls SerpAPI. Heavily borrowed from https://github.com/ofirpress/self-ask """ import os import sys from typing import Any, Dict, Optional, Tuple import aiohttp from pydantic import BaseModel, Extra, Field, root_validator from langchain.utils import get_from_dict_or_env class HiddenPrints: """Context manager to hide prints.""" def __enter__(self) -> None: """Open file to pipe stdout to.""" self._original_stdout = sys.stdout sys.stdout = open(os.devnull, "w") def __exit__(self, *_: Any) -> None: """Close file that stdout was piped to.""" sys.stdout.close() sys.stdout = self._original_stdout [docs]class SerpAPIWrapper(BaseModel): """Wrapper around SerpAPI. To use, you should have the ``google-search-results`` python package installed, and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass `serpapi_api_key` as a named parameter to the constructor. Example: .. code-block:: python from langchain import SerpAPIWrapper serpapi = SerpAPIWrapper() """ search_engine: Any #: :meta private: params: dict = Field( default={ "engine": "google", "google_domain": "google.com", "gl": "us", "hl": "en", } ) serpapi_api_key: Optional[str] = None aiosession: Optional[aiohttp.ClientSession] = None class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        serpapi_api_key = get_from_dict_or_env(
            values, "serpapi_api_key", "SERPAPI_API_KEY"
        )
        values["serpapi_api_key"] = serpapi_api_key
        try:
            from serpapi import GoogleSearch

            values["search_engine"] = GoogleSearch
        except ImportError:
            raise ValueError(
                "Could not import serpapi python package. "
                "Please install it with `pip install google-search-results`."
            )
        return values

[docs]    async def arun(self, query: str) -> str:
        """Use aiohttp to run query through SerpAPI and parse result."""

        def construct_url_and_params() -> Tuple[str, Dict[str, str]]:
            params = self.get_params(query)
            params["source"] = "python"
            if self.serpapi_api_key:
                params["serp_api_key"] = self.serpapi_api_key
            params["output"] = "json"
            url = "https://serpapi.com/search"
            return url, params

        url, params = construct_url_and_params()
        if not self.aiosession:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, params=params) as response:
                    res = await response.json()
else: async with self.aiosession.get(url, params=params) as response: res = await response.json() return self._process_response(res) [docs] def run(self, query: str) -> str: """Run query through SerpAPI and parse result.""" return self._process_response(self.results(query)) [docs] def results(self, query: str) -> dict: """Run query through SerpAPI and return the raw result.""" params = self.get_params(query) with HiddenPrints(): search = self.search_engine(params) res = search.get_dict() return res [docs] def get_params(self, query: str) -> Dict[str, str]: """Get parameters for SerpAPI.""" _params = { "api_key": self.serpapi_api_key, "q": query, } params = {**self.params, **_params} return params @staticmethod def _process_response(res: dict) -> str: """Process response from SerpAPI.""" if "error" in res.keys(): raise ValueError(f"Got error from SerpAPI: {res['error']}") if "answer_box" in res.keys() and "answer" in res["answer_box"].keys(): toret = res["answer_box"]["answer"] elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys(): toret = res["answer_box"]["snippet"] elif ( "answer_box" in res.keys() and "snippet_highlighted_words" in res["answer_box"].keys() ): toret = res["answer_box"]["snippet_highlighted_words"][0]
        elif (
            "sports_results" in res.keys()
            and "game_spotlight" in res["sports_results"].keys()
        ):
            toret = res["sports_results"]["game_spotlight"]
        elif (
            "knowledge_graph" in res.keys()
            and "description" in res["knowledge_graph"].keys()
        ):
            toret = res["knowledge_graph"]["description"]
        elif "snippet" in res["organic_results"][0].keys():
            toret = res["organic_results"][0]["snippet"]
        else:
            toret = "No good search result found"
        return toret
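A short usage sketch for the wrapper above; it assumes ``pip install google-search-results`` and a ``SERPAPI_API_KEY`` environment variable:

.. code-block:: python

    from langchain import SerpAPIWrapper

    search = SerpAPIWrapper()
    # run() returns the parsed answer extracted by _process_response()
    print(search.run("What is the capital of France?"))

    # results() returns the raw SerpAPI JSON payload instead
    raw = search.results("What is the capital of France?")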
Source code for langchain.vectorstores.atlas """Wrapper around Atlas by Nomic.""" from __future__ import annotations import logging import uuid from typing import Any, Iterable, List, Optional import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore logger = logging.getLogger() [docs]class AtlasDB(VectorStore): """Wrapper around Atlas: Nomic's neural database and rhizomatic instrument. To use, you should have the ``nomic`` python package installed. Example: .. code-block:: python from langchain.vectorstores import AtlasDB from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = AtlasDB("my_project", embeddings.embed_query) """ _ATLAS_DEFAULT_ID_FIELD = "atlas_id" def __init__( self, name: str, embedding_function: Optional[Embeddings] = None, api_key: Optional[str] = None, description: str = "A description for your project", is_public: bool = True, reset_project_if_exists: bool = False, ) -> None: """ Initialize the Atlas Client Args: name (str): The name of your project. If the project already exists, it will be loaded. embedding_function (Optional[Callable]): An optional function used for embedding your data. If None, data will be embedded with Nomic's embed model. api_key (str): Your nomic API key description (str): A description for your project. is_public (bool): Whether your project is publicly accessible. True by default.
            reset_project_if_exists (bool): Whether to reset this project if it
                already exists. Default False.
                Generally useful during development and testing.
        """
        try:
            import nomic
            from nomic import AtlasProject
        except ImportError:
            raise ValueError(
                "Could not import nomic python package. "
                "Please install it with `pip install nomic`."
            )

        if api_key is None:
            raise ValueError("No API key provided. Sign up at atlas.nomic.ai!")
        nomic.login(api_key)

        self._embedding_function = embedding_function
        modality = "text"
        if self._embedding_function is not None:
            modality = "embedding"

        # Check if the project exists, create it if not
        self.project = AtlasProject(
            name=name,
            description=description,
            modality=modality,
            is_public=is_public,
            reset_project_if_exists=reset_project_if_exists,
            unique_id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD,
        )
        self.project._latest_project_state()

[docs]    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        refresh: bool = True,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas. ids (Optional[List[str]]): An optional list of ids. refresh(bool): Whether or not to refresh indices with the updated data. Default True. Returns: List[str]: List of IDs of the added texts. """ if ( metadatas is not None and len(metadatas) > 0 and "text" in metadatas[0].keys() ): raise ValueError("Cannot accept key text in metadata!") texts = list(texts) if ids is None: ids = [str(uuid.uuid1()) for _ in texts] # Embedding upload case if self._embedding_function is not None: _embeddings = self._embedding_function.embed_documents(texts) embeddings = np.stack(_embeddings) if metadatas is None: data = [ {AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i], "text": texts[i]} for i, _ in enumerate(texts) ] else: for i in range(len(metadatas)): metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i] metadatas[i]["text"] = texts[i] data = metadatas self.project._validate_map_data_inputs( [], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data ) with self.project.wait_for_project_lock(): self.project.add_embeddings(embeddings=embeddings, data=data) # Text upload case else: if metadatas is None: data = [
                    {"text": text, AtlasDB._ATLAS_DEFAULT_ID_FIELD: ids[i]}
                    for i, text in enumerate(texts)
                ]
            else:
                for i, text in enumerate(texts):
                    metadatas[i]["text"] = text
                    metadatas[i][AtlasDB._ATLAS_DEFAULT_ID_FIELD] = ids[i]
                data = metadatas

            self.project._validate_map_data_inputs(
                [], id_field=AtlasDB._ATLAS_DEFAULT_ID_FIELD, data=data
            )
            with self.project.wait_for_project_lock():
                self.project.add_text(data)

        if refresh:
            if len(self.project.indices) > 0:
                with self.project.wait_for_project_lock():
                    self.project.rebuild_maps()

        return ids

[docs]    def create_index(self, **kwargs: Any) -> Any:
        """Creates an index in your project.

        See
        https://docs.nomic.ai/atlas_api.html#nomic.project.AtlasProject.create_index
        for full detail.
        """
        with self.project.wait_for_project_lock():
            return self.project.create_index(**kwargs)

[docs]    def similarity_search(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Document]:
        """Run similarity search with AtlasDB

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.

        Returns:
            List[Document]: List of documents most similar to the query text.
        """
        if self._embedding_function is None:
            raise NotImplementedError(
""" if self._embedding_function is None: raise NotImplementedError( "AtlasDB requires an embedding_function for text similarity search!" ) _embedding = self._embedding_function.embed_documents([query])[0] embedding = np.array(_embedding).reshape(1, -1) with self.project.wait_for_project_lock(): neighbors, _ = self.project.projections[0].vector_search( queries=embedding, k=k ) datas = self.project.get_data(ids=neighbors[0]) docs = [ Document(page_content=datas[i]["text"], metadata=datas[i]) for i, neighbor in enumerate(neighbors) ] return docs [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, name: Optional[str] = None, api_key: Optional[str] = None, description: str = "A description for your project", is_public: bool = True, reset_project_if_exists: bool = False, index_kwargs: Optional[dict] = None, **kwargs: Any, ) -> AtlasDB: """Create an AtlasDB vectorstore from a raw documents. Args: texts (List[str]): The list of texts to ingest. name (str): Name of the project to create. api_key (str): Your nomic API key, embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
            ids (Optional[List[str]]): Optional list of document IDs. If None,
                ids will be auto created
            description (str): A description for your project.
            is_public (bool): Whether your project is publicly accessible.
                True by default.
            reset_project_if_exists (bool): Whether to reset this project if it
                already exists. Default False.
                Generally useful during development and testing.
            index_kwargs (Optional[dict]): Dict of kwargs for index creation.
                See https://docs.nomic.ai/atlas_api.html

        Returns:
            AtlasDB: Nomic's neural database and finest rhizomatic instrument
        """
        if name is None or api_key is None:
            raise ValueError("`name` and `api_key` cannot be None.")

        # Inject relevant kwargs
        all_index_kwargs = {"name": name + "_index", "indexed_field": "text"}
        if index_kwargs is not None:
            for k, v in index_kwargs.items():
                all_index_kwargs[k] = v

        # Build project
        atlasDB = cls(
            name,
            embedding_function=embedding,
            api_key=api_key,
            description=description,
            is_public=is_public,
            reset_project_if_exists=reset_project_if_exists,
        )
        with atlasDB.project.wait_for_project_lock():
            atlasDB.add_texts(texts=texts, metadatas=metadatas, ids=ids)
            atlasDB.create_index(**all_index_kwargs)
        return atlasDB

[docs]    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Optional[Embeddings] = None,
        ids: Optional[List[str]] = None,
        name: Optional[str] = None,
        api_key: Optional[str] = None,
        persist_directory: Optional[str] = None,
        description: str = "A description for your project",
        is_public: bool = True,
        reset_project_if_exists: bool = False,
        index_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> AtlasDB:
        """Create an AtlasDB vectorstore from a list of documents.

        Args:
            name (str): Name of the collection to create.
            api_key (str): Your nomic API key,
            documents (List[Document]): List of documents to add to the vectorstore.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            ids (Optional[List[str]]): Optional list of document IDs. If None,
                ids will be auto created
            description (str): A description for your project.
            is_public (bool): Whether your project is publicly accessible.
                True by default.
            reset_project_if_exists (bool): Whether to reset this project if
                it already exists. Default False.
                Generally useful during development and testing.
            index_kwargs (Optional[dict]): Dict of kwargs for index creation.
                See https://docs.nomic.ai/atlas_api.html

        Returns:
            AtlasDB: Nomic's neural database and finest rhizomatic instrument
        """
        if name is None or api_key is None:
            raise ValueError("`name` and `api_key` cannot be None.")
        texts = [doc.page_content for doc in documents]
        metadatas = [doc.metadata for doc in documents]
        return cls.from_texts(
            name=name,
            api_key=api_key,
            texts=texts,
            embedding=embedding,
            metadatas=metadatas,
            ids=ids,
            description=description,
            is_public=is_public,
            reset_project_if_exists=reset_project_if_exists,
            index_kwargs=index_kwargs,
        )
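A short sketch of the class above; it assumes ``pip install nomic`` and a valid Atlas API key (project name and key below are placeholders):

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import AtlasDB

    db = AtlasDB.from_texts(
        texts=["harrison worked at kensho", "langchain is a framework"],
        embedding=OpenAIEmbeddings(),
        name="my_project",        # hypothetical project name
        api_key="NOMIC_API_KEY",  # placeholder: use your real key
    )
    docs = db.similarity_search("Where did harrison work?", k=1)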
Source code for langchain.vectorstores.base

"""Interface for vector stores."""
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Optional

from pydantic import BaseModel, Field, root_validator

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever


[docs]class VectorStore(ABC):
    """Interface for vector stores."""

[docs]    @abstractmethod
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """

[docs]    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Run more documents through the embeddings and add to the vectorstore.

        Args:
            documents (List[Document]): Documents to add to the vectorstore.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        # TODO: Handle the case where the user doesn't provide ids on the Collection
        texts = [doc.page_content for doc in documents]
        metadatas = [doc.metadata for doc in documents]
        return self.add_texts(texts, metadatas, **kwargs)
[docs] @abstractmethod def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ raise NotImplementedError [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20 ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. Returns: List of Documents selected by maximal marginal relevance. """ raise NotImplementedError [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20 ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        raise NotImplementedError

[docs]    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Embeddings,
        **kwargs: Any,
    ) -> VectorStore:
        """Return VectorStore initialized from documents and embeddings."""
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs)

[docs]    @classmethod
    @abstractmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> VectorStore:
        """Return VectorStore initialized from texts and embeddings."""

[docs]    def as_retriever(self) -> VectorStoreRetriever:
        return VectorStoreRetriever(vectorstore=self)


class VectorStoreRetriever(BaseRetriever, BaseModel):
    vectorstore: VectorStore
    search_type: str = "similarity"
    search_kwargs: dict = Field(default_factory=dict)

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    @root_validator()
    def validate_search_type(cls, values: Dict) -> Dict:
        """Validate search type."""
        if "search_type" in values:
            search_type = values["search_type"]
if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "mmr"): raise ValueError(f"search_type of {search_type} not allowed.") return values def get_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search(query, **self.search_kwargs) elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search( query, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.vectorstores.chroma """Wrapper around ChromaDB embeddings platform.""" from __future__ import annotations import logging import uuid from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore if TYPE_CHECKING: import chromadb import chromadb.config logger = logging.getLogger() def _results_to_docs(results: Any) -> List[Document]: return [doc for doc, _ in _results_to_docs_and_scores(results)] def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]: return [ # TODO: Chroma can do batch querying, # we shouldn't hard code to the 1st result (Document(page_content=result[0], metadata=result[1] or {}), result[2]) for result in zip( results["documents"][0], results["metadatas"][0], results["distances"][0], ) ] [docs]class Chroma(VectorStore): """Wrapper around ChromaDB embeddings platform. To use, you should have the ``chromadb`` python package installed. Example: .. code-block:: python from langchain.vectorstores import Chroma from langchain.embeddings.openai import OpenAIEmbeddings embeddings = OpenAIEmbeddings() vectorstore = Chroma("langchain_store", embeddings.embed_query) """ _LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain" def __init__( self, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        embedding_function: Optional[Embeddings] = None,
        persist_directory: Optional[str] = None,
        client_settings: Optional[chromadb.config.Settings] = None,
    ) -> None:
        """Initialize with Chroma client."""
        try:
            import chromadb
            import chromadb.config
        except ImportError:
            raise ValueError(
                "Could not import chromadb python package. "
                "Please install it with `pip install chromadb`."
            )

        if client_settings:
            self._client_settings = client_settings
        else:
            self._client_settings = chromadb.config.Settings()
            if persist_directory is not None:
                self._client_settings = chromadb.config.Settings(
                    chroma_db_impl="duckdb+parquet", persist_directory=persist_directory
                )
        self._client = chromadb.Client(self._client_settings)
        self._embedding_function = embedding_function
        self._persist_directory = persist_directory
        self._collection = self._client.get_or_create_collection(
            name=collection_name,
            embedding_function=self._embedding_function.embed_documents
            if self._embedding_function is not None
            else None,
        )

[docs]    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts (Iterable[str]): Texts to add to the vectorstore.
            metadatas (Optional[List[dict]], optional): Optional list of metadatas.
            ids (Optional[List[str]], optional): Optional list of IDs.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        # TODO: Handle the case where the user doesn't provide ids on the Collection
        if ids is None:
            ids = [str(uuid.uuid1()) for _ in texts]
        embeddings = None
        if self._embedding_function is not None:
            embeddings = self._embedding_function.embed_documents(list(texts))
        self._collection.add(
            metadatas=metadatas, embeddings=embeddings, documents=texts, ids=ids
        )
        return ids

[docs]    def similarity_search(
        self,
        query: str,
        k: int = 4,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Run similarity search with Chroma.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Document]: List of documents most similar to the query text.
        """
        docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
        return [doc for doc, _ in docs_and_scores]

[docs]    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query vector.
        """
        results = self._collection.query(
            query_embeddings=[embedding], n_results=k, where=filter
        )
        return _results_to_docs(results)

[docs]    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Run similarity search with Chroma with distance.

        Args:
            query (str): Query text to search for.
            k (int): Number of results to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List[Tuple[Document, float]]: List of documents most similar to the query
                text with distance in float.
        """
        if self._embedding_function is None:
            results = self._collection.query(
                query_texts=[query], n_results=k, where=filter
            )
        else:
            query_embedding = self._embedding_function.embed_query(query)
            results = self._collection.query(
                query_embeddings=[query_embedding], n_results=k, where=filter
            )
        return _results_to_docs_and_scores(results)

[docs]    def delete_collection(self) -> None:
        """Delete the collection."""
        self._client.delete_collection(self._collection.name)
"""Delete the collection.""" self._client.delete_collection(self._collection.name) [docs] def persist(self) -> None: """Persist the collection. This can be used to explicitly persist the data to disk. It will also be called automatically when the object is destroyed. """ if self._persist_directory is None: raise ValueError( "You must specify a persist_directory on" "creation to persist the collection." ) self._client.persist() [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Optional[Embeddings] = None, metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, **kwargs: Any, ) -> Chroma: """Create a Chroma vectorstore from a raw documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory. Args: texts (List[str]): List of texts to add to the collection. collection_name (str): Name of the collection to create. persist_directory (Optional[str]): Directory to persist the collection. embedding (Optional[Embeddings]): Embedding function. Defaults to None. metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. ids (Optional[List[str]]): List of document IDs. Defaults to None. client_settings (Optional[chromadb.config.Settings]): Chroma client settings Returns:
client_settings (Optional[chromadb.config.Settings]): Chroma client settings Returns: Chroma: Chroma vectorstore. """ chroma_collection = cls( collection_name=collection_name, embedding_function=embedding, persist_directory=persist_directory, client_settings=client_settings, ) chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids) return chroma_collection [docs] @classmethod def from_documents( cls, documents: List[Document], embedding: Optional[Embeddings] = None, ids: Optional[List[str]] = None, collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME, persist_directory: Optional[str] = None, client_settings: Optional[chromadb.config.Settings] = None, **kwargs: Any, ) -> Chroma: """Create a Chroma vectorstore from a list of documents. If a persist_directory is specified, the collection will be persisted there. Otherwise, the data will be ephemeral in-memory. Args: collection_name (str): Name of the collection to create. persist_directory (Optional[str]): Directory to persist the collection. ids (Optional[List[str]]): List of document IDs. Defaults to None. documents (List[Document]): List of documents to add to the vectorstore. embedding (Optional[Embeddings]): Embedding function. Defaults to None. client_settings (Optional[chromadb.config.Settings]): Chroma client settings Returns: Chroma: Chroma vectorstore. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents]
        return cls.from_texts(
            texts=texts,
            embedding=embedding,
            metadatas=metadatas,
            ids=ids,
            collection_name=collection_name,
            persist_directory=persist_directory,
            client_settings=client_settings,
        )
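A short sketch for the class above; it assumes ``pip install chromadb`` and an OpenAI key for the embeddings:

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import Chroma

    db = Chroma.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=OpenAIEmbeddings(),
        persist_directory="./chroma_db",  # omit for ephemeral in-memory use
    )
    db.persist()
    docs_and_scores = db.similarity_search_with_score("foo", k=2)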
Source code for langchain.vectorstores.deeplake

"""Wrapper around Activeloop Deep Lake."""
from __future__ import annotations

import logging
import uuid
from typing import Any, Iterable, List, Optional, Sequence

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore

logger = logging.getLogger()


def L2_search(
    query_embedding: np.ndarray, data_vectors: np.ndarray, k: int = 4
) -> list:
    """Naive L2 search for nearest neighbors."""
    # Calculate the L2 distance between the query_vector and all data_vectors
    distances = np.linalg.norm(data_vectors - query_embedding, axis=1)

    # Sort the distances and return the indices of the k nearest vectors
    nearest_indices = np.argsort(distances)[:k]
    return nearest_indices.tolist()


[docs]class DeepLake(VectorStore):
    """Wrapper around Deep Lake, a data lake for deep learning applications.

    It not only stores embeddings, but also the original data and queries with
    version control automatically enabled. It is more than just a vector store.
    You can use the dataset to fine-tune your own LLM models or use it for other
    downstream tasks.

    We implement naive similarity search, but it can be extended with
    Tensor Query Language (TQL for production use cases) over billion rows.

    To use, you should have the ``deeplake`` python package installed.

    Example:
        .. code-block:: python

            from langchain.vectorstores import DeepLake
            from langchain.embeddings.openai import OpenAIEmbeddings

            embeddings = OpenAIEmbeddings()
vectorstore = DeepLake("langchain_store", embeddings.embed_query) """ _LANGCHAIN_DEFAULT_DEEPLAKE_PATH = "mem://langchain" def __init__( self, dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH, token: Optional[str] = None, embedding_function: Optional[Embeddings] = None, ) -> None: """Initialize with Deep Lake client.""" try: import deeplake except ImportError: raise ValueError( "Could not import deeplake python package. " "Please install it with `pip install deeplake`." ) self._deeplake = deeplake if deeplake.exists(dataset_path, token=token): self.ds = deeplake.load(dataset_path, token=token) logger.warning( f"Deep Lake Dataset in {dataset_path} already exists, " f"loading from the storage" ) self.ds.summary() else: self.ds = deeplake.empty(dataset_path, token=token, overwrite=True) with self.ds: self.ds.create_tensor("text", htype="text") self.ds.create_tensor("metadata", htype="json") self.ds.create_tensor("embedding", htype="generic") self.ds.create_tensor("ids", htype="text") self._embedding_function = embedding_function [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, ids: Optional[List[str]] = None, **kwargs: Any, ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts (Iterable[str]): Texts to add to the vectorstore.
            metadatas (Optional[List[dict]], optional): Optional list of metadatas.
            ids (Optional[List[str]], optional): Optional list of IDs.

        Returns:
            List[str]: List of IDs of the added texts.
        """
        if ids is None:
            ids = [str(uuid.uuid1()) for _ in texts]

        text_list = list(texts)

        if self._embedding_function is None:
            embeddings: Sequence[Optional[List[float]]] = [None] * len(text_list)
        else:
            embeddings = self._embedding_function.embed_documents(text_list)

        if metadatas is None:
            metadatas_to_use: Sequence[Optional[dict]] = [None] * len(text_list)
        else:
            metadatas_to_use = metadatas

        elements = zip(text_list, embeddings, metadatas_to_use, ids)

        @self._deeplake.compute
        def ingest(sample_in: list, sample_out: list) -> None:
            s = {
                "text": sample_in[0],
                "embedding": sample_in[1],
                "metadata": sample_in[2],
                "ids": sample_in[3],
            }
            sample_out.append(s)

        ingest().eval(list(elements), self.ds)
        self.ds.commit()

        return ids

[docs]    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query."""
    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query."""
        if self._embedding_function is None:
            self.ds.summary()
            ds_view = self.ds.filter(lambda x: query in x["text"].data()["value"])
        else:
            query_emb = np.array(self._embedding_function.embed_query(query))
            embeddings = self.ds.embedding.numpy()
            indices = L2_search(query_emb, embeddings, k=k)
            ds_view = self.ds[indices]

        docs = [
            Document(
                page_content=el["text"].data()["value"],
                metadata=el["metadata"].data()["value"],
            )
            for el in ds_view
        ]
        return docs
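
    # Example (hypothetical query): with an embedding function set, this runs
    # the naive L2 scan above; without one, it falls back to substring
    # filtering over the ``text`` tensor.
    #
    #     docs = vectorstore.similarity_search("What did harrison work on?", k=2)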
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Optional[Embeddings] = None,
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        dataset_path: str = _LANGCHAIN_DEFAULT_DEEPLAKE_PATH,
        **kwargs: Any,
    ) -> DeepLake:
        """Create a Deep Lake dataset from raw documents.

        If a dataset_path other than the default is specified, the dataset
        will be persisted there; otherwise, the data will be ephemeral
        in-memory.

        Args:
            dataset_path (str): The full path to the dataset. Can be:
                - a Deep Lake cloud path of the form ``hub://username/datasetname``.
                  To write to Deep Lake cloud datasets, ensure that you are
                  logged in to Deep Lake (use 'activeloop login' from the
                  command line).
                - an s3 path of the form ``s3://bucketname/path/to/dataset``.
                  Credentials are required in either the environment or passed
                  to the creds argument.
                - a local file system path of the form ``./path/to/dataset``,
                  ``~/path/to/dataset`` or ``path/to/dataset``.
                - a memory path of the form ``mem://path/to/dataset`` which
                  doesn't save the dataset but keeps it in memory instead.
                  Should be used only for testing as it does not persist.
            texts (List[str]): List of texts to add.
            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
            metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
            ids (Optional[List[str]]): List of document IDs. Defaults to None.

        Returns:
            DeepLake: Deep Lake dataset.
        """
        deeplake_dataset = cls(
            dataset_path=dataset_path,
            embedding_function=embedding,
        )
        deeplake_dataset.add_texts(texts=texts, metadatas=metadatas, ids=ids)
        return deeplake_dataset

    def delete_dataset(self) -> None:
        """Delete the dataset."""
        self.ds.delete()

    def persist(self) -> None:
        """Persist the dataset to storage."""
        self.ds.flush()
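
A minimal end-to-end sketch tying these pieces together (it assumes the
``deeplake`` package is installed and an OpenAI API key is configured; the
texts and local path are placeholders):

.. code-block:: python

    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores import DeepLake

    embeddings = OpenAIEmbeddings()
    vectorstore = DeepLake.from_texts(
        ["foo", "bar", "baz"],
        embedding=embeddings,
        dataset_path="./deeplake_store",
    )
    docs = vectorstore.similarity_search("foo", k=1)
    vectorstore.persist()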
Source code for langchain.vectorstores.elastic_vector_search

"""Wrapper around Elasticsearch vector database."""
from __future__ import annotations

import uuid
from typing import Any, Dict, Iterable, List, Optional

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore


def _default_text_mapping(dim: int) -> Dict:
    return {
        "properties": {
            "text": {"type": "text"},
            "vector": {"type": "dense_vector", "dims": dim},
        }
    }


def _default_script_query(query_vector: List[float]) -> Dict:
    return {
        "script_score": {
            "query": {"match_all": {}},
            "script": {
                "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
                "params": {"query_vector": query_vector},
            },
        }
    }
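
# For a 3-dimensional query vector, _default_script_query yields a
# ``script_score`` body like the following (values illustrative). The
# ``+ 1.0`` shifts cosine similarity from [-1, 1] into [0, 2], since
# Elasticsearch requires non-negative scores:
#
#     {
#         "script_score": {
#             "query": {"match_all": {}},
#             "script": {
#                 "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
#                 "params": {"query_vector": [0.1, 0.2, 0.3]},
#             },
#         }
#     }
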
class ElasticVectorSearch(VectorStore):
    """Wrapper around Elasticsearch as a vector database.

    Example:
        .. code-block:: python

            from langchain import ElasticVectorSearch
            elastic_vector_search = ElasticVectorSearch(
                "http://localhost:9200",
                "embeddings",
                embedding,
            )
    """

    def __init__(self, elasticsearch_url: str, index_name: str, embedding: Embeddings):
        """Initialize with necessary components."""
        try:
            import elasticsearch
        except ImportError:
            raise ValueError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        self.embedding = embedding
        self.index_name = index_name
        try:
            es_client = elasticsearch.Elasticsearch(elasticsearch_url)  # noqa
        except ValueError as e:
            raise ValueError(
                f"Your elasticsearch client string is misformatted. Got error: {e}"
            )
        self.client = es_client

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        try:
            from elasticsearch.helpers import bulk
        except ImportError:
            raise ValueError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        requests = []
        ids = []
        embeddings = self.embedding.embed_documents(list(texts))
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            _id = str(uuid.uuid4())
            request = {
                "_op_type": "index",
                "_index": self.index_name,
                "vector": embeddings[i],
                "text": text,
                "metadata": metadata,
                "_id": _id,
            }
            ids.append(_id)
            requests.append(request)
        bulk(self.client, requests)
        # TODO: add option not to refresh
        self.client.indices.refresh(index=self.index_name)
        return ids
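
    # Example (texts and metadata are placeholders); each document is bulk
    # indexed with its embedding under the ``vector`` field:
    #
    #     ids = elastic_vector_search.add_texts(
    #         ["hello world"], metadatas=[{"source": "greeting"}]
    #     )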
    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        embedding = self.embedding.embed_query(query)
        script_query = _default_script_query(embedding)
        response = self.client.search(index=self.index_name, query=script_query)
        hits = [hit["_source"] for hit in response["hits"]["hits"][:k]]
        documents = [
            Document(page_content=hit["text"], metadata=hit["metadata"])
            for hit in hits
        ]
        return documents
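
    # Example (hypothetical query): the query text is embedded and documents
    # are ranked by the script_score query defined above.
    #
    #     docs = elastic_vector_search.similarity_search("hello", k=4)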
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> ElasticVectorSearch:
        """Construct ElasticVectorSearch wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new index for the embeddings in the Elasticsearch
               instance.
            3. Adds the documents to the newly created Elasticsearch index.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import ElasticVectorSearch
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                elastic_vector_search = ElasticVectorSearch.from_texts(
                    texts,
                    embeddings,
                    elasticsearch_url="http://localhost:9200",
                )
        """
        elasticsearch_url = get_from_dict_or_env(
            kwargs, "elasticsearch_url", "ELASTICSEARCH_URL"
        )
        try:
            import elasticsearch
            from elasticsearch.helpers import bulk
        except ImportError:
            raise ValueError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        try:
            client = elasticsearch.Elasticsearch(elasticsearch_url)
        except ValueError as e:
            raise ValueError(
                "Your elasticsearch client string is misformatted. "
                f"Got error: {e}"
            )
        index_name = uuid.uuid4().hex
        embeddings = embedding.embed_documents(texts)
        dim = len(embeddings[0])
        mapping = _default_text_mapping(dim)
        # TODO would be nice to create index before embedding,
        # just to save expensive steps for last
        client.indices.create(index=index_name, mappings=mapping)
        requests = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            request = {
                "_op_type": "index",
                "_index": index_name,
                "vector": embeddings[i],
                "text": text,
                "metadata": metadata,
            }
            requests.append(request)
        bulk(client, requests)
        client.indices.refresh(index=index_name)
        return cls(elasticsearch_url, index_name, embedding)
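
A minimal end-to-end sketch (it assumes an Elasticsearch instance is reachable
at ``http://localhost:9200`` and an OpenAI API key is configured; the texts are
placeholders):

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import ElasticVectorSearch

    embeddings = OpenAIEmbeddings()
    store = ElasticVectorSearch.from_texts(
        ["foo", "bar"],
        embeddings,
        elasticsearch_url="http://localhost:9200",
    )
    docs = store.similarity_search("foo", k=1)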
Source code for langchain.vectorstores.faiss

"""Wrapper around FAISS vector database."""
from __future__ import annotations

import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple

import numpy as np

from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance


def dependable_faiss_import() -> Any:
    """Import faiss if available, otherwise raise error."""
    try:
        import faiss
    except ImportError:
        raise ValueError(
            "Could not import faiss python package. "
            "Please install it with `pip install faiss-cpu` "
            "(or `pip install faiss-gpu` if you have GPU support)."
        )
    return faiss


class FAISS(VectorStore):
    """Wrapper around FAISS vector database.

    To use, you should have the ``faiss`` python package installed.

    Example:
        .. code-block:: python

            from langchain import FAISS
            faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id)
    """

    def __init__(
        self,
        embedding_function: Callable,
        index: Any,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id
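
    # The FAISS index stores vectors by integer position; index_to_docstore_id
    # maps those positions back to docstore keys, e.g. (illustrative values):
    #
    #     {0: "f47ac10b-...", 1: "9e107d9d-...", ...}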
    def __add(
        self,
        texts: Iterable[str],
        embeddings: Iterable[List[float]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        # Add to the index, the index_to_id mapping, and the docstore.
        starting_len = len(self.index_to_docstore_id)
        self.index.add(np.array(embeddings, dtype=np.float32))
        # Get list of index, id, and docs.
        full_info = [
            (starting_len + i, str(uuid.uuid4()), doc)
            for i, doc in enumerate(documents)
        ]
        # Add information to docstore and index.
        self.docstore.add({_id: doc for _, _id, doc in full_info})
        index_to_id = {index: _id for index, _id, _ in full_info}
        self.index_to_docstore_id.update(index_to_id)
        return [_id for _, _id, _ in full_info]
    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        # Embed and create the documents.
        embeddings = [self.embedding_function(text) for text in texts]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def add_embeddings(
        self,
        text_embeddings: Iterable[Tuple[str, List[float]]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add texts with precomputed embeddings to the vectorstore.

        Args:
            text_embeddings: Iterable pairs of string and embedding to
                add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        # Create the documents from the precomputed embeddings.
        texts = [te[0] for te in text_embeddings]
        embeddings = [te[1] for te in text_embeddings]
        return self.__add(texts, embeddings, metadatas, **kwargs)
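
    # Example with precomputed vectors (values are placeholders; each vector
    # must match the dimensionality of the index):
    #
    #     faiss.add_embeddings([("foo", [0.1, 0.2, 0.3, 0.4])])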
    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
        docs = []
        for j, i in enumerate(indices[0]):
            if i == -1:
                # This happens when not enough docs are returned.
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, scores[0][j]))
        return docs

    def similarity_search_with_score(
        self, query: str, k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k)
        return docs
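
    # Note: with the default IndexFlatL2 the score is a distance, so *lower*
    # means more similar. Example (hypothetical):
    #
    #     docs_and_scores = faiss.similarity_search_with_score("foo", k=1)
    #     doc, score = docs_and_scores[0]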
""" embedding = self.embedding_function(query) docs = self.similarity_search_with_score_by_vector(embedding, k) return docs [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ docs_and_scores = self.similarity_search_with_score(query, k) return [doc for doc, _ in docs_and_scores] [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20 ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to.
    def max_marginal_relevance_search_by_vector(
        self, embedding: List[float], k: int = 4, fetch_k: int = 20
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        _, indices = self.index.search(
            np.array([embedding], dtype=np.float32), fetch_k
        )
        # -1 happens when not enough docs are returned.
        embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32), embeddings, k=k
        )
        selected_indices = [indices[0][i] for i in mmr_selected]
        docs = []
        for i in selected_indices:
            if i == -1:
                # This happens when not enough docs are returned.
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append(doc)
        return docs
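
    # fetch_k candidates are retrieved by plain similarity first, then
    # re-ranked down to k results for diversity. Example (hypothetical):
    #
    #     docs = faiss.max_marginal_relevance_search_by_vector(vec, k=4, fetch_k=20)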
    def max_marginal_relevance_search(
        self, query: str, k: int = 4, fetch_k: int = 20
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(embedding, k, fetch_k)
        return docs

    def merge_from(self, target: FAISS) -> None:
        """Merge another FAISS object with the current one.

        Add the target FAISS to the current one.

        Args:
            target: FAISS object you wish to merge into the current one

        Returns:
            None.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError("Cannot merge with this type of docstore")
        # Numerical indices for target docs are incremented on the existing ones.
        starting_len = len(self.index_to_docstore_id)

        # Merge two IndexFlatL2
        self.index.merge_from(target.index)

        # Create new ids for docs from the target FAISS object.
        full_info = []
        for i in target.index_to_docstore_id:
            doc = target.docstore.search(target.index_to_docstore_id[i])
            if not isinstance(doc, Document):
                raise ValueError("Document should be returned")
            full_info.append((starting_len + i, str(uuid.uuid4()), doc))

        # Add information to docstore and index_to_docstore_id.
        self.docstore.add({_id: doc for _, _id, doc in full_info})
        index_to_id = {index: _id for index, _id, _ in full_info}
        self.index_to_docstore_id.update(index_to_id)
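
    # Example (hypothetical): both stores must use the same embedding
    # dimensionality, and the receiving docstore must be addable.
    #
    #     db1 = FAISS.from_texts(["foo"], embeddings)
    #     db2 = FAISS.from_texts(["bar"], embeddings)
    #     db1.merge_from(db2)   # db1 now contains both documents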
    @classmethod
    def __from(
        cls,
        texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        faiss = dependable_faiss_import()
        index = faiss.IndexFlatL2(len(embeddings[0]))
        index.add(np.array(embeddings, dtype=np.float32))
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(embedding.embed_query, index, docstore, index_to_id)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates an in-memory docstore.
            3. Initializes the FAISS database.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                faiss = FAISS.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(texts, embeddings, embedding, metadatas, **kwargs)
""" embeddings = embedding.embed_documents(texts) return cls.__from(texts, embeddings, embedding, metadatas, **kwargs) [docs] @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> FAISS: """Construct FAISS wrapper from raw documents. This is a user friendly interface that: 1. Embeds documents. 2. Creates an in memory docstore 3. Initializes the FAISS database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import FAISS from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() faiss = FAISS.from_texts(texts, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from(texts, embeddings, embedding, metadatas, **kwargs) [docs] def save_local(self, folder_path: str) -> None: """Save FAISS index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. """ path = Path(folder_path) path.mkdir(exist_ok=True, parents=True) # save index separately since it is not picklable faiss = dependable_faiss_import() faiss.write_index(self.index, str(path / "index.faiss"))
    @classmethod
    def load_local(cls, folder_path: str, embeddings: Embeddings) -> FAISS:
        """Load FAISS index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries.
        """
        path = Path(folder_path)
        # load index separately since it is not picklable
        faiss = dependable_faiss_import()
        index = faiss.read_index(str(path / "index.faiss"))

        # load docstore and index_to_docstore_id
        with open(path / "index.pkl", "rb") as f:
            docstore, index_to_docstore_id = pickle.load(f)
        return cls(embeddings.embed_query, index, docstore, index_to_docstore_id)

By Harrison Chase
© Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.