Source code for langchain.embeddings.openai

def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _completion_with_retry(**kwargs: Any) -> Any:
        return embeddings.client.create(**kwargs)

    return _completion_with_retry(**kwargs)


class OpenAIEmbeddings(BaseModel, Embeddings):
    """Wrapper around OpenAI embedding models.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key, or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import OpenAIEmbeddings
            openai = OpenAIEmbeddings(openai_api_key="my-api-key")

    In order to use the library with Microsoft Azure endpoints, you need to set
    OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and, optionally,
    OPENAI_API_VERSION. OPENAI_API_TYPE must be set to 'azure' and the others
    correspond to the properties of your endpoint.
    In addition, the deployment name must be passed as the model parameter.

    Example:
        .. code-block:: python

            import os
            os.environ["OPENAI_API_TYPE"] = "azure"
            os.environ["OPENAI_API_BASE"] = "https://<your-endpoint>.openai.azure.com/"
            os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key"

            from langchain.embeddings.openai import OpenAIEmbeddings
            embeddings = OpenAIEmbeddings(model="your-embeddings-deployment-name")
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any  #: :meta private:
    model: str = "text-embedding-ada-002"
    # TODO: deprecate these two in favor of `model`
    # https://community.openai.com/t/api-update-engines-models/18597
    # https://github.com/openai/openai-python/issues/132
    document_model_name: str = "text-embedding-ada-002"
    query_model_name: str = "text-embedding-ada-002"
    embedding_ctx_length: int = -1
    openai_api_key: Optional[str] = None
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    # TODO: deprecate this
    @root_validator(pre=True)
    def get_model_names(cls, values: Dict) -> Dict:
        """Get model names from just the old model name."""
        # model_name is for first generation, and model is for second generation.
        # Both are not allowed together.
        if "model_name" in values and "model" in values:
            raise ValueError(
                "Both `model_name` and `model` were provided, "
                "but only one should be."
            )
        if "model_name" in values:
            if "document_model_name" in values:
                raise ValueError(
                    "Both `model_name` and `document_model_name` were provided, "
                    "but only one should be."
                )
            if "query_model_name" in values:
                raise ValueError(
"Both `model_name` and `query_model_name` were provided, " "but only one should be." ) model_name = values.pop("model_name") values["document_model_name"] = f"text-search-{model_name}-doc-001" values["query_model_name"] = f"text-search-{model_name}-query-001" # Set document/query model names from model parameter. if "model" in values: if "document_model_name" in values: raise ValueError( "Both `model` and `document_model_name` were provided, " "but only one should be." ) if "query_model_name" in values: raise ValueError( "Both `model` and `query_model_name` were provided, " "but only one should be." ) model = values.get("model") values["document_model_name"] = model values["query_model_name"] = model return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) try: import openai openai.api_key = openai_api_key values["client"] = openai.Embedding except ImportError: raise ValueError( "Could not import openai python package. " "Please it install it with `pip install openai`." ) return values # please refer to # https://github.com/openai/openai-cookbook/blob/main/examples/Embedding_long_inputs.ipynb
    def _get_len_safe_embeddings(
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken

            tokens = []
            indices = []
            encoding = tiktoken.model.encoding_for_model(self.document_model_name)
            for i, text in enumerate(texts):
                # replace newlines, which can negatively affect performance
                text = text.replace("\n", " ")
                token = encoding.encode(text)
                # split each text into windows of at most embedding_ctx_length
                # tokens, remembering which source text each window came from
                for j in range(0, len(token), self.embedding_ctx_length):
                    tokens += [token[j : j + self.embedding_ctx_length]]
                    indices += [i]

            batched_embeddings = []
            _chunk_size = chunk_size or self.chunk_size
            for i in range(0, len(tokens), _chunk_size):
                response = embed_with_retry(
                    self,
                    input=tokens[i : i + _chunk_size],
                    engine=self.document_model_name,
                )
                batched_embeddings += [r["embedding"] for r in response["data"]]

            results: List[List[List[float]]] = [[] for _ in range(len(texts))]
            lens: List[List[int]] = [[] for _ in range(len(texts))]
            for i in range(len(indices)):
                results[indices[i]].append(batched_embeddings[i])
                # weight each window by the number of tokens it contains
                lens[indices[i]].append(len(tokens[i]))

            for i in range(len(texts)):
                # length-weighted average of the window embeddings,
                # re-normalized to unit length
                average = np.average(results[i], axis=0, weights=lens[i])
                embeddings[i] = (average / np.linalg.norm(average)).tolist()

            return embeddings
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed for OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint."""
        if self.embedding_ctx_length > 0:
            return self._get_len_safe_embeddings([text], engine=engine)[0]
        else:
            # replace newlines, which can negatively affect performance
            text = text.replace("\n", " ")
            return embed_with_retry(self, input=[text], engine=engine)["data"][0][
                "embedding"
            ]

    def embed_documents(
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        # handle large batches of texts
        if self.embedding_ctx_length > 0:
            return self._get_len_safe_embeddings(
                texts, engine=self.document_model_name
            )
        else:
            results = []
            _chunk_size = chunk_size or self.chunk_size
            for i in range(0, len(texts), _chunk_size):
                response = embed_with_retry(
                    self,
                    input=texts[i : i + _chunk_size],
                    engine=self.document_model_name,
                )
                results += [r["embedding"] for r in response["data"]]
            return results

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self._embedding_func(text, engine=self.query_model_name)
        return embedding
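The length-safe path above splits an over-long input into token windows, embeds each window separately, and then combines the window embeddings by a length-weighted average followed by re-normalization. A minimal sketch of that combination step; the window embeddings and token counts here are illustrative numbers, not output from any real model:

    import numpy as np

    # hypothetical per-window embeddings for one long text (2-d for
    # readability; text-embedding-ada-002 vectors are 1536-d)
    window_embeddings = np.array([[0.6, 0.8], [1.0, 0.0]])
    window_token_counts = [400, 100]  # the first window carries 4x the weight

    average = np.average(window_embeddings, axis=0, weights=window_token_counts)
    unit = average / np.linalg.norm(average)  # re-normalize to unit length
    print(unit)  # the combined embedding for the full text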
Source code for langchain.embeddings.sagemaker_endpoint

"""Wrapper around Sagemaker InvokeEndpoint API."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.llms.sagemaker_endpoint import ContentHandlerBase


class SagemakerEndpointEmbeddings(BaseModel, Embeddings):
    """Wrapper around custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model and the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html

    Example:
        .. code-block:: python

            from langchain.embeddings import SagemakerEndpointEmbeddings
            endpoint_name = "my-endpoint-name"
            region_name = "us-west-2"
            credentials_profile_name = "default"
            se = SagemakerEndpointEmbeddings(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """

    client: Any  #: :meta private:

    endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model. Must be unique within an AWS Region.""" region_name: str = "" """The aws region where the Sagemaker model is deployed, eg. `us-west-2`.""" credentials_profile_name: Optional[str] = None """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which has either access keys or role information specified. If not specified, the default credential profile or, if on an EC2 instance, credentials from IMDS will be used. See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html """ content_handler: ContentHandlerBase """The content handler class that provides an input and output transform functions to handle formats between LLM and the endpoint. """ """ Example: .. code-block:: python from langchain.llms.sagemaker_endpoint import ContentHandlerBase class ContentHandler(ContentHandlerBase): content_type = "application/json" accepts = "application/json" def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes: input_str = json.dumps({prompt: prompt, **model_kwargs}) return input_str.encode('utf-8') def transform_output(self, output: bytes) -> str: response_json = json.loads(output.read().decode("utf-8")) return response_json[0]["generated_text"] """ model_kwargs: Optional[Dict] = None """Key word arguments to pass to the model.""" endpoint_kwargs: Optional[Dict] = None """Optional attributes passed to the invoke_endpoint
    """Optional attributes passed to the invoke_endpoint
    function. See the `boto3`_ docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials and the python package exist in the environment."""
        try:
            import boto3

            try:
                if values["credentials_profile_name"] is not None:
                    session = boto3.Session(
                        profile_name=values["credentials_profile_name"]
                    )
                else:
                    # use default credentials
                    session = boto3.Session()

                values["client"] = session.client(
                    "sagemaker-runtime", region_name=values["region_name"]
                )
            except Exception as e:
                raise ValueError(
                    "Could not load credentials to authenticate with AWS client. "
                    "Please check that credentials in the specified "
                    "profile name are valid."
                ) from e
        except ImportError:
            raise ValueError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
        return values

    def _embedding_func(self, texts: List[str]) -> List[float]:
        """Call out to SageMaker Inference embedding endpoint."""
        # replace newlines, which can negatively affect performance
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        _model_kwargs = self.model_kwargs or {}
        _endpoint_kwargs = self.endpoint_kwargs or {}
        body = self.content_handler.transform_input(texts, _model_kwargs)
        content_type = self.content_handler.content_type
        accepts = self.content_handler.accepts

        # send request
        try:
            response = self.client.invoke_endpoint(
                EndpointName=self.endpoint_name,
                Body=body,
                ContentType=content_type,
                Accept=accepts,
                **_endpoint_kwargs,
            )
        except Exception as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        return self.content_handler.transform_output(response["Body"])

    def embed_documents(
        self, texts: List[str], chunk_size: int = 64
    ) -> List[List[float]]:
        """Compute doc embeddings using a SageMaker Inference Endpoint.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size defines how many input texts will
                be grouped together as one request. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        results = []
        _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
        for i in range(0, len(texts), _chunk_size):
            response = self._embedding_func(texts[i : i + _chunk_size])
            results.append(response)
        return results

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a SageMaker inference endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        return self._embedding_func([text])
Source code for langchain.embeddings.self_hosted

"""Running custom embedding models on self-hosted remote hardware."""
from typing import Any, Callable, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings
from langchain.llms import SelfHostedPipeline


def _embed_documents(pipeline: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    return pipeline(*args, **kwargs)


class SelfHostedEmbeddings(SelfHostedPipeline, Embeddings, BaseModel):
    """Runs custom embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example using a model load function:
        .. code-block:: python

            from langchain.embeddings import SelfHostedEmbeddings
            from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
            import runhouse as rh

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")

            def get_pipeline():
                model_id = "facebook/bart-large"
                tokenizer = AutoTokenizer.from_pretrained(model_id)
                model = AutoModelForCausalLM.from_pretrained(model_id)
                return pipeline("feature-extraction", model=model, tokenizer=tokenizer)
            embeddings = SelfHostedEmbeddings(
                model_load_fn=get_pipeline,
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )

    Example passing in a pipeline path:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHFEmbeddings
            import runhouse as rh
            from transformers import pipeline

            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            pipeline = pipeline(model="bert-base-uncased", task="feature-extraction")
            rh.blob(pickle.dumps(pipeline),
                    path="models/pipeline.pkl").save().to(gpu, path="models")
            embeddings = SelfHostedHFEmbeddings.from_pipeline(
                pipeline="models/pipeline.pkl",
                hardware=gpu,
                model_reqs=["./", "torch", "transformers"],
            )
    """

    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings on the remote hardware."""

    inference_kwargs: Any = None
    """Any kwargs to pass to the model's inference function."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace transformer model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.client(self.pipeline_ref, texts)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace transformer model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embeddings = self.client(self.pipeline_ref, text)
        if not isinstance(embeddings, list):
            return embeddings.tolist()
        return embeddings
Source code for langchain.embeddings.self_hosted_hugging_face

"""Wrapper around HuggingFace embedding models for self-hosted remote hardware."""
import importlib
import logging
from typing import Any, Callable, List, Optional

from pydantic import BaseModel

from langchain.embeddings.self_hosted import SelfHostedEmbeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)

logger = logging.getLogger(__name__)


def _embed_documents(client: Any, *args: Any, **kwargs: Any) -> List[List[float]]:
    """Inference function to send to the remote hardware.

    Accepts a sentence_transformer model_id and
    returns a list of embeddings for each document in the batch.
    """
    return client.encode(*args, **kwargs)


def load_embedding_model(model_id: str, instruct: bool = False, device: int = 0) -> Any:
    """Load the embedding model."""
    if not instruct:
        import sentence_transformers

        client = sentence_transformers.SentenceTransformer(model_id)
    else:
        from InstructorEmbedding import INSTRUCTOR

        client = INSTRUCTOR(model_id)

    if importlib.util.find_spec("torch") is not None:
        import torch

        cuda_device_count = torch.cuda.device_count()
        if device < -1 or (device >= cuda_device_count):
            raise ValueError(
                f"Got device=={device}, "
                f"device is required to be within [-1, {cuda_device_count})"
            )
        if device < 0 and cuda_device_count > 0:
            logger.warning(
                "Device has %d GPUs available. "
                "Provide device={deviceId} to `from_model_id` to use available "
                "GPUs for execution. deviceId is -1 for CPU and "
                "can be a positive integer associated with CUDA device id.",
                cuda_device_count,
            )
        client = client.to(device)
    return client


class SelfHostedHuggingFaceEmbeddings(SelfHostedEmbeddings, BaseModel):
    """Runs sentence_transformers embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceEmbeddings
            import runhouse as rh
            model_name = "sentence-transformers/all-mpnet-base-v2"
            gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
            hf = SelfHostedHuggingFaceEmbeddings(model_name=model_name, hardware=gpu)
    """

    client: Any  #: :meta private:
    model_id: str = DEFAULT_MODEL_NAME
    """Model name to use."""
    model_reqs: List[str] = ["./", "sentence_transformers", "torch"]
    """Requirements to install on hardware to inference the model."""
    hardware: Any
    """Remote hardware to send the inference function to."""
    model_load_fn: Callable = load_embedding_model
    """Function to load the model remotely on the server."""
    load_fn_kwargs: Optional[dict] = None
    """Key word arguments to pass to the model load function."""
    inference_fn: Callable = _embed_documents
    """Inference function to extract the embeddings."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get("model_id", DEFAULT_MODEL_NAME)
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", False)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)


class SelfHostedHuggingFaceInstructEmbeddings(SelfHostedHuggingFaceEmbeddings):
    """Runs InstructorEmbedding embedding models on self-hosted remote hardware.

    Supported hardware includes auto-launched instances on AWS, GCP, Azure,
    and Lambda, as well as servers specified
    by IP address and SSH credentials (such as on-prem, or another
    cloud like Paperspace, Coreweave, etc.).

    To use, you should have the ``runhouse`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import SelfHostedHuggingFaceInstructEmbeddings
            import runhouse as rh
            model_name = "hkunlp/instructor-large"
            gpu = rh.cluster(name='rh-a10x', instance_type='A100:1')
            hf = SelfHostedHuggingFaceInstructEmbeddings(
                model_name=model_name, hardware=gpu)
    """

    model_id: str = DEFAULT_INSTRUCT_MODEL
    """Model name to use."""
    embed_instruction: str = DEFAULT_EMBED_INSTRUCTION
    """Instruction to use for embedding documents."""
    query_instruction: str = DEFAULT_QUERY_INSTRUCTION
    """Instruction to use for embedding query."""
    model_reqs: List[str] = ["./", "InstructorEmbedding", "torch"]
    """Requirements to install on hardware to inference the model."""

    def __init__(self, **kwargs: Any):
        """Initialize the remote inference function."""
        load_fn_kwargs = kwargs.pop("load_fn_kwargs", {})
        load_fn_kwargs["model_id"] = load_fn_kwargs.get(
            "model_id", DEFAULT_INSTRUCT_MODEL
        )
        load_fn_kwargs["instruct"] = load_fn_kwargs.get("instruct", True)
        load_fn_kwargs["device"] = load_fn_kwargs.get("device", 0)
        super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = []
        for text in texts:
            # each input is an [instruction, text] pair
            instruction_pairs.append([self.embed_instruction, text])
        embeddings = self.client(self.pipeline_ref, instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client(self.pipeline_ref, [instruction_pair])[0]
        return embedding.tolist()
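For context on the [instruction, text] pairing used above: INSTRUCTOR-family models encode an instruction alongside each input, so documents and queries can be embedded under different instructions. A minimal local sketch using the InstructorEmbedding package directly; package install and model download are assumed, and the texts are illustrative:

    from InstructorEmbedding import INSTRUCTOR

    model = INSTRUCTOR("hkunlp/instructor-large")

    # documents and queries get different instructions, mirroring
    # DEFAULT_EMBED_INSTRUCTION and DEFAULT_QUERY_INSTRUCTION above
    doc_pairs = [
        ["Represent the document for retrieval: ", "LangChain wraps embedding models."]
    ]
    query_pair = [
        ["Represent the question for retrieving supporting documents: ",
         "What does LangChain do?"]
    ]

    doc_vectors = model.encode(doc_pairs)      # ndarray, 768-d for instructor-large
    query_vector = model.encode(query_pair)[0]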
Source code for langchain.embeddings.tensorflow_hub

"""Wrapper around TensorflowHub embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_URL = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"


class TensorflowHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around tensorflow_hub embedding models.

    To use, you should have the ``tensorflow_text`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import TensorflowHubEmbeddings
            url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
            tf = TensorflowHubEmbeddings(model_url=url)
    """

    embed: Any  #: :meta private:
    model_url: str = DEFAULT_MODEL_URL
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the tensorflow_hub and tensorflow_text."""
        super().__init__(**kwargs)
        try:
            import tensorflow_hub
            import tensorflow_text  # noqa

            self.embed = tensorflow_hub.load(self.model_url)
        except ImportError as e:
            raise ValueError(
                "Could not import tensorflow_hub or tensorflow_text python "
                "packages. Please install them with "
                "`pip install tensorflow_hub tensorflow_text`."
            ) from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a TensorflowHub embedding model.

        Args:
            texts: The list of texts to embed.
        Returns:
            List of embeddings, one for each text.
        """
        texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = self.embed(texts).numpy()
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a TensorflowHub embedding model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        text = text.replace("\n", " ")
        embedding = self.embed([text]).numpy()[0]
        return embedding.tolist()
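A short usage sketch tying the two methods together. The TF Hub module is downloaded on first use, so network access and the tensorflow, tensorflow_hub, and tensorflow_text packages are assumed:

    from langchain.embeddings import TensorflowHubEmbeddings

    embedder = TensorflowHubEmbeddings()  # uses DEFAULT_MODEL_URL

    doc_vectors = embedder.embed_documents(["first document", "second document"])
    query_vector = embedder.embed_query("a question about the documents")
    print(len(doc_vectors), len(query_vector))  # 2 docs; 512-d for this encoder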
Source code for langchain.llms.ai21

"""Wrapper around AI21 APIs."""
from typing import Any, Dict, List, Optional

import requests
from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env


class AI21PenaltyData(BaseModel):
    """Parameters for AI21 penalty data."""

    scale: int = 0
    applyToWhitespaces: bool = True
    applyToPunctuations: bool = True
    applyToNumbers: bool = True
    applyToStopwords: bool = True
    applyToEmojis: bool = True


class AI21(LLM, BaseModel):
    """Wrapper around AI21 large language models.

    To use, you should have the environment variable ``AI21_API_KEY``
    set with your API key.

    Example:
        .. code-block:: python

            from langchain.llms import AI21
            ai21 = AI21(model="j1-jumbo")
    """

    model: str = "j1-jumbo"
    """Model name to use."""

    temperature: float = 0.7
    """What sampling temperature to use."""

    maxTokens: int = 256
    """The maximum number of tokens to generate in the completion."""

    minTokens: int = 0
    """The minimum number of tokens to generate in the completion."""

    topP: float = 1.0
    """Total probability mass of tokens to consider at each step."""

    presencePenalty: AI21PenaltyData = AI21PenaltyData()
    """Penalizes repeated tokens."""

    countPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to count.""" frequencyPenalty: AI21PenaltyData = AI21PenaltyData() """Penalizes repeated tokens according to frequency.""" numResults: int = 1 """How many completions to generate for each prompt.""" logitBias: Optional[Dict[str, float]] = None """Adjust the probability of specific tokens being generated.""" ai21_api_key: Optional[str] = None stop: Optional[List[str]] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment.""" ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY") values["ai21_api_key"] = ai21_api_key return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling AI21 API.""" return { "temperature": self.temperature, "maxTokens": self.maxTokens, "minTokens": self.minTokens, "topP": self.topP, "presencePenalty": self.presencePenalty.dict(), "countPenalty": self.countPenalty.dict(), "frequencyPenalty": self.frequencyPenalty.dict(), "numResults": self.numResults, "logitBias": self.logitBias, } @property def _identifying_params(self) -> Dict[str, Any]:
    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model": self.model}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "ai21"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call out to AI21's complete endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = ai21("Tell me a joke.")
        """
        if self.stop is not None and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")
        elif self.stop is not None:
            stop = self.stop
        elif stop is None:
            stop = []

        if self.base_url is not None:
            base_url = self.base_url
        else:
            if self.model in ("j1-grande-instruct",):
                base_url = "https://api.ai21.com/studio/v1/experimental"
            else:
                base_url = "https://api.ai21.com/studio/v1"

        response = requests.post(
            url=f"{base_url}/{self.model}/complete",
            headers={"Authorization": f"Bearer {self.ai21_api_key}"},
            json={"prompt": prompt, "stopSequences": stop, **self._default_params},
        )
        if response.status_code != 200:
            optional_detail = response.json().get("error")
            raise ValueError(
                f"AI21 /complete call failed with status code {response.status_code}."
                f" Details: {optional_detail}"
            )
        response_json = response.json()
        return response_json["completions"][0]["data"]["text"]
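The camelCase penalty fields above are serialized verbatim into the AI21 REST payload via `_default_params`. A small sketch of tuning one of them, using `AI21PenaltyData` as defined in this module; the scale value is illustrative, not a recommendation:

    from langchain.llms import AI21
    from langchain.llms.ai21 import AI21PenaltyData

    # penalize repeated tokens by count, but leave whitespace unaffected
    count_penalty = AI21PenaltyData(scale=2, applyToWhitespaces=False)

    llm = AI21(
        model="j1-jumbo",
        maxTokens=128,
        countPenalty=count_penalty,
        ai21_api_key="my-api-key",  # or set the AI21_API_KEY env var
    )
    text = llm("Write a one-line product tagline.")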
Source code for langchain.llms.aleph_alpha

"""Wrapper around Aleph Alpha APIs."""
from typing import Any, Dict, List, Optional, Sequence

from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env


class AlephAlpha(LLM, BaseModel):
    """Wrapper around Aleph Alpha large language models.

    To use, you should have the ``aleph_alpha_client`` python package installed,
    and the environment variable ``ALEPH_ALPHA_API_KEY`` set with your API key,
    or pass it as a named parameter to the constructor.

    Parameters are explained in more depth here:
    https://github.com/Aleph-Alpha/aleph-alpha-client/blob/c14b7dd2b4325c7da0d6a119f6e76385800e097b/aleph_alpha_client/completion.py#L10

    Example:
        .. code-block:: python

            from langchain.llms import AlephAlpha
            aleph_alpha = AlephAlpha(aleph_alpha_api_key="my-api-key")
    """

    client: Any  #: :meta private:
    model: Optional[str] = "luminous-base"
    """Model name to use."""

    maximum_tokens: int = 64
    """The maximum number of tokens to be generated."""

    temperature: float = 0.0
    """A non-negative float that tunes the degree of randomness in generation."""

    top_k: int = 0
    """Number of most likely tokens to consider at each step."""

    top_p: float = 0.0
"""Total probability mass of tokens to consider at each step.""" presence_penalty: float = 0.0 """Penalizes repeated tokens.""" frequency_penalty: float = 0.0 """Penalizes repeated tokens according to frequency.""" repetition_penalties_include_prompt: Optional[bool] = False """Flag deciding whether presence penalty or frequency penalty are updated from the prompt.""" use_multiplicative_presence_penalty: Optional[bool] = False """Flag deciding whether presence penalty is applied multiplicatively (True) or additively (False).""" penalty_bias: Optional[str] = None """Penalty bias for the completion.""" penalty_exceptions: Optional[List[str]] = None """List of strings that may be generated without penalty, regardless of other penalty settings""" penalty_exceptions_include_stop_sequences: Optional[bool] = None """Should stop_sequences be included in penalty_exceptions.""" best_of: Optional[int] = None """returns the one with the "best of" results (highest log probability per token) """ n: int = 1 """How many completions to generate for each prompt.""" logit_bias: Optional[Dict[int, float]] = None """The logit bias allows to influence the likelihood of generating tokens.""" log_probs: Optional[int] = None """Number of top log probabilities to be returned for each generated token.""" tokens: Optional[bool] = False """return tokens of completion.""" disable_optimizations: Optional[bool] = False minimum_tokens: Optional[int] = 0 """Generate at least this number of tokens.""" echo: bool = False """Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False
"""Echo the prompt in the completion.""" use_multiplicative_frequency_penalty: bool = False sequence_penalty: float = 0.0 sequence_penalty_min_length: int = 2 use_multiplicative_sequence_penalty: bool = False completion_bias_inclusion: Optional[Sequence[str]] = None completion_bias_inclusion_first_token_only: bool = False completion_bias_exclusion: Optional[Sequence[str]] = None completion_bias_exclusion_first_token_only: bool = False """Only consider the first token for the completion_bias_exclusion.""" contextual_control_threshold: Optional[float] = None """If set to None, attention control parameters only apply to those tokens that have explicitly been set in the request. If set to a non-None value, control parameters are also applied to similar tokens. """ control_log_additive: Optional[bool] = True """True: apply control by adding the log(control_factor) to attention scores. False: (attention_scores - - attention_scores.min(-1)) * control_factor """ repetition_penalties_include_completion: bool = True """Flag deciding whether presence penalty or frequency penalty are updated from the completion.""" raw_completion: bool = False """Force the raw completion of the model to be returned.""" aleph_alpha_api_key: Optional[str] = None """API key for Aleph Alpha API.""" stop_sequences: Optional[List[str]] = None """Stop sequences to use.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment."""
"""Validate that api key and python package exists in environment.""" aleph_alpha_api_key = get_from_dict_or_env( values, "aleph_alpha_api_key", "ALEPH_ALPHA_API_KEY" ) try: import aleph_alpha_client values["client"] = aleph_alpha_client.Client(token=aleph_alpha_api_key) except ImportError: raise ValueError( "Could not import aleph_alpha_client python package. " "Please it install it with `pip install aleph_alpha_client`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling the Aleph Alpha API.""" return { "maximum_tokens": self.maximum_tokens, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "n": self.n, "repetition_penalties_include_prompt": self.repetition_penalties_include_prompt, # noqa: E501 "use_multiplicative_presence_penalty": self.use_multiplicative_presence_penalty, # noqa: E501 "penalty_bias": self.penalty_bias, "penalty_exceptions": self.penalty_exceptions, "penalty_exceptions_include_stop_sequences": self.penalty_exceptions_include_stop_sequences, # noqa: E501 "best_of": self.best_of, "logit_bias": self.logit_bias, "log_probs": self.log_probs, "tokens": self.tokens, "disable_optimizations": self.disable_optimizations, "minimum_tokens": self.minimum_tokens, "echo": self.echo,
"minimum_tokens": self.minimum_tokens, "echo": self.echo, "use_multiplicative_frequency_penalty": self.use_multiplicative_frequency_penalty, # noqa: E501 "sequence_penalty": self.sequence_penalty, "sequence_penalty_min_length": self.sequence_penalty_min_length, "use_multiplicative_sequence_penalty": self.use_multiplicative_sequence_penalty, # noqa: E501 "completion_bias_inclusion": self.completion_bias_inclusion, "completion_bias_inclusion_first_token_only": self.completion_bias_inclusion_first_token_only, # noqa: E501 "completion_bias_exclusion": self.completion_bias_exclusion, "completion_bias_exclusion_first_token_only": self.completion_bias_exclusion_first_token_only, # noqa: E501 "contextual_control_threshold": self.contextual_control_threshold, "control_log_additive": self.control_log_additive, "repetition_penalties_include_completion": self.repetition_penalties_include_completion, # noqa: E501 "raw_completion": self.raw_completion, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "alpeh_alpha" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Aleph Alpha's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example:
        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = aleph_alpha("Tell me a joke.")
        """
        from aleph_alpha_client import CompletionRequest, Prompt

        params = self._default_params
        if self.stop_sequences is not None and stop is not None:
            raise ValueError(
                "stop sequences found in both the input and default params."
            )
        elif self.stop_sequences is not None:
            params["stop_sequences"] = self.stop_sequences
        else:
            params["stop_sequences"] = stop

        request = CompletionRequest(prompt=Prompt.from_text(prompt), **params)
        response = self.client.complete(model=self.model, request=request)
        text = response.completions[0].completion
        # If stop tokens are provided, Aleph Alpha's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.
        if stop is not None or self.stop_sequences is not None:
            text = enforce_stop_tokens(text, params["stop_sequences"])
        return text
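`enforce_stop_tokens`, imported at the top of this module and reused by several wrappers below, is what actually strips the stop sequences from a completion. A minimal sketch of the behavior it implements (cut the text at the first occurrence of any stop sequence); this is an illustrative reimplementation, not the library's exact code:

    import re
    from typing import List


    def enforce_stop_tokens_sketch(text: str, stop: List[str]) -> str:
        """Cut off the text as soon as any stop word occurs."""
        # split on any stop sequence; keep everything before the first hit
        return re.split("|".join(map(re.escape, stop)), text)[0]


    print(enforce_stop_tokens_sketch("a joke.\n\nHuman: tell another", ["\n\nHuman:"]))
    # -> "a joke."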
Source code for langchain.llms.anthropic

"""Wrapper around Anthropic APIs."""
import re
from typing import Any, Dict, Generator, List, Mapping, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env


class Anthropic(LLM, BaseModel):
    r"""Wrapper around Anthropic large language models.

    To use, you should have the ``anthropic`` python package installed, and the
    environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            import anthropic
            from langchain.llms import Anthropic
            model = Anthropic(model="<model_name>", anthropic_api_key="my-api-key")

            # Simplest invocation, automatically wrapped with HUMAN_PROMPT
            # and AI_PROMPT.
            response = model("What are the biggest risks facing humanity?")

            # Or if you want to use the chat mode, build a few-shot-prompt, or
            # put words in the Assistant's mouth, use HUMAN_PROMPT and AI_PROMPT:
            raw_prompt = "What are the biggest risks facing humanity?"
            prompt = f"{anthropic.HUMAN_PROMPT} {raw_prompt}{anthropic.AI_PROMPT}"
            response = model(prompt)
    """

    client: Any  #: :meta private:
    model: str = "claude-v1"
    """Model name to use."""

    max_tokens_to_sample: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 1.0
"""A non-negative float that tunes the degree of randomness in generation.""" top_k: int = 0 """Number of most likely tokens to consider at each step.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" anthropic_api_key: Optional[str] = None HUMAN_PROMPT: Optional[str] = None AI_PROMPT: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" anthropic_api_key = get_from_dict_or_env( values, "anthropic_api_key", "ANTHROPIC_API_KEY" ) try: import anthropic values["client"] = anthropic.Client(anthropic_api_key) values["HUMAN_PROMPT"] = anthropic.HUMAN_PROMPT values["AI_PROMPT"] = anthropic.AI_PROMPT except ImportError: raise ValueError( "Could not import anthropic python package. " "Please it install it with `pip install anthropic`." ) return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling Anthropic API.""" return { "max_tokens_to_sample": self.max_tokens_to_sample, "temperature": self.temperature, "top_k": self.top_k, "top_p": self.top_p, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters."""
"""Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "anthropic" def _wrap_prompt(self, prompt: str) -> str: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if prompt.startswith(self.HUMAN_PROMPT): return prompt # Already wrapped. # Guard against common errors in specifying wrong number of newlines. corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt) if n_subs == 1: return corrected_prompt # As a last resort, wrap the prompt ourselves to emulate instruct-style. return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n" def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") if stop is None: stop = [] # Never want model to invent new turns of Human / Assistant dialog. stop.extend([self.HUMAN_PROMPT, self.AI_PROMPT]) return stop def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: r"""Call out to Anthropic's completion endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns:
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                prompt = "What are the biggest risks facing humanity?"
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                response = model(prompt)
        """
        stop = self._get_anthropic_stop(stop)
        response = self.client.completion(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
        text = response["completion"]
        return text

    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
        r"""Call Anthropic completion_stream and return the resulting generator.

        BETA: this is a beta feature while we figure out the right abstraction.
        Once that happens, this interface could change.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens from Anthropic.

        Example:
            .. code-block:: python

                prompt = "Write a poem about a stream."
                prompt = f"\n\nHuman: {prompt}\n\nAssistant:"
                generator = anthropic.stream(prompt)
                for token in generator:
                    yield token
        """
        stop = self._get_anthropic_stop(stop)
        return self.client.completion_stream(
            model=self.model,
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            **self._default_params,
        )
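To make the `_wrap_prompt` fallbacks concrete, here is a short sketch of the three paths it takes, with the `"\n\nHuman:"` / `"\n\nAssistant:"` literals the ``anthropic`` package used at the time assumed rather than imported:

    import re

    HUMAN_PROMPT = "\n\nHuman:"   # anthropic.HUMAN_PROMPT (assumed value)
    AI_PROMPT = "\n\nAssistant:"  # anthropic.AI_PROMPT (assumed value)


    def wrap_prompt_sketch(prompt: str) -> str:
        if prompt.startswith(HUMAN_PROMPT):
            return prompt  # path 1: already wrapped
        corrected, n_subs = re.subn(r"^\n*Human:", HUMAN_PROMPT, prompt)
        if n_subs == 1:
            return corrected  # path 2: fix a "Human:" with the wrong newlines
        # path 3: wrap a bare instruction-style prompt
        return f"{HUMAN_PROMPT} {prompt}{AI_PROMPT} Sure, here you go:\n"


    print(wrap_prompt_sketch("Tell me a joke."))
    # -> "\n\nHuman: Tell me a joke.\n\nAssistant: Sure, here you go:\n"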
Source code for langchain.llms.bananadev

"""Wrapper around Banana API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class Banana(LLM, BaseModel):
    """Wrapper around Banana large language models.

    To use, you should have the ``banana-dev`` python package installed,
    and the environment variable ``BANANA_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import Banana
            banana = Banana(model_key="")
    """

    model_key: str = ""
    """model endpoint to use"""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not
    explicitly specified."""

    banana_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        banana_api_key = get_from_dict_or_env(
            values, "banana_api_key", "BANANA_API_KEY"
        )
        values["banana_api_key"] = banana_api_key
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"model_key": self.model_key},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "banana"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call to Banana endpoint."""
        try:
            import banana_dev as banana
        except ImportError:
            raise ValueError(
                "Could not import banana-dev python package. "
                "Please install it with `pip install banana-dev`."
            )
        params = self.model_kwargs or {}
        api_key = self.banana_api_key
        model_key = self.model_key
        model_inputs = {
            # a json specific to your model
"prompt": prompt, **params, } response = banana.run(api_key, model_key, model_inputs) try: text = response["modelOutputs"][0]["output"] except (KeyError, TypeError): returned = response["modelOutputs"][0] raise ValueError( "Response should be of schema: {'output': 'text'}." f"\nResponse was: {returned}" "\nTo fix this:" "\n- fork the source repo of the Banana model" "\n- modify app.py to return the above schema" "\n- deploy that as a custom repo" ) if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.llms.cerebriumai

"""Wrapper around CerebriumAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class CerebriumAI(LLM, BaseModel):
    """Wrapper around CerebriumAI large language models.

    To use, you should have the ``cerebrium`` python package installed,
    and the environment variable ``CEREBRIUMAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import CerebriumAI
            cerebrium = CerebriumAI(endpoint_url="")
    """

    endpoint_url: str = ""
    """model endpoint to use"""

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not
    explicitly specified."""

    cerebriumai_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""{field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that the api key and python package exist in the environment."""
        cerebriumai_api_key = get_from_dict_or_env(
            values, "cerebriumai_api_key", "CEREBRIUMAI_API_KEY"
        )
        values["cerebriumai_api_key"] = cerebriumai_api_key
        return values

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {
            **{"endpoint_url": self.endpoint_url},
            **{"model_kwargs": self.model_kwargs},
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "cerebriumai"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call to CerebriumAI endpoint."""
        try:
            from cerebrium import model_api_request
        except ImportError:
            raise ValueError(
                "Could not import cerebrium python package. "
                "Please install it with `pip install cerebrium`."
            )
"Please install it with `pip install cerebrium`." ) params = self.model_kwargs or {} response = model_api_request( self.endpoint_url, {"prompt": prompt, **params}, self.cerebriumai_api_key ) text = response["data"]["result"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.llms.cohere

"""Wrapper around Cohere APIs."""
import logging
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class Cohere(LLM, BaseModel):
    """Wrapper around Cohere large language models.

    To use, you should have the ``cohere`` python package installed, and the
    environment variable ``COHERE_API_KEY`` set with your API key, or pass
    it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.llms import Cohere
            cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
    """

    client: Any  #: :meta private:
    model: Optional[str] = None
    """Model name to use."""

    max_tokens: int = 256
    """Denotes the number of tokens to predict per generation."""

    temperature: float = 0.75
    """A non-negative float that tunes the degree of randomness in generation."""

    k: int = 0
    """Number of most likely tokens to consider at each step."""

    p: int = 1
    """Total probability mass of tokens to consider at each step."""

    frequency_penalty: float = 0.0
    """Penalizes repeated tokens according to frequency. Between 0 and 1."""

    presence_penalty: float = 0.0
    """Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None """Specify how the client handles inputs longer than the maximum token length: Truncate from START, END or NONE""" cohere_api_key: Optional[str] = None stop: Optional[List[str]] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" cohere_api_key = get_from_dict_or_env( values, "cohere_api_key", "COHERE_API_KEY" ) try: import cohere values["client"] = cohere.Client(cohere_api_key) except ImportError: raise ValueError( "Could not import cohere python package. " "Please install it with `pip install cohere`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling Cohere API.""" return { "max_tokens": self.max_tokens, "temperature": self.temperature, "k": self.k, "p": self.p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "truncate": self.truncate, } @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" return {**{"model": self.model}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "cohere"
"""Return type of llm.""" return "cohere" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to Cohere's generate endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = cohere("Tell me a joke.") """ params = self._default_params if self.stop is not None and stop is not None: raise ValueError("`stop` found in both the input and default params.") elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop response = self.client.generate(model=self.model, prompt=prompt, **params) text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. if stop is not None or self.stop is not None: text = enforce_stop_tokens(text, params["stop_sequences"]) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.llms.deepinfra """Wrapper around DeepInfra APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_MODEL_ID = "google/flan-t5-xl" [docs]class DeepInfra(LLM, BaseModel): """Wrapper around DeepInfra deployed models. To use, you should have the ``requests`` python package installed, and the environment variable ``DEEPINFRA_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import DeepInfra di = DeepInfra(model_id="google/flan-t5-xl", deepinfra_api_token="my-api-key") """ model_id: str = DEFAULT_MODEL_ID model_kwargs: Optional[dict] = None deepinfra_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" deepinfra_api_token = get_from_dict_or_env( values, "deepinfra_api_token", "DEEPINFRA_API_TOKEN" ) values["deepinfra_api_token"] = deepinfra_api_token return values @property def _identifying_params(self) -> Mapping[str, Any]:
@property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "deepinfra" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to DeepInfra's inference API endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = di("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} res = requests.post( f"https://api.deepinfra.com/v1/inference/{self.model_id}", headers={ "Authorization": f"bearer {self.deepinfra_api_token}", "Content-Type": "application/json", }, json={"input": prompt, **_model_kwargs}, ) if res.status_code != 200: raise ValueError("Error raised by inference API") text = res.json()[0]["generated_text"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
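A usage sketch under the assumption that ``DEEPINFRA_API_TOKEN`` is set and the default google/flan-t5-xl deployment is available; model_kwargs is sent verbatim in the request body next to the prompt, so the keys below are assumed to be valid for the deployed model.

.. code-block:: python

    from langchain.llms import DeepInfra

    llm = DeepInfra(
        model_id="google/flan-t5-xl",
        model_kwargs={"max_new_tokens": 64},  # assumed generation parameter
    )
    print(llm("Translate to German: Hello, how are you?"))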
Source code for langchain.llms.forefrontai """Wrapper around ForefrontAI APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env [docs]class ForefrontAI(LLM, BaseModel): """Wrapper around ForefrontAI large language models. To use, you should have the environment variable ``FOREFRONTAI_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import ForefrontAI forefrontai = ForefrontAI(endpoint_url="") """ endpoint_url: str = "" """Endpoint URL to use.""" temperature: float = 0.7 """What sampling temperature to use.""" length: int = 256 """The maximum number of tokens to generate in the completion.""" top_p: float = 1.0 """Total probability mass of tokens to consider at each step.""" top_k: int = 40 """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" repetition_penalty: int = 1 """Penalizes repeated tokens according to frequency.""" forefrontai_api_key: Optional[str] = None base_url: Optional[str] = None """Base url to use, if None decides based on model name.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key exists in environment."""
"""Validate that api key exists in environment.""" forefrontai_api_key = get_from_dict_or_env( values, "forefrontai_api_key", "FOREFRONTAI_API_KEY" ) values["forefrontai_api_key"] = forefrontai_api_key return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling ForefrontAI API.""" return { "temperature": self.temperature, "length": self.length, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"endpoint_url": self.endpoint_url}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "forefrontai" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to ForefrontAI's complete endpoint. Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = ForefrontAI("Tell me a joke.") """ response = requests.post( url=self.endpoint_url, headers={ "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, json={"text": prompt, **self._default_params},
}, json={"text": prompt, **self._default_params}, ) response_json = response.json() text = response_json["result"][0]["completion"] if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.llms.gooseai """Wrapper around GooseAI API.""" import logging from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) [docs]class GooseAI(LLM, BaseModel): """Wrapper around GooseAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``GOOSEAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import GooseAI gooseai = GooseAI(model_name="gpt-neo-20b") """ client: Any model_name: str = "gpt-neo-20b" """Model name to use""" temperature: float = 0.7 """What sampling temperature to use""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" min_tokens: int = 1 """The minimum number of tokens to generate in the completion.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1
"""Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" gooseai_api_key: Optional[str] = None class Config: """Configuration for this pydantic config.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transfered to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" gooseai_api_key = get_from_dict_or_env( values, "gooseai_api_key", "GOOSEAI_API_KEY" ) try: import openai
) try: import openai openai.api_key = gooseai_api_key openai.api_base = "https://api.goose.ai/v1" values["client"] = openai.Completion except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling GooseAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "min_tokens": self.min_tokens, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "gooseai" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call the GooseAI API.""" params = self._default_params if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop
params["stop"] = stop response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
Source code for langchain.llms.huggingface_endpoint """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env VALID_TASKS = ("text2text-generation", "text-generation") [docs]class HuggingFaceEndpoint(LLM, BaseModel): """Wrapper around HuggingFaceHub Inference Endpoints. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceEndpoint endpoint_url = ( "https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud" ) hf = HuggingFaceEndpoint( endpoint_url=endpoint_url, huggingfacehub_api_token="my-api-key" ) """ endpoint_url: str = "" """Endpoint URL to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator()
extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.hf_api import HfApi try: HfApi( endpoint="https://huggingface.co", # Can be a Private Hub endpoint. token=huggingfacehub_api_token, ).whoami() except Exception as e: raise ValueError( "Could not authenticate with huggingface_hub. " "Please check your API token." ) from e except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"endpoint_url": self.endpoint_url, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_endpoint" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model.
Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} # payload samples parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} # HTTP headers for authorization headers = { "Authorization": f"Bearer {self.huggingfacehub_api_token}", "Content-Type": "application/json", } # send request try: response = requests.post( self.endpoint_url, headers=headers, json=parameter_payload ) except requests.exceptions.RequestException as e: # This is the correct syntax raise ValueError(f"Error raised by inference endpoint: {e}") generated_text = response.json() if "error" in generated_text: raise ValueError( f"Error raised by inference API: {generated_text['error']}" ) if self.task == "text-generation": # Text generation return includes the starter text. text = generated_text[0]["generated_text"][len(prompt) :] elif self.task == "text2text-generation": text = generated_text[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub.
# stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
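A sketch reusing the placeholder endpoint URL from the class docstring and assuming ``HUGGINGFACEHUB_API_TOKEN`` is set; task must be one of the VALID_TASKS so the response's generated_text can be parsed as shown above.

.. code-block:: python

    from langchain.llms import HuggingFaceEndpoint

    llm = HuggingFaceEndpoint(
        endpoint_url="https://abcdefghijklmnop.us-east-1.aws.endpoints.huggingface.cloud",  # placeholder
        task="text-generation",
        model_kwargs={"max_new_tokens": 50},  # assumed valid generation parameters
    )
    print(llm("The meaning of life is", stop=["\n\n"]))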
Source code for langchain.llms.huggingface_hub """Wrapper around HuggingFace APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utils import get_from_dict_or_env DEFAULT_REPO_ID = "gpt2" VALID_TASKS = ("text2text-generation", "text-generation") [docs]class HuggingFaceHub(LLM, BaseModel): """Wrapper around HuggingFaceHub models. To use, you should have the ``huggingface_hub`` python package installed, and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your API token, or pass it as a named parameter to the constructor. Only supports `text-generation` and `text2text-generation` for now. Example: .. code-block:: python from langchain.llms import HuggingFaceHub hf = HuggingFaceHub(repo_id="gpt2", huggingfacehub_api_token="my-api-key") """ client: Any #: :meta private: repo_id: str = DEFAULT_REPO_ID """Model name to use.""" task: Optional[str] = None """Task to call the model with. Should be a task that returns `generated_text`.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" huggingfacehub_api_token: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict:
@root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" huggingfacehub_api_token = get_from_dict_or_env( values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN" ) try: from huggingface_hub.inference_api import InferenceApi repo_id = values["repo_id"] client = InferenceApi( repo_id=repo_id, token=huggingfacehub_api_token, task=values.get("task"), ) if client.task not in VALID_TASKS: raise ValueError( f"Got invalid task {client.task}, " f"currently only {VALID_TASKS} are supported" ) values["client"] = client except ImportError: raise ValueError( "Could not import huggingface_hub python package. " "Please install it with `pip install huggingface_hub`." ) return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" _model_kwargs = self.model_kwargs or {} return { **{"repo_id": self.repo_id, "task": self.task}, **{"model_kwargs": _model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "huggingface_hub" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to HuggingFace Hub's inference endpoint. Args: prompt: The prompt to pass into the model.
Args: prompt: The prompt to pass into the model. stop: Optional list of stop words to use when generating. Returns: The string generated by the model. Example: .. code-block:: python response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} response = self.client(inputs=prompt, params=_model_kwargs) if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") if self.client.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.client.task == "text2text-generation": text = response[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.client.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
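A sketch assuming ``HUGGINGFACEHUB_API_TOKEN`` is set; google/flan-t5-xl is a text2text-generation model, so InferenceApi infers a supported task and the full generated_text is returned without trimming the prompt.

.. code-block:: python

    from langchain.llms import HuggingFaceHub

    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xl",
        model_kwargs={"temperature": 0.5, "max_length": 64},  # assumed valid for this model
    )
    print(llm("What is the capital of France?"))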
Source code for langchain.llms.huggingface_pipeline """Wrapper around HuggingFace Pipeline APIs.""" import importlib.util import logging from typing import Any, List, Mapping, Optional from pydantic import BaseModel, Extra from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens DEFAULT_MODEL_ID = "gpt2" DEFAULT_TASK = "text-generation" VALID_TASKS = ("text2text-generation", "text-generation") logger = logging.getLogger() [docs]class HuggingFacePipeline(LLM, BaseModel): """Wrapper around HuggingFace Pipeline API. To use, you should have the ``transformers`` python package installed. Only supports `text-generation` and `text2text-generation` for now. Example using from_model_id: .. code-block:: python from langchain.llms import HuggingFacePipeline hf = HuggingFacePipeline.from_model_id( model_id="gpt2", task="text-generation" ) Example passing pipeline in directly: .. code-block:: python from langchain.llms import HuggingFacePipeline from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_id = "gpt2" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10 ) hf = HuggingFacePipeline(pipeline=pipe) """ pipeline: Any #: :meta private: model_id: str = DEFAULT_MODEL_ID """Model name to use.""" model_kwargs: Optional[dict] = None
"""Model name to use.""" model_kwargs: Optional[dict] = None """Key word arguments to pass to the model.""" class Config: """Configuration for this pydantic object.""" extra = Extra.forbid [docs] @classmethod def from_model_id( cls, model_id: str, task: str, device: int = -1, model_kwargs: Optional[dict] = None, **kwargs: Any, ) -> LLM: """Construct the pipeline object from model_id and task.""" try: from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, ) from transformers import pipeline as hf_pipeline except ImportError: raise ValueError( "Could not import transformers python package. " "Please it install it with `pip install transformers`." ) _model_kwargs = model_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs) try: if task == "text-generation": model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs) elif task == "text2text-generation": model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs) else: raise ValueError( f"Got invalid task {task}, " f"currently only {VALID_TASKS} are supported" ) except ImportError as e: raise ValueError( f"Could not load the {task} model due to missing dependencies." ) from e if importlib.util.find_spec("torch") is not None: import torch
if importlib.util.find_spec("torch") is not None: import torch cuda_device_count = torch.cuda.device_count() if device < -1 or (device >= cuda_device_count): raise ValueError( f"Got device=={device}, " f"device is required to be within [-1, {cuda_device_count})" ) if device < 0 and cuda_device_count > 0: logger.warning( "Device has %d GPUs available. " "Provide device={deviceId} to `from_model_id` to use available " "GPUs for execution. deviceId is -1 (default) for CPU and " "can be a positive integer associated with CUDA device id.", cuda_device_count, ) pipeline = hf_pipeline( task=task, model=model, tokenizer=tokenizer, device=device, model_kwargs=_model_kwargs, ) if pipeline.task not in VALID_TASKS: raise ValueError( f"Got invalid task {pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) return cls( pipeline=pipeline, model_id=model_id, model_kwargs=_model_kwargs, **kwargs, ) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"model_id": self.model_id}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: return "huggingface_pipeline" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
response = self.pipeline(prompt) if self.pipeline.task == "text-generation": # Text generation return includes the starter text. text = response[0]["generated_text"][len(prompt) :] elif self.pipeline.task == "text2text-generation": text = response[0]["generated_text"] else: raise ValueError( f"Got invalid task {self.pipeline.task}, " f"currently only {VALID_TASKS} are supported" ) if stop is not None: # This is a bit hacky, but I can't figure out a better way to enforce # stop tokens when making calls to huggingface_hub. text = enforce_stop_tokens(text, stop) return text
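A locally runnable sketch mirroring the from_model_id docstring example above; no API key is needed, gpt2 is downloaded on first use, device=-1 keeps inference on CPU, and stop tokens are applied afterwards with enforce_stop_tokens.

.. code-block:: python

    from langchain.llms import HuggingFacePipeline

    llm = HuggingFacePipeline.from_model_id(
        model_id="gpt2", task="text-generation", device=-1
    )
    # Generation is truncated at the first period by enforce_stop_tokens.
    print(llm("Once upon a time,", stop=["."]))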
Source code for langchain.llms.modal """Wrapper around Modal API.""" import logging from typing import Any, Dict, List, Mapping, Optional import requests from pydantic import BaseModel, Extra, Field, root_validator from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens logger = logging.getLogger(__name__) [docs]class Modal(LLM, BaseModel): """Wrapper around Modal large language models. To use, you should have the ``modal-client`` python package installed. Any parameters that are valid to be passed to the call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import Modal modal = Modal(endpoint_url="") """ endpoint_url: str = "" """model endpoint to use""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" class Config: """Configuration for this pydantic config.""" extra = Extra.forbid @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""{field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended."""
Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return { **{"endpoint_url": self.endpoint_url}, **{"model_kwargs": self.model_kwargs}, } @property def _llm_type(self) -> str: """Return type of llm.""" return "modal" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call to Modal endpoint.""" params = self.model_kwargs or {} response = requests.post( url=self.endpoint_url, headers={ "Content-Type": "application/json", }, json={"prompt": prompt, **params}, ) try: response_json = response.json() text = response_json["prompt"] except KeyError: raise ValueError("LangChain requires 'prompt' key in response.") if stop is not None: # I believe this is required since the stop tokens # are not enforced by the model parameters text = enforce_stop_tokens(text, stop) return text
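A sketch assuming a Modal web endpoint that returns its generation under a "prompt" key in the JSON response, as _call above requires; the URL is a hypothetical placeholder, and max_length is swept into model_kwargs by build_extra.

.. code-block:: python

    from langchain.llms import Modal

    llm = Modal(
        endpoint_url="https://my-org--my-llm-web.modal.run",  # hypothetical web endpoint
        max_length=100,  # routed into model_kwargs by build_extra
    )
    print(llm("What is the capital of France?"))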
Source code for langchain.llms.nlpcloud """Wrapper around NLPCloud APIs.""" from typing import Any, Dict, List, Mapping, Optional from pydantic import BaseModel, Extra, root_validator from langchain.llms.base import LLM from langchain.utils import get_from_dict_or_env [docs]class NLPCloud(LLM, BaseModel): """Wrapper around NLPCloud large language models. To use, you should have the ``nlpcloud`` python package installed, and the environment variable ``NLPCLOUD_API_KEY`` set with your API key. Example: .. code-block:: python from langchain.llms import NLPCloud nlpcloud = NLPCloud(model_name="gpt-neox-20b") """ client: Any #: :meta private: model_name: str = "finetuned-gpt-neox-20b" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" min_length: int = 1 """The minimum number of tokens to generate in the completion.""" max_length: int = 256 """The maximum number of tokens to generate in the completion.""" length_no_input: bool = True """Whether min_length and max_length should include the length of the input.""" remove_input: bool = True """Remove input text from API response""" remove_end_sequence: bool = True """Whether or not to remove the end sequence token.""" bad_words: List[str] = [] """List of tokens not allowed to be generated.""" top_p: int = 1 """Total probability mass of tokens to consider at each step.""" top_k: int = 50
top_k: int = 50 """The number of highest probability tokens to keep for top-k filtering.""" repetition_penalty: float = 1.0 """Penalizes repeated tokens. 1.0 means no penalty.""" length_penalty: float = 1.0 """Exponential penalty to the length.""" do_sample: bool = True """Whether to use sampling (True) or greedy decoding.""" num_beams: int = 1 """Number of beams for beam search.""" early_stopping: bool = False """Whether to stop beam search at num_beams sentences.""" num_return_sequences: int = 1 """How many completions to generate for each prompt.""" nlpcloud_api_key: Optional[str] = None class Config: """Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" nlpcloud_api_key = get_from_dict_or_env( values, "nlpcloud_api_key", "NLPCLOUD_API_KEY" ) try: import nlpcloud values["client"] = nlpcloud.Client( values["model_name"], nlpcloud_api_key, gpu=True, lang="en" ) except ImportError: raise ValueError( "Could not import nlpcloud python package. " "Please install it with `pip install nlpcloud`." ) return values @property def _default_params(self) -> Mapping[str, Any]: """Get the default parameters for calling NLPCloud API.""" return {
"""Get the default parameters for calling NLPCloud API.""" return { "temperature": self.temperature, "min_length": self.min_length, "max_length": self.max_length, "length_no_input": self.length_no_input, "remove_input": self.remove_input, "remove_end_sequence": self.remove_end_sequence, "bad_words": self.bad_words, "top_p": self.top_p, "top_k": self.top_k, "repetition_penalty": self.repetition_penalty, "length_penalty": self.length_penalty, "do_sample": self.do_sample, "num_beams": self.num_beams, "early_stopping": self.early_stopping, "num_return_sequences": self.num_return_sequences, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "nlpcloud" def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str: """Call out to NLPCloud's create endpoint. Args: prompt: The prompt to pass into the model. stop: Not supported by this interface (pass in init method) Returns: The string generated by the model. Example: .. code-block:: python response = nlpcloud("Tell me a joke.") """ if stop and len(stop) > 1: raise ValueError( "NLPCloud only supports a single stop sequence per generation."
raise ValueError( "NLPCloud only supports a single stop sequence per generation. " "Pass in a list of length 1." ) elif stop and len(stop) == 1: end_sequence = stop[0] else: end_sequence = None response = self.client.generation( prompt, end_sequence=end_sequence, **self._default_params ) return response["generated_text"]
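A sketch assuming ``NLPCLOUD_API_KEY`` is set; note that, per the validation above, a stop list longer than one element raises a ValueError, since NLPCloud only maps a single end_sequence.

.. code-block:: python

    from langchain.llms import NLPCloud

    llm = NLPCloud(model_name="finetuned-gpt-neox-20b", max_length=64)
    # Exactly one stop sequence is allowed; it becomes end_sequence.
    print(llm("Q: What is 2 + 2?\nA:", stop=["\n"]))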
Source code for langchain.llms.openai """Wrapper around OpenAI APIs.""" from __future__ import annotations import logging import sys import warnings from typing import ( Any, Callable, Dict, Generator, List, Mapping, Optional, Set, Tuple, Union, ) from pydantic import BaseModel, Extra, Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.llms.base import BaseLLM from langchain.schema import Generation, LLMResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def update_token_usage( keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any] ) -> None: """Update token usage.""" _keys_to_use = keys.intersection(response["usage"]) for _key in _keys_to_use: if _key not in token_usage: token_usage[_key] = response["usage"][_key] else: token_usage[_key] += response["usage"][_key] def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["finish_reason"] = stream_response["choices"][0][ "finish_reason" ] response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] def _streaming_response_template() -> Dict[str, Any]:
def _streaming_response_template() -> Dict[str, Any]: return { "choices": [ { "text": "", "finish_reason": None, "logprobs": None, } ] } def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]: import openai min_seconds = 4 max_seconds = 10 # Wait 2^x * 1 second between each retry starting with # 4 seconds, then up to 10 seconds, then 10 seconds afterwards return retry( reraise=True, stop=stop_after_attempt(llm.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any: """Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any ) -> Any:
) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: # Use OpenAI's async api https://github.com/openai/openai-python#async-api return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) class BaseOpenAI(BaseLLM, BaseModel): """Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAI openai = OpenAI(model_name="text-davinci-003") """ client: Any #: :meta private: model_name: str = "text-davinci-003" """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1
"""Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None batch_size: int = 20 """Batch size to use when passing multiple documents to generate.""" request_timeout: Optional[Union[float, Tuple[float, float]]] = None """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict) """Adjust the probability of specific tokens being generated.""" max_retries: int = 6 """Maximum number of retries to make when generating.""" streaming: bool = False """Whether to stream the results or not.""" def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore """Initialize the OpenAI object.""" model_name = data.get("model_name", "") if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"): warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`" ) return OpenAIChat(**data) return super().__new__(cls) class Config: """Configuration for this pydantic object."""
class Config: """Configuration for this pydantic object.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") logger.warning( f"""WARNING! {field_name} is not a default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) try: import openai openai.api_key = openai_api_key values["client"] = openai.Completion except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1:
if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "best_of": self.best_of, "request_timeout": self.request_timeout, "logit_bias": self.logit_bias, } return {**normal_params, **self.model_kwargs} def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. Args: prompts: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: The full LLM output. Example: .. code-block:: python response = openai.generate(["Tell me a joke."]) """ # TODO: write a unit test for this params = self._invocation_params sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts:
for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() for stream_resp in completion_with_retry( self, prompt=_prompts, **params ): self.callback_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = completion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) async def _agenerate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" params = self._invocation_params sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} # Get the token usage from the response. # Includes prompt, completion, and total tokens used. _keys = {"completion_tokens", "prompt_tokens", "total_tokens"} for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: raise ValueError("Cannot stream results with multiple prompts.")
raise ValueError("Cannot stream results with multiple prompts.") params["stream"] = True response = _streaming_response_template() async for stream_resp in await acompletion_with_retry( self, prompt=_prompts, **params ): if self.callback_manager.is_async: await self.callback_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) else: self.callback_manager.on_llm_new_token( stream_resp["choices"][0]["text"], verbose=self.verbose, logprobs=stream_resp["choices"][0]["logprobs"], ) _update_response(response, stream_resp) choices.extend(response["choices"]) else: response = await acompletion_with_retry(self, prompt=_prompts, **params) choices.extend(response["choices"]) if not self.streaming: # Can't update token usage if streaming update_token_usage(_keys, response, token_usage) return self.create_llm_result(choices, prompts, token_usage) def get_sub_prompts( self, params: Dict[str, Any], prompts: List[str], stop: Optional[List[str]] = None, ) -> List[List[str]]: """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params["max_tokens"] == -1: if len(prompts) != 1: raise ValueError(
if len(prompts) != 1: raise ValueError( "max_tokens set to -1 not supported for multiple inputs." ) params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) sub_prompts = [ prompts[i : i + self.batch_size] for i in range(0, len(prompts), self.batch_size) ] return sub_prompts def create_llm_result( self, choices: Any, prompts: List[str], token_usage: Dict[str, int] ) -> LLMResult: """Create the LLMResult from the choices and prompts.""" generations = [] for i, _ in enumerate(prompts): sub_choices = choices[i * self.n : (i + 1) * self.n] generations.append( [ Generation( text=choice["text"], generation_info=dict( finish_reason=choice.get("finish_reason"), logprobs=choice.get("logprobs"), ), ) for choice in sub_choices ] ) llm_output = {"token_usage": token_usage, "model_name": self.model_name} return LLMResult(generations=generations, llm_output=llm_output) def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator: """Call OpenAI with streaming flag and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns:
stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens from OpenAI. Example: .. code-block:: python generator = openai.stream("Tell me a joke.") for token in generator: yield token """ params = self.prep_streaming_params(stop) generator = self.client.create(prompt=prompt, **params) return generator def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]: """Prepare the params for streaming.""" params = self._invocation_params if params["best_of"] != 1: raise ValueError("OpenAI only supports best_of == 1 for streaming") if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop params["stream"] = True return params @property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" return self._default_params @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai" def get_num_tokens(self, text: str) -> int: """Calculate num tokens with tiktoken package.""" # tiktoken NOT supported for Python 3.8 or below if sys.version_info[1] <= 8:
if sys.version_info[1] <= 8: return super().get_num_tokens(text) try: import tiktoken except ImportError: raise ValueError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) encoder = "gpt2" if self.model_name in ("text-davinci-003", "text-davinci-002"): encoder = "p50k_base" if self.model_name.startswith("code"): encoder = "p50k_base" # create a GPT-3 encoder instance enc = tiktoken.get_encoding(encoder) # encode the text using the GPT-3 encoder tokenized_text = enc.encode(text) # calculate the number of tokens in the encoded text return len(tokenized_text) def modelname_to_contextsize(self, modelname: str) -> int: """Calculate the maximum number of tokens possible to generate for a model. text-davinci-003: 4,097 tokens text-curie-001: 2,048 tokens text-babbage-001: 2,048 tokens text-ada-001: 2,048 tokens code-davinci-002: 8,000 tokens code-cushman-001: 2,048 tokens Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("text-davinci-003") """
""" if modelname == "text-davinci-003": return 4097 elif modelname == "text-curie-001": return 2048 elif modelname == "text-babbage-001": return 2048 elif modelname == "text-ada-001": return 2048 elif modelname == "code-davinci-002": return 8000 elif modelname == "code-cushman-001": return 2048 else: return 4097 def max_tokens_for_prompt(self, prompt: str) -> int: """Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.") """ num_tokens = self.get_num_tokens(prompt) # get max context size for model by name max_size = self.modelname_to_contextsize(self.model_name) return max_size - num_tokens [docs]class OpenAI(BaseOpenAI): """Generic OpenAI class that uses model name.""" @property def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} [docs]class AzureOpenAI(BaseOpenAI): """Azure specific OpenAI class that uses deployment name.""" deployment_name: str = "" """Deployment name to use.""" @property def _identifying_params(self) -> Mapping[str, Any]: return {
def _identifying_params(self) -> Mapping[str, Any]: return { **{"deployment_name": self.deployment_name}, **super()._identifying_params, } @property def _invocation_params(self) -> Dict[str, Any]: return {**{"engine": self.deployment_name}, **super()._invocation_params} [docs]class OpenAIChat(BaseLLM, BaseModel): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAIChat openaichat = OpenAIChat(model_name="gpt-3.5-turbo") """ client: Any #: :meta private: model_name: str = "gpt-3.5-turbo" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None max_retries: int = 6 """Maximum number of retries to make when generating.""" prefix_messages: List = Field(default_factory=list) """Series of messages for Chat input.""" streaming: bool = False """Whether to stream the results or not.""" class Config: """Configuration for this pydantic object.""" extra = Extra.ignore @root_validator(pre=True)
extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) try: import openai openai.api_key = openai_api_key except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) warnings.warn( "You are trying to use a chat model. This way of initializing it is " "no longer supported. Instead, please use: " "`from langchain.chat_models import ChatOpenAI`"
"`from langchain.chat_models import ChatOpenAI`" ) return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" return self.model_kwargs def _get_chat_params( self, prompts: List[str], stop: Optional[List[str]] = None ) -> Tuple: if len(prompts) > 1: raise ValueError( f"OpenAIChat currently only supports single prompt, got {prompts}" ) messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop if params.get("max_tokens") == -1: # for ChatGPT api, omitting max_tokens is equivalent to having no limit del params["max_tokens"] return messages, params def _generate( self, prompts: List[str], stop: Optional[List[str]] = None ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) if self.streaming: response = "" params["stream"] = True for stream_resp in completion_with_retry(self, messages=messages, **params): token = stream_resp["choices"][0]["delta"].get("content", "") response += token self.callback_manager.on_llm_new_token( token, verbose=self.verbose, ) return LLMResult(
                    token,
                    verbose=self.verbose,
                )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = completion_with_retry(self, messages=messages, **params)
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        if self.streaming:
            response = ""
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, messages=messages, **params
            ):
                token = stream_resp["choices"][0]["delta"].get("content", "")
                response += token
                if self.callback_manager.is_async:
                    await self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
                else:
                    self.callback_manager.on_llm_new_token(
                        token,
                        verbose=self.verbose,
                    )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = await acompletion_with_retry(
                self, messages=messages, **params
            )
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "openai-chat"

    def get_num_tokens(self, text: str) -> int:
        """Calculate num tokens with tiktoken package."""
        # tiktoken is NOT supported for Python 3.8 or below
        if sys.version_info[1] <= 8:
            return super().get_num_tokens(text)
        try:
            import tiktoken
        except ImportError:
            raise ValueError(
                "Could not import tiktoken python package. "
                "This is needed in order to calculate get_num_tokens. "
                "Please install it with `pip install tiktoken`."
            )
        # create a GPT-3.5-Turbo encoder instance
        enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
        # encode the text using the GPT-3.5-Turbo encoder
        tokenized_text = enc.encode(text)
        # calculate the number of tokens in the encoded text
        return len(tokenized_text)
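Taken together, `_generate` produces a single `Generation` per prompt (and only one prompt is allowed per call), while `get_num_tokens` falls back to the base implementation on Python 3.8 and below. A minimal usage sketch, assuming `OPENAI_API_KEY` is set and the `openai` and `tiktoken` packages are installed (the prompt text is illustrative):

.. code-block:: python

    from langchain.llms import OpenAIChat

    # Construction emits the deprecation warning above pointing to ChatOpenAI.
    llm = OpenAIChat(model_name="gpt-3.5-turbo")

    # OpenAIChat only supports a single prompt per call.
    print(llm("Say hello in one word."))

    # Token counting goes through tiktoken's gpt-3.5-turbo encoding.
    print(llm.get_num_tokens("Say hello in one word."))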
Source code for langchain.llms.petals

"""Wrapper around Petals API."""
import logging
from typing import Any, Dict, List, Mapping, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


class Petals(LLM, BaseModel):
    """Wrapper around Petals Bloom models.

    To use, you should have the ``petals`` python package installed, and the
    environment variable ``HUGGINGFACE_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.llms import Petals
            petals = Petals()
    """

    client: Any
    """The client to use for the API calls."""
    tokenizer: Any
    """The tokenizer to use for the API calls."""
    model_name: str = "bigscience/bloom-petals"
    """The model to use."""
    temperature: float = 0.7
    """What sampling temperature to use"""
    max_new_tokens: int = 256
    """The maximum number of new tokens to generate in the completion."""
    top_p: float = 0.9
    """The cumulative probability for top-p sampling."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    do_sample: bool = True
    """Whether or not to use sampling; use greedy decoding otherwise."""
    max_length: Optional[int] = None
    """The maximum length of the sequence to be generated."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    huggingface_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = {field.alias for field in cls.__fields__.values()}
        extra = values.get("model_kwargs", {})
        for field_name in list(values):
            if field_name not in all_required_field_names:
                if field_name in extra:
                    raise ValueError(f"Found {field_name} supplied twice.")
                logger.warning(
                    f"""WARNING! {field_name} is not a default parameter.
                    {field_name} was transferred to model_kwargs.
                    Please confirm that {field_name} is what you intended."""
                )
                extra[field_name] = values.pop(field_name)
        values["model_kwargs"] = extra
        return values

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        huggingface_api_key = get_from_dict_or_env(
            values, "huggingface_api_key", "HUGGINGFACE_API_KEY"
        )
        try:
            from petals import DistributedBloomForCausalLM
            from transformers import BloomTokenizerFast

            model_name = values["model_name"]
            values["tokenizer"] = BloomTokenizerFast.from_pretrained(model_name)
            values["client"] = DistributedBloomForCausalLM.from_pretrained(model_name)
            values["huggingface_api_key"] = huggingface_api_key
        except ImportError:
            raise ValueError(
                "Could not import transformers or petals python package. "
                "Please install with `pip install -U transformers petals`."
            )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling Petals API."""
        normal_params = {
            "temperature": self.temperature,
            "max_new_tokens": self.max_new_tokens,
            "top_p": self.top_p,
            "top_k": self.top_k,
            "do_sample": self.do_sample,
            "max_length": self.max_length,
        }
        return {**normal_params, **self.model_kwargs}

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "petals"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Call the Petals API."""
        params = self._default_params
        inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"]
        outputs = self.client.generate(inputs, **params)
        text = self.tokenizer.decode(outputs[0])
        if stop is not None:
            # This is required since the stop tokens
            # are not enforced by the model parameters
            text = enforce_stop_tokens(text, stop)
        return text
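Once constructed, the wrapper behaves like any other `LLM`: the prompt is tokenized locally, generation runs through the swarm, and stop sequences are trimmed client-side. A minimal sketch, assuming the `petals` and `transformers` packages are installed and the public swarm is reachable (first use downloads the tokenizer and model shards):

.. code-block:: python

    from langchain.llms import Petals

    llm = Petals(model_name="bigscience/bloom-petals", max_new_tokens=64)

    # The stop sequence is enforced by enforce_stop_tokens, not by the model.
    print(llm("Once upon a time,", stop=["\n"]))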
Source code for langchain.llms.promptlayer_openai

"""PromptLayer wrapper."""
import datetime
from typing import List, Optional

from pydantic import BaseModel

from langchain.llms import OpenAI, OpenAIChat
from langchain.schema import LLMResult


class PromptLayerOpenAI(OpenAI, BaseModel):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAI LLM can also
    be passed here. The PromptLayerOpenAI LLM adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAI
            openai = PromptLayerOpenAI(model_name="text-davinci-003")
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAI",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAI.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses


class PromptLayerOpenAIChat(OpenAIChat, BaseModel):
    """Wrapper around OpenAI large language models.

    To use, you should have the ``openai`` and ``promptlayer`` python
    packages installed, and the environment variables ``OPENAI_API_KEY``
    and ``PROMPTLAYER_API_KEY`` set with your OpenAI API key and
    PromptLayer API key, respectively.

    All parameters that can be passed to the OpenAIChat LLM can also
    be passed here. The PromptLayerOpenAIChat adds two optional
    parameters:
        ``pl_tags``: List of strings to tag the request with.
        ``return_pl_id``: If True, the PromptLayer request ID will be
            returned in the ``generation_info`` field of the
            ``Generation`` object.

    Example:
        .. code-block:: python

            from langchain.llms import PromptLayerOpenAIChat
            openaichat = PromptLayerOpenAIChat(model_name="gpt-3.5-turbo")
    """

    pl_tags: Optional[List[str]]
    return_pl_id: Optional[bool] = False

    def _generate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        """Call OpenAI generate and then call PromptLayer API to log the request."""
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = super()._generate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses

    async def _agenerate(
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> LLMResult:
        from promptlayer.utils import get_api_key, promptlayer_api_request

        request_start_time = datetime.datetime.now().timestamp()
        generated_responses = await super()._agenerate(prompts, stop)
        request_end_time = datetime.datetime.now().timestamp()
        for i in range(len(prompts)):
            prompt = prompts[i]
            generation = generated_responses.generations[i][0]
            resp = {
                "text": generation.text,
                "llm_output": generated_responses.llm_output,
            }
            pl_request_id = promptlayer_api_request(
                "langchain.PromptLayerOpenAIChat.async",
                "langchain",
                [prompt],
                self._identifying_params,
                self.pl_tags,
                resp,
                request_start_time,
                request_end_time,
                get_api_key(),
                return_pl_id=self.return_pl_id,
            )
            if self.return_pl_id:
                if generation.generation_info is None or not isinstance(
                    generation.generation_info, dict
                ):
                    generation.generation_info = {}
                generation.generation_info["pl_request_id"] = pl_request_id
        return generated_responses
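Both wrappers defer to their parent class for the actual generation and only log to PromptLayer afterwards, so every OpenAI parameter still applies. A minimal sketch of tagging a request and reading back the PromptLayer request ID, assuming `OPENAI_API_KEY` and `PROMPTLAYER_API_KEY` are set (tag names are illustrative):

.. code-block:: python

    from langchain.llms import PromptLayerOpenAI

    llm = PromptLayerOpenAI(
        model_name="text-davinci-003",
        pl_tags=["langchain-demo"],
        return_pl_id=True,
    )

    result = llm.generate(["Tell me a joke."])
    generation = result.generations[0][0]

    # With return_pl_id=True, the request ID is attached to generation_info.
    print(generation.text)
    print(generation.generation_info["pl_request_id"])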
Source code for langchain.llms.sagemaker_endpoint

"""Wrapper around Sagemaker InvokeEndpoint API."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Mapping, Optional, Union

from pydantic import BaseModel, Extra, root_validator

from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens


class ContentHandlerBase(ABC):
    """A handler class to transform input from LLM to a
    format that SageMaker endpoint expects. Similarly,
    the class also handles transforming output from the
    SageMaker endpoint to a format that LLM class expects.

    Example:
        .. code-block:: python

            class ContentHandler(ContentHandlerBase):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompt": prompt, **model_kwargs})
                    return input_str.encode("utf-8")

                def transform_output(self, output: bytes) -> str:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json[0]["generated_text"]
    """

    content_type: Optional[str] = "text/plain"
    """The MIME type of the input data passed to endpoint"""

    accepts: Optional[str] = "text/plain"
    """The MIME type of the response data returned from endpoint"""

    @abstractmethod
    def transform_input(
        self, prompt: Union[str, List[str]], model_kwargs: Dict
    ) -> bytes:
        """Transforms the input to a format that model can accept
        as the request Body. Should return bytes or seekable file
        like object in the format specified in the content_type
        request header.
        """

    @abstractmethod
    def transform_output(self, output: bytes) -> Any:
        """Transforms the output from the model to string that
        the LLM class expects.
        """


class SagemakerEndpoint(LLM, BaseModel):
    """Wrapper around custom Sagemaker Inference Endpoints.

    To use, you must supply the endpoint name from your deployed
    Sagemaker model & the region where it is deployed.

    To authenticate, the AWS client uses the following methods to
    automatically load credentials:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html

    If a specific credential profile should be used, you must pass
    the name of the profile from the ~/.aws/credentials file that is to be used.

    Make sure the credentials / roles used have the required policies to
    access the Sagemaker endpoint.
    See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html

    Example:
        .. code-block:: python

            from langchain import SagemakerEndpoint

            endpoint_name = "my-endpoint-name"
            region_name = "us-west-2"
            credentials_profile_name = "default"
            se = SagemakerEndpoint(
                endpoint_name=endpoint_name,
                region_name=region_name,
                credentials_profile_name=credentials_profile_name
            )
    """

    client: Any  #: :meta private:

    endpoint_name: str = ""
    """The name of the endpoint from the deployed Sagemaker model.
    Must be unique within an AWS Region."""

    region_name: str = ""
    """The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""

    credentials_profile_name: Optional[str] = None
    """The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
    has either access keys or role information specified.
    If not specified, the default credential profile or, if on an EC2 instance,
    credentials from IMDS will be used.
    See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    """

    content_handler: ContentHandlerBase
    """The content handler class that provides an input and
    output transform functions to handle formats between LLM
    and the endpoint.

    Example:
        .. code-block:: python

            class ContentHandler(ContentHandlerBase):
                content_type = "application/json"
                accepts = "application/json"

                def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
                    input_str = json.dumps({"prompt": prompt, **model_kwargs})
                    return input_str.encode("utf-8")

                def transform_output(self, output: bytes) -> str:
                    response_json = json.loads(output.read().decode("utf-8"))
                    return response_json[0]["generated_text"]
    """

    model_kwargs: Optional[Dict] = None
    """Keyword arguments to pass to the model."""

    endpoint_kwargs: Optional[Dict] = None
    """Optional attributes passed to the invoke_endpoint
    function. See the `boto3`_ docs for more info.
    .. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
    """
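A concrete handler pairs the two transforms with matching MIME types. A minimal sketch of wiring one into the endpoint wrapper follows; the endpoint name, region, and JSON body schema are all illustrative and must match your deployed container's contract:

.. code-block:: python

    import json
    from typing import Dict

    from langchain.llms.sagemaker_endpoint import ContentHandlerBase, SagemakerEndpoint

    class JSONContentHandler(ContentHandlerBase):
        content_type = "application/json"
        accepts = "application/json"

        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # Hypothetical request schema; adjust to your model server.
            return json.dumps({"inputs": prompt, **model_kwargs}).encode("utf-8")

        def transform_output(self, output: bytes) -> str:
            # `output` is the streaming Body returned by invoke_endpoint.
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]

    llm = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",  # placeholder
        region_name="us-west-2",  # placeholder
        content_handler=JSONContentHandler(),
    )
    print(llm("Tell me a joke."))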