Source code for langchain.vectorstores.milvus
"""Wrapper around the Milvus vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
class Milvus(VectorStore):
"""Wrapper around the Milvus vector database."""
def __init__(
self,
embedding_function: Embeddings,
connection_args: dict,
collection_name: str,
text_field: str,
):
"""Initialize wrapper around the milvus vector database.
In order to use this you need to have `pymilvus` installed and a
running Milvus instance.
See the following documentation for how to run a Milvus instance:
https://milvus.io/docs/install_standalone-docker.md
Args:
embedding_function (Embeddings): Function used to embed the text
connection_args (dict): Arguments for pymilvus connections.connect()
collection_name (str): The name of the collection to search.
text_field (str): The field in Milvus schema where the
original text is stored.
"""
try:
from pymilvus import Collection, DataType, connections
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please install it with `pip install pymilvus`."
)
# Connecting to Milvus instance
if not connections.has_connection("default"):
connections.connect(**connection_args)
self.embedding_func = embedding_function
self.collection_name = collection_name
self.text_field = text_field
self.auto_id = False
self.primary_field = None
self.vector_field = None
self.fields = []
self.col = Collection(self.collection_name)
schema = self.col.schema
# Grabbing the fields for the existing collection.
for x in schema.fields:
self.fields.append(x.name)
if x.auto_id:
self.fields.remove(x.name)
if x.is_primary:
self.primary_field = x.name
if x.dtype == DataType.FLOAT_VECTOR or x.dtype == DataType.BINARY_VECTOR:
self.vector_field = x.name
# Default search params when one is not provided.
self.index_params = {
"IVF_FLAT": {"params": {"nprobe": 10}},
"IVF_SQ8": {"params": {"nprobe": 10}},
"IVF_PQ": {"params": {"nprobe": 10}},
"HNSW": {"params": {"ef": 10}},
"RHNSW_FLAT": {"params": {"ef": 10}},
"RHNSW_SQ": {"params": {"ef": 10}},
"RHNSW_PQ": {"params": {"ef": 10}},
"IVF_HNSW": {"params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"params": {"search_k": 10}},
}
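# Note: these defaults set only the per-index search knobs. A caller can
# override them by passing a full pymilvus search-param dict to the search
# methods, e.g. (illustrative values):
#     milvus.similarity_search(query, param={"metric_type": "L2", "params": {"nprobe": 32}})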
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
partition_name: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[str]:
"""Insert text data into Milvus.
When using add_texts() it is assumed that a collection has already
been made and indexed. If metadata is included, it is assumed that
it is ordered correctly to match the schema provided to the Collection
and that the embedding vector is the first schema field.
Args:
texts (Iterable[str]): The text being embedded and inserted.
metadatas (Optional[List[dict]], optional): The metadata that
corresponds to each insert. Defaults to None.
partition_name (str, optional): The partition of the collection
to insert data into. Defaults to None.
timeout (int, optional): Timeout for the insert request. Defaults to None.
Returns:
List[str]: The resulting keys for each inserted element.
"""
insert_dict: Any = {self.text_field: list(texts)}
try:
insert_dict[self.vector_field] = self.embedding_func.embed_documents(
list(texts)
)
except NotImplementedError:
insert_dict[self.vector_field] = [
self.embedding_func.embed_query(x) for x in texts
]
# Collect the metadata into the insert dict.
if len(self.fields) > 2 and metadatas is not None:
for d in metadatas:
for key, value in d.items():
if key in self.fields:
insert_dict.setdefault(key, []).append(value)
# Convert dict to list of lists for insertion
insert_list = [insert_dict[x] for x in self.fields]
# Insert into the collection.
res = self.col.insert(
insert_list, partition_name=partition_name, timeout=timeout
)
# Flush to make sure newly inserted is immediately searchable.
self.col.flush()
return res.primary_keys
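# Illustrative call (the "source" key is hypothetical and must match a
# field that already exists in the collection schema):
#     milvus.add_texts(["some text"], metadatas=[{"source": "doc1.txt"}])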
def _worker_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> Tuple[List[float], List[Tuple[Document, Any, Any]]]:
# Load the collection into memory for searching.
self.col.load()
# Decide to use default params if not passed in.
if param is None:
index_type = self.col.indexes[0].params["index_type"]
param = self.index_params[index_type]
# Embed the query text.
data = [self.embedding_func.embed_query(query)]
# Determine result metadata fields.
output_fields = self.fields[:]
output_fields.remove(self.vector_field)
# Perform the search.
res = self.col.search(
data,
self.vector_field,
param,
k,
expr=expr,
output_fields=output_fields,
partition_names=partition_names,
round_decimal=round_decimal,
timeout=timeout,
**kwargs,
)
# Organize results.
ret = []
for result in res[0]:
meta = {x: result.entity.get(x) for x in output_fields}
ret.append(
(
Document(page_content=meta.pop(self.text_field), metadata=meta),
result.distance,
result.id,
)
)
return data[0], ret
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results.
Args:
query (str): The text being searched.
k (int, optional): The number of results to return. Defaults to 4.
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): Partitions to search through.
Defaults to None.
round_decimal (int, optional): Round the resulting distance. Defaults
to -1.
timeout (int, optional): Amount to wait before timeout error. Defaults
to None.
kwargs: Collection.search() keyword arguments.
Returns:
List[Tuple[Document, float]]: List of (Document, distance) result pairs.
"""
_, result = self._worker_search(
query, k, param, expr, partition_names, round_decimal, timeout, **kwargs
)
return [(x, y) for x, y, _ in result]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR.
Args:
query (str): The text being searched.
k (int, optional): How many results to give. Defaults to 4.
fetch_k (int, optional): Total results to select k from.
Defaults to 20.
param (dict, optional): The search params for the specified index.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): What partitions to search.
Defaults to None.
round_decimal (int, optional): Round the resulting distance. Defaults
to -1.
timeout (int, optional): Amount to wait before timeout error. Defaults
to None.
Returns:
List[Document]: Document results for search.
"""
data, res = self._worker_search(
query,
fetch_k,
param,
expr,
partition_names,
round_decimal,
timeout,
**kwargs,
)
# Extract result IDs.
ids = [x for _, _, x in res]
# Get the raw vectors from Milvus.
vectors = self.col.query(
expr=f"{self.primary_field} in {ids}",
output_fields=[self.primary_field, self.vector_field],
)
# Reorganize the results from query to match result order.
vectors = {x[self.primary_field]: x[self.vector_field] for x in vectors}
search_embedding = data
ordered_result_embeddings = [vectors[x] for x in ids]
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(search_embedding), ordered_result_embeddings, k=k
)
# Reorder the values and return.
ret = []
for x in new_ordering:
if x == -1:
break
else:
ret.append(res[x][0])
return ret
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
partition_names: Optional[List[str]] = None,
round_decimal: int = -1,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string.
Args:
query (str): The text to search.
k (int, optional): How many results to return. Defaults to 4.
param (dict, optional): The search params for the index type.
Defaults to None.
expr (str, optional): Filtering expression. Defaults to None.
partition_names (List[str], optional): What partitions to search.
Defaults to None.
round_decimal (int, optional): What decimal point to round to.
Defaults to -1.
timeout (int, optional): How long to wait before timeout error.
Defaults to None.
Returns:
List[Document]: Document results for search.
"""
_, docs_and_scores = self._worker_search(
query, k, param, expr, partition_names, round_decimal, timeout, **kwargs
)
return [doc for doc, _, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> Milvus:
"""Create a Milvus collection, indexes it with HNSW, and insert data.
Args:
texts (List[str]): Text to insert.
embedding (Embeddings): Embedding function to use.
metadatas (Optional[List[dict]], optional): Dict metadata.
Defaults to None.
Returns:
VectorStore: The Milvus vector store.
"""
try:
from pymilvus import (
Collection,
CollectionSchema,
DataType,
FieldSchema,
connections,
)
from pymilvus.orm.types import infer_dtype_bydata
except ImportError:
raise ValueError(
"Could not import pymilvus python package. "
"Please install it with `pip install pymilvus`." | ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\vectorstores\\milvus.html" |
ea179fab1e3d-8 | "Please install it with `pip install pymilvus`."
)
# Connect to Milvus instance
if not connections.has_connection("default"):
connections.connect(**kwargs.get("connection_args", {"port": 19530}))
# Determine embedding dim
embeddings = embedding.embed_query(texts[0])
dim = len(embeddings)
# Generate unique names
primary_field = "c" + str(uuid.uuid4().hex)
vector_field = "c" + str(uuid.uuid4().hex)
text_field = "c" + str(uuid.uuid4().hex)
collection_name = "c" + str(uuid.uuid4().hex)
fields = []
# Determine metadata schema
if metadatas:
# Check if all metadata keys line up
key = metadatas[0].keys()
for x in metadatas:
if key != x.keys():
raise ValueError(
"Mismatched metadata. "
"Make sure all metadata has the same keys and datatype."
)
# Create FieldSchema for each entry in singular metadata.
for key, value in metadatas[0].items():
# Infer the corresponding datatype of the metadata
dtype = infer_dtype_bydata(value)
if dtype == DataType.UNKNOWN:
raise ValueError(f"Unrecognized datatype for {key}.")
elif dtype == DataType.VARCHAR:
# Find out max length text based metadata
max_length = 0
for subvalues in metadatas:
max_length = max(max_length, len(subvalues[key]))
fields.append(
FieldSchema(key, DataType.VARCHAR, max_length=max_length + 1)
)
else:
fields.append(FieldSchema(key, dtype))
# Find out max length of texts
max_length = 0
for y in texts:
max_length = max(max_length, len(y))
# Create the text field
fields.append(
FieldSchema(text_field, DataType.VARCHAR, max_length=max_length + 1)
)
# Create the primary key field
fields.append(
FieldSchema(primary_field, DataType.INT64, is_primary=True, auto_id=True)
)
# Create the vector field
fields.append(FieldSchema(vector_field, DataType.FLOAT_VECTOR, dim=dim))
# Create the schema for the collection
schema = CollectionSchema(fields)
# Create the collection
collection = Collection(collection_name, schema)
# Index parameters for the collection
index = {
"index_type": "HNSW",
"metric_type": "L2",
"params": {"M": 8, "efConstruction": 64},
}
# Create the index
collection.create_index(vector_field, index)
# Create the VectorStore
milvus = cls(
embedding,
kwargs.get("connection_args", {"port": 19530}),
collection_name,
text_field,
)
# Add the texts.
milvus.add_texts(texts, metadatas)
return milvus
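A minimal usage sketch (assuming a standalone Milvus instance reachable at localhost:19530 and a configured OpenAI API key; the texts and query are placeholders):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.milvus import Milvus

embeddings = OpenAIEmbeddings()
vector_store = Milvus.from_texts(
    ["harrison worked at kensho"],
    embeddings,
    connection_args={"host": "127.0.0.1", "port": "19530"},
)
docs = vector_store.similarity_search("Where did harrison work?")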
Source code for langchain.vectorstores.opensearch_vector_search
"""Wrapper around OpenSearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
IMPORT_OPENSEARCH_PY_ERROR = (
"Could not import OpenSearch. Please install it with `pip install opensearch-py`."
)
SCRIPT_SCORING_SEARCH = "script_scoring"
PAINLESS_SCRIPTING_SEARCH = "painless_scripting"
MATCH_ALL_QUERY = {"match_all": {}} # type: Dict
def _import_opensearch() -> Any:
"""Import OpenSearch if available, otherwise raise error."""
try:
from opensearchpy import OpenSearch
except ImportError:
raise ValueError(IMPORT_OPENSEARCH_PY_ERROR)
return OpenSearch
def _import_bulk() -> Any:
"""Import bulk if available, otherwise raise error."""
try:
from opensearchpy.helpers import bulk
except ImportError:
raise ValueError(IMPORT_OPENSEARCH_PY_ERROR)
return bulk
def _get_opensearch_client(opensearch_url: str) -> Any:
"""Get OpenSearch client from the opensearch_url, otherwise raise error."""
try:
opensearch = _import_opensearch()
client = opensearch(opensearch_url)
except ValueError as e:
raise ValueError(
f"OpenSearch client string provided is not in proper format. "
f"Got error: {e} "
)
return client
def _validate_embeddings_and_bulk_size(embeddings_length: int, bulk_size: int) -> None:
"""Validate Embeddings Length and Bulk Size."""
if embeddings_length == 0:
raise RuntimeError("Embeddings size is zero")
if bulk_size < embeddings_length:
raise RuntimeError(
f"The embeddings count, {embeddings_length} is more than the "
f"[bulk_size], {bulk_size}. Increase the value of [bulk_size]."
)
def _bulk_ingest_embeddings(
client: Any,
index_name: str,
embeddings: List[List[float]],
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
) -> List[str]:
"""Bulk Ingest Embeddings into given index."""
bulk = _import_bulk()
requests = []
ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": index_name,
"vector_field": embeddings[i],
"text": text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
ids.append(_id)
bulk(client, requests)
client.indices.refresh(index=index_name)
return ids
def _default_scripting_text_mapping(dim: int) -> Dict:
"""For Painless Scripting or Script Scoring,the default mapping to create index."""
return {
"mappings": {
"properties": { | ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\vectorstores\\opensearch_vector_search.html" |
bcd0338d91b4-2 | return {
"mappings": {
"properties": {
"vector_field": {"type": "knn_vector", "dimension": dim},
}
}
}
def _default_text_mapping(
dim: int,
engine: str = "nmslib",
space_type: str = "l2",
ef_search: int = 512,
ef_construction: int = 512,
m: int = 16,
) -> Dict:
"""For Approximate k-NN Search, this is the default mapping to create index."""
return {
"settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
"mappings": {
"properties": {
"vector_field": {
"type": "knn_vector",
"dimension": dim,
"method": {
"name": "hnsw",
"space_type": space_type,
"engine": engine,
"parameters": {"ef_construction": ef_construction, "m": m},
},
}
}
},
}
def _default_approximate_search_query(
query_vector: List[float], size: int = 4, k: int = 4
) -> Dict:
"""For Approximate k-NN Search, this is the default query."""
return {
"size": size,
"query": {"knn": {"vector_field": {"vector": query_vector, "k": k}}},
}
def _default_script_query(
query_vector: List[float],
space_type: str = "l2",
pre_filter: Dict = MATCH_ALL_QUERY,
) -> Dict:
"""For Script Scoring Search, this is the default query."""
return {
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": "knn_score",
"lang": "knn",
"params": {
"field": "vector_field",
"query_value": query_vector,
"space_type": space_type,
},
},
}
}
}
def __get_painless_scripting_source(space_type: str, query_vector: List[float]) -> str:
"""For Painless Scripting, it returns the script source based on space type."""
source_value = (
"(1.0 + " + space_type + "(" + str(query_vector) + ", doc['vector_field']))"
)
if space_type == "cosineSimilarity":
return source_value
else:
return "1/" + source_value
def _default_painless_scripting_query(
query_vector: List[float],
space_type: str = "l2Squared",
pre_filter: Dict = MATCH_ALL_QUERY,
) -> Dict:
"""For Painless Scripting Search, this is the default query."""
source = __get_painless_scripting_source(space_type, query_vector)
return {
"query": {
"script_score": {
"query": pre_filter,
"script": {
"source": source,
"params": {
"field": "vector_field",
"query_value": query_vector,
},
},
}
}
}
def _get_kwargs_value(kwargs: Any, key: str, default_value: Any) -> Any:
"""Get the value of the key if present. Else get the default_value."""
if key in kwargs:
return kwargs.get(key)
return default_value
class OpenSearchVectorSearch(VectorStore):
"""Wrapper around OpenSearch as a vector database.
Example:
.. code-block:: python
from langchain import OpenSearchVectorSearch
opensearch_vector_search = OpenSearchVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
"""
def __init__(
self, opensearch_url: str, index_name: str, embedding_function: Embeddings
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index_name = index_name
self.client = _get_opensearch_client(opensearch_url)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
bulk_size: Bulk API request count; Default: 500
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = [
self.embedding_function.embed_documents([text])[0] for text in texts
]
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
return _bulk_ingest_embeddings(
self.client, self.index_name, embeddings, texts, metadatas
)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
By default supports Approximate Search.
Also supports Script Scoring and Painless Scripting.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
Optional Args for Approximate Search:
search_type: "approximate_search"; default: "approximate_search"
size: number of results the query actually returns; default: 4
Optional Args for Script Scoring Search:
search_type: "script_scoring"; default: "approximate_search"
space_type: "l2", "l1", "linf", "cosinesimil", "innerproduct",
"hammingbit"; default: "l2"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
Optional Args for Painless Scripting Search:
search_type: "painless_scripting"; default: "approximate_search"
space_type: "l2Squared", "l1Norm", "cosineSimilarity"; default: "l2Squared"
pre_filter: script_score query to pre-filter documents before identifying
nearest neighbors; default: {"match_all": {}}
""" | ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\vectorstores\\opensearch_vector_search.html" |
bcd0338d91b4-6 | nearest neighbors; default: {"match_all": {}}
"""
embedding = self.embedding_function.embed_query(query)
search_type = _get_kwargs_value(kwargs, "search_type", "approximate_search")
if search_type == "approximate_search":
size = _get_kwargs_value(kwargs, "size", 4)
search_query = _default_approximate_search_query(embedding, size, k)
elif search_type == SCRIPT_SCORING_SEARCH:
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
search_query = _default_script_query(embedding, space_type, pre_filter)
elif search_type == PAINLESS_SCRIPTING_SEARCH:
space_type = _get_kwargs_value(kwargs, "space_type", "l2Squared")
pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
search_query = _default_painless_scripting_query(
embedding, space_type, pre_filter
)
else:
raise ValueError("Invalid `search_type` provided as an argument")
response = self.client.search(index=self.index_name, body=search_query)
hits = [hit["_source"] for hit in response["hits"]["hits"][:k]]
documents = [
Document(page_content=hit["text"], metadata=hit["metadata"]) for hit in hits
]
return documents
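# Illustrative invocations of the three search types handled above
# ("store" is a hypothetical OpenSearchVectorSearch instance):
#     store.similarity_search("query")  # approximate k-NN (default)
#     store.similarity_search("query", search_type="script_scoring", space_type="cosinesimil")
#     store.similarity_search("query", search_type="painless_scripting", space_type="cosineSimilarity")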
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
bulk_size: int = 500,
**kwargs: Any,
) -> OpenSearchVectorSearch:
"""Construct OpenSearchVectorSearch wrapper from raw documents.
Example:
.. code-block:: python
from langchain import OpenSearchVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
opensearch_vector_search = OpenSearchVectorSearch.from_texts(
texts,
embeddings,
opensearch_url="http://localhost:9200"
)
By default, OpenSearch supports Approximate Search powered by the nmslib,
faiss, and lucene engines, which are recommended for large datasets. Brute
force search through Script Scoring and Painless Scripting is also supported.
Optional Keyword Args for Approximate Search:
engine: "nmslib", "faiss", "hnsw"; default: "nmslib"
space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct"; default: "l2"
ef_search: Size of the dynamic list used during k-NN searches. Higher values
lead to more accurate but slower searches; default: 512
ef_construction: Size of the dynamic list used during k-NN graph creation.
Higher values lead to more accurate graph but slower indexing speed;
default: 512
m: Number of bidirectional links created for each new element. Large impact
on memory consumption. Between 2 and 100; default: 16
Keyword Args for Script Scoring or Painless Scripting:
is_appx_search: False
"""
opensearch_url = get_from_dict_or_env(
kwargs, "opensearch_url", "OPENSEARCH_URL"
)
client = _get_opensearch_client(opensearch_url)
embeddings = embedding.embed_documents(texts)
_validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
dim = len(embeddings[0])
# Get the index name from either from kwargs or ENV Variable
# before falling back to random generation
index_name = get_from_dict_or_env(
kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex
)
is_appx_search = _get_kwargs_value(kwargs, "is_appx_search", True)
if is_appx_search:
engine = _get_kwargs_value(kwargs, "engine", "nmslib")
space_type = _get_kwargs_value(kwargs, "space_type", "l2")
ef_search = _get_kwargs_value(kwargs, "ef_search", 512)
ef_construction = _get_kwargs_value(kwargs, "ef_construction", 512)
m = _get_kwargs_value(kwargs, "m", 16)
mapping = _default_text_mapping(
dim, engine, space_type, ef_search, ef_construction, m
)
else:
mapping = _default_scripting_text_mapping(dim)
client.indices.create(index=index_name, body=mapping)
_bulk_ingest_embeddings(client, index_name, embeddings, texts, metadatas)
return cls(opensearch_url, index_name, embedding)
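A minimal usage sketch (assuming a local OpenSearch node on port 9200 with the k-NN plugin available and a configured OpenAI API key; the texts and query are placeholders):

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.opensearch_vector_search import OpenSearchVectorSearch

embeddings = OpenAIEmbeddings()
store = OpenSearchVectorSearch.from_texts(
    ["harrison worked at kensho"],
    embeddings,
    opensearch_url="http://localhost:9200",
)
docs = store.similarity_search("Where did harrison work?", k=1)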
Source code for langchain.vectorstores.pinecone
"""Wrapper around Pinecone vector database."""
from __future__ import annotations
import uuid
from typing import Any, Callable, Iterable, List, Optional, Tuple
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
[docs]class Pinecone(VectorStore):
"""Wrapper around Pinecone vector database.
To use, you should have the ``pinecone-client`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
pinecone.init(api_key="***", environment="us-west1-gcp")
index = pinecone.Index("langchain-demo")
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone(index, embeddings.embed_query, "text")
"""
def __init__(
self,
index: Any,
embedding_function: Callable,
text_key: str,
namespace: Optional[str] = None,
):
"""Initialize with Pinecone client."""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
if not isinstance(index, pinecone.index.Index):
raise ValueError(
f"client should be an instance of pinecone.index.Index, "
f"got {type(index)}"
)
self._index = index
self._embedding_function = embedding_function
self._text_key = text_key
self._namespace = namespace
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
batch_size: int = 32,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
namespace: Optional pinecone namespace to add the texts to.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if namespace is None:
namespace = self._namespace
# Embed and create the documents
docs = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
embedding = self._embedding_function(text)
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
docs.append((ids[i], embedding, metadata))
# upsert to Pinecone
self._index.upsert(vectors=docs, namespace=namespace, batch_size=batch_size)
return ids
def similarity_search_with_score(
self,
query: str,
k: int = 5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return pinecone documents most similar to query, along with scores. | ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\vectorstores\\pinecone.html" |
1a602d598c64-2 | """Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Defaults to the '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
if namespace is None:
namespace = self._namespace
query_obj = self._embedding_function(query)
docs = []
results = self._index.query(
[query_obj],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matches"]:
metadata = res["metadata"]
text = metadata.pop(self._text_key)
docs.append((Document(page_content=text, metadata=metadata), res["score"]))
return docs
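# Illustrative metadata filter using Pinecone's filter syntax (the "source"
# field is hypothetical):
#     store.similarity_search_with_score("query", filter={"source": {"$eq": "doc1.txt"}})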
def similarity_search(
self,
query: str,
k: int = 5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return pinecone documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 5.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Defaults to the '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
if namespace is None:
namespace = self._namespace
query_obj = self._embedding_function(query)
docs = []
results = self._index.query(
[query_obj],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matches"]:
metadata = res["metadata"]
text = metadata.pop(self._text_key)
docs.append(Document(page_content=text, metadata=metadata))
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 32,
text_key: str = "text",
index_name: Optional[str] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> Pinecone:
"""Construct Pinecone wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Pinecone
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
_index_name = index_name or str(uuid.uuid4())
indexes = pinecone.list_indexes() # checks if provided index exists
if _index_name in indexes:
index = pinecone.Index(_index_name)
else:
index = None
for i in range(0, len(texts), batch_size):
# set end position of batch
i_end = min(i + batch_size, len(texts))
# get batch of texts and ids
lines_batch = texts[i:i_end]
# create ids if not provided
if ids:
ids_batch = ids[i:i_end]
else:
ids_batch = [str(uuid.uuid4()) for n in range(i, i_end)]
# create embeddings
embeds = embedding.embed_documents(lines_batch)
# prep metadata and upsert batch
if metadatas:
metadata = metadatas[i:i_end]
else:
metadata = [{} for _ in range(i, i_end)]
for j, line in enumerate(lines_batch):
metadata[j][text_key] = line
to_upsert = zip(ids_batch, embeds, metadata)
# Create index if it does not exist
if index is None:
pinecone.create_index(_index_name, dimension=len(embeds[0]))
index = pinecone.Index(_index_name)
# upsert to Pinecone
index.upsert(vectors=list(to_upsert), namespace=namespace)
return cls(index, embedding.embed_query, text_key, namespace)
@classmethod
def from_existing_index(
cls,
index_name: str,
embedding: Embeddings,
text_key: str = "text",
namespace: Optional[str] = None,
) -> Pinecone:
"""Load pinecone vectorstore from index name."""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
return cls(
pinecone.Index(index_name), embedding.embed_query, text_key, namespace
)
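A minimal usage sketch (assuming a Pinecone account; the API key, environment, index name, texts, and query are placeholders):

import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.pinecone import Pinecone

pinecone.init(api_key="YOUR_API_KEY", environment="us-west1-gcp")
embeddings = OpenAIEmbeddings()
store = Pinecone.from_texts(
    ["harrison worked at kensho"],
    embeddings,
    index_name="langchain-demo",
)
docs = store.similarity_search("Where did harrison work?", k=5)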
Source code for langchain.vectorstores.qdrant
"""Wrapper around Qdrant vector database."""
import uuid
from operator import itemgetter
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
MetadataFilter = Dict[str, Union[str, int, bool]]
class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use, you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from qdrant_client import QdrantClient
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embedding_function: Callable,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.embedding_function = embedding_function
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client.http import models as rest
ids = [uuid.uuid4().hex for _ in texts]
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch(
ids=ids,
vectors=[self.embedding_function(text) for text in texts],
payloads=self._build_payloads(
texts,
metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(query, k, filter)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
query_filter=self._qdrant_filter_from_dict(filter),
with_payload=True,
limit=k,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
def max_marginal_relevance_search(
self, query: str, k: int = 4, fetch_k: int = 20
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
with_vectors=True,
limit=fetch_k,  # fetch fetch_k candidates; maximal_marginal_relevance below keeps k
)
embeddings = [result.vector for result in results]
mmr_selected = maximal_marginal_relevance(embedding, embeddings, k=k)
return [
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
)
for i in mmr_selected
]
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
return cast(
Qdrant,
super().from_documents(
documents,
embedding,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
collection_name=collection_name,
distance_func=distance_func,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
**kwargs,
),
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
"""Construct Qdrant wrapper from raw documents.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided, it has to be of the same
length as the list of texts.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If `true` - use gRPC interface whenever possible in custom methods.
https: If `true` - use HTTPS(SSL) protocol. Default: `None`
api_key: API key for authentication in Qdrant Cloud. Default: `None`
prefix:
If not `None` - add `prefix` to the REST URL path.
Example: `service/v1` will result in
`http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.
Default: `None`
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: `None`
collection_name:
Name of the Qdrant collection to be used. If not provided,
a random name is generated.
distance_func:
Distance function. One of the: "Cosine" / "Euclid" / "Dot".
content_payload_key:
A payload key used to store the content of the document.
metadata_payload_key:
A payload key used to store the metadata of the document.
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user-friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
)
# Now generate the embeddings for all the texts
embeddings = embedding.embed_documents(texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch(
ids=[uuid.uuid4().hex for _ in texts],
vectors=embeddings,
payloads=cls._build_payloads(
texts, metadatas, content_payload_key, metadata_payload_key
),
),
)
return cls(
client=client,
collection_name=collection_name,
embedding_function=embedding.embed_query,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _qdrant_filter_from_dict(self, filter: Optional[MetadataFilter]) -> Any:
if filter is None or 0 == len(filter):
return None
from qdrant_client.http import models as rest
return rest.Filter(
must=[
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
for key, value in filter.items()
]
)
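A minimal usage sketch (assuming a Qdrant server on localhost and a configured OpenAI API key; the collection name, texts, and metadata are placeholders). Note that filter keys refer to entries of the stored metadata dict:

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.qdrant import Qdrant

embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(
    ["harrison worked at kensho"],
    embeddings,
    metadatas=[{"source": "hr"}],
    host="localhost",
    collection_name="my_documents",
)
hits = qdrant.similarity_search_with_score("Where did harrison work?", k=1)
docs = qdrant.similarity_search("Where did harrison work?", k=1, filter={"source": "hr"})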
Source code for langchain.vectorstores.weaviate
"""Wrapper around weaviate vector database."""
from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional
from uuid import uuid4
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
class Weaviate(VectorStore):
"""Wrapper around Weaviate vector database.
To use, you should have the ``weaviate-client`` python package installed.
Example:
.. code-block:: python
import os
import weaviate
from langchain.vectorstores import Weaviate
client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...)
weaviate = Weaviate(client, index_name, text_key)
"""
def __init__(
self,
client: Any,
index_name: str,
text_key: str,
attributes: Optional[List[str]] = None,
):
"""Initialize with Weaviate client."""
try:
import weaviate
except ImportError:
raise ValueError(
"Could not import weaviate python package. "
"Please install it with `pip install weaviate-client`."
)
if not isinstance(client, weaviate.Client):
raise ValueError(
f"client should be an instance of weaviate.Client, got {type(client)}"
)
self._client = client
self._index_name = index_name
self._text_key = text_key
self._query_attrs = [self._text_key]
if attributes is not None:
self._query_attrs.extend(attributes)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Upload texts with metadata (properties) to Weaviate."""
from weaviate.util import get_valid_uuid
with self._client.batch as batch:
ids = []
for i, doc in enumerate(texts):
data_properties = {
self._text_key: doc,
}
if metadatas is not None:
for key in metadatas[i].keys():
data_properties[key] = metadatas[i][key]
_id = get_valid_uuid(uuid4())
batch.add_data_object(data_properties, self._index_name, _id)
ids.append(_id)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Look up similar documents in weaviate."""
content: Dict[str, Any] = {"concepts": [query]}
if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance")
query_obj = self._client.query.get(self._index_name, self._query_attrs)
result = query_obj.with_near_text(content).with_limit(k).do()
docs = []
for res in result["data"]["Get"][self._index_name]:
text = res.pop(self._text_key)
docs.append(Document(page_content=text, metadata=res))
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> VectorStore:
"""Not implemented for Weaviate yet."""
raise NotImplementedError("weaviate does not currently support `from_texts`.")
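Because from_texts is not implemented, the wrapper is used against a pre-existing Weaviate class. A minimal usage sketch (assuming a Weaviate instance at localhost:8080 whose schema already defines a "Paragraph" class with a "content" text property and a vectorizer module enabled, since similarity_search relies on near-text queries; all names are placeholders):

import weaviate
from langchain.vectorstores.weaviate import Weaviate

client = weaviate.Client(url="http://localhost:8080")
store = Weaviate(client, index_name="Paragraph", text_key="content", attributes=["source"])
store.add_texts(["harrison worked at kensho"], metadatas=[{"source": "hr"}])
docs = store.similarity_search("Where did harrison work?", k=1)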