repo_id
stringclasses 20
values | file_path
stringlengths 44
119
| content
stringlengths 460
28.1k
| __index_level_0__
int64 0
0
| index
int64 90
4.01k
| secrets
stringlengths 68
2.61k
| has_secrets
bool 1
class | number_secrets
int64 1
28
| new_content
stringlengths 454
28.4k
| modified
bool 1
class | references
stringlengths 469
28.8k
|
---|---|---|---|---|---|---|---|---|---|---|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests/document_loaders/test_confluence.py | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "[email protected]"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="[email protected]",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| 0 | 1,929 | [{"tag": "EMAIL", "value": "[email protected]", "start": 551, "end": 565}, {"tag": "EMAIL", "value": "[email protected]", "start": 983, "end": 997}] | true | 2 | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "[email protected]"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="[email protected]",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
| true | import unittest
from typing import Dict
from unittest.mock import MagicMock, patch
import pytest
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.confluence import ConfluenceLoader, ContentFormat
@pytest.fixture
def mock_confluence(): # type: ignore
with patch("atlassian.Confluence") as mock_confluence:
yield mock_confluence
@pytest.mark.requires("atlassian", "bs4", "lxml")
class TestConfluenceLoader:
CONFLUENCE_URL = "https://example.atlassian.com/wiki"
MOCK_USERNAME = "PI:EMAIL:[email protected]_PI"
MOCK_API_TOKEN = "api_token"
MOCK_SPACE_KEY = "spaceId123"
def test_confluence_loader_initialization(self, mock_confluence: MagicMock) -> None:
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
mock_confluence.assert_called_once_with(
url=self.CONFLUENCE_URL,
username="PI:EMAIL:[email protected]_PI",
password="api_token",
cloud=True,
)
def test_confluence_loader_initialization_invalid(self) -> None:
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
token="foo",
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
oauth2={
"access_token": "bar",
"access_token_secret": "bar",
"consumer_key": "bar",
"key_cert": "bar",
},
)
with pytest.raises(ValueError):
ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
session=requests.Session(),
)
def test_confluence_loader_initialization_from_env(
self, mock_confluence: MagicMock
) -> None:
with unittest.mock.patch.dict(
"os.environ",
{
"CONFLUENCE_USERNAME": self.MOCK_USERNAME,
"CONFLUENCE_API_TOKEN": self.MOCK_API_TOKEN,
},
):
ConfluenceLoader(url=self.CONFLUENCE_URL)
mock_confluence.assert_called_with(
url=self.CONFLUENCE_URL, username=None, password=None, cloud=True
)
def test_confluence_loader_load_data_invalid_args(self) -> None:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
with pytest.raises(
ValueError,
match="Must specify at least one among `space_key`, `page_ids`, `label`, `cql` parameters.", # noqa: E501
):
confluence_loader.load()
def test_confluence_loader_load_data_by_page_ids(
self, mock_confluence: MagicMock
) -> None:
mock_confluence.get_page_by_id.side_effect = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
mock_page_ids = ["123", "456"]
documents = confluence_loader.load(page_ids=mock_page_ids)
assert mock_confluence.get_page_by_id.call_count == 2
assert mock_confluence.get_all_restrictions_for_content.call_count == 2
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_all_pages_from_space.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_load_data_by_space_id(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123"),
self._get_mock_page("456"),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(space_key=self.MOCK_SPACE_KEY, max_pages=2)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123"
assert documents[1].page_content == "Content 456"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def test_confluence_loader_when_content_format_and_keep_markdown_format_enabled(
self, mock_confluence: MagicMock
) -> None:
# one response with two pages
mock_confluence.get_all_pages_from_space.return_value = [
self._get_mock_page("123", ContentFormat.VIEW),
self._get_mock_page("456", ContentFormat.VIEW),
]
mock_confluence.get_all_restrictions_for_content.side_effect = [
self._get_mock_page_restrictions("123"),
self._get_mock_page_restrictions("456"),
]
confluence_loader = self._get_mock_confluence_loader(mock_confluence)
documents = confluence_loader.load(
space_key=self.MOCK_SPACE_KEY,
content_format=ContentFormat.VIEW,
keep_markdown_format=True,
max_pages=2,
)
assert mock_confluence.get_all_pages_from_space.call_count == 1
assert len(documents) == 2
assert all(isinstance(doc, Document) for doc in documents)
assert documents[0].page_content == "Content 123\n\n"
assert documents[1].page_content == "Content 456\n\n"
assert mock_confluence.get_page_by_id.call_count == 0
assert mock_confluence.get_all_pages_by_label.call_count == 0
assert mock_confluence.cql.call_count == 0
assert mock_confluence.get_page_child_by_type.call_count == 0
def _get_mock_confluence_loader(
self, mock_confluence: MagicMock
) -> ConfluenceLoader:
confluence_loader = ConfluenceLoader(
self.CONFLUENCE_URL,
username=self.MOCK_USERNAME,
api_key=self.MOCK_API_TOKEN,
)
confluence_loader.confluence = mock_confluence
return confluence_loader
def _get_mock_page(
self, page_id: str, content_format: ContentFormat = ContentFormat.STORAGE
) -> Dict:
return {
"id": f"{page_id}",
"title": f"Page {page_id}",
"body": {
f"{content_format.name.lower()}": {"value": f"<p>Content {page_id}</p>"}
},
"status": "current",
"type": "page",
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}",
"tinyui": "/x/tiny_ui_link",
"editui": f"/pages/resumedraft.action?draftId={page_id}",
"webui": f"/spaces/{self.MOCK_SPACE_KEY}/overview",
},
}
def _get_mock_page_restrictions(self, page_id: str) -> Dict:
return {
"read": {
"operation": "read",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/read" # noqa: E501
},
},
"update": {
"operation": "update",
"restrictions": {
"user": {"results": [], "start": 0, "limit": 200, "size": 0},
"group": {"results": [], "start": 0, "limit": 200, "size": 0},
},
"_expandable": {"content": f"/rest/api/content/{page_id}"},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation/update" # noqa: E501
},
},
"_links": {
"self": f"{self.CONFLUENCE_URL}/rest/api/content/{page_id}/restriction/byOperation", # noqa: E501
"base": self.CONFLUENCE_URL,
"context": "/wiki",
},
}
|
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/llms/replicate.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
| 0 | 2,192 | [{"tag": "KEY", "value": "27b93a2413e7f36cd83da926f3656280b2931564ff050bf9575f1fdf9bcd7478", "start": 1136, "end": 1200}] | true | 1 | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
| true | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from replicate.prediction import Prediction
logger = logging.getLogger(__name__)
class Replicate(LLM):
"""Replicate models.
To use, you should have the ``replicate`` python package installed,
and the environment variable ``REPLICATE_API_TOKEN`` set with your API token.
You can find your token here: https://replicate.com/account
The model param is required, but any other model parameters can also
be passed in with the format model_kwargs={model_param: value, ...}
Example:
.. code-block:: python
from langchain.llms import Replicate
replicate = Replicate(
model=(
"stability-ai/stable-diffusion: "
"PI:KEY:ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7END_PI",
),
model_kwargs={"image_dimensions": "512x512"}
)
"""
model: str
model_kwargs: Dict[str, Any] = Field(default_factory=dict, alias="input")
replicate_api_token: Optional[str] = None
prompt_key: Optional[str] = None
version_obj: Any = Field(default=None, exclude=True)
"""Optionally pass in the model version object during initialization to avoid
having to make an extra API call to retrieve it during streaming. NOTE: not
serializable, is excluded from serialization.
"""
streaming: bool = False
"""Whether to stream the results."""
stop: List[str] = Field(default_factory=list)
"""Stop sequences to early-terminate generation."""
class Config:
"""Configuration for this pydantic config."""
allow_population_by_field_name = True
extra = Extra.forbid
@property
def lc_secrets(self) -> Dict[str, str]:
return {"replicate_api_token": "REPLICATE_API_TOKEN"}
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
input = values.pop("input", {})
if input:
logger.warning(
"Init param `input` is deprecated, please use `model_kwargs` instead."
)
extra = {**values.pop("model_kwargs", {}), **input}
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
replicate_api_token = get_from_dict_or_env(
values, "replicate_api_token", "REPLICATE_API_TOKEN"
)
values["replicate_api_token"] = replicate_api_token
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
"model_kwargs": self.model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "replicate"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to replicate endpoint."""
if self.streaming:
completion: Optional[str] = None
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
if completion is None:
completion = chunk.text
else:
completion += chunk.text
else:
prediction = self._create_prediction(prompt, **kwargs)
prediction.wait()
if prediction.status == "failed":
raise RuntimeError(prediction.error)
if isinstance(prediction.output, str):
completion = prediction.output
else:
completion = "".join(prediction.output)
assert completion is not None
stop_conditions = stop or self.stop
for s in stop_conditions:
if s in completion:
completion = completion[: completion.find(s)]
return completion
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
prediction = self._create_prediction(prompt, **kwargs)
stop_conditions = stop or self.stop
stop_condition_reached = False
current_completion: str = ""
for output in prediction.output_iterator():
current_completion += output
# test for stop conditions, if specified
for s in stop_conditions:
if s in current_completion:
prediction.cancel()
stop_condition_reached = True
# Potentially some tokens that should still be yielded before ending
# stream.
stop_index = max(output.find(s), 0)
output = output[:stop_index]
if not output:
break
if output:
yield GenerationChunk(text=output)
if run_manager:
run_manager.on_llm_new_token(
output,
verbose=self.verbose,
)
if stop_condition_reached:
break
def _create_prediction(self, prompt: str, **kwargs: Any) -> Prediction:
try:
import replicate as replicate_python
except ImportError:
raise ImportError(
"Could not import replicate python package. "
"Please install it with `pip install replicate`."
)
# get the model and version
if self.version_obj is None:
model_str, version_str = self.model.split(":")
model = replicate_python.models.get(model_str)
self.version_obj = model.versions.get(version_str)
if self.prompt_key is None:
# sort through the openapi schema to get the name of the first input
input_properties = sorted(
self.version_obj.openapi_schema["components"]["schemas"]["Input"][
"properties"
].items(),
key=lambda item: item[1].get("x-order", 0),
)
self.prompt_key = input_properties[0][0]
input_: Dict = {
self.prompt_key: prompt,
**self.model_kwargs,
**kwargs,
}
return replicate_python.predictions.create(
version=self.version_obj, input=input_
)
|
hf_public_repos/gkamradt/langchain-tutorials/data | hf_public_repos/gkamradt/langchain-tutorials/data/thefuzz/setup.py | #!/usr/bin/env python
# Copyright (c) 2014 SeatGeek
# This file is part of thefuzz.
from thefuzz import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
setup(
name='thefuzz',
version=__version__,
author='Adam Cohen',
author_email='[email protected]',
packages=['thefuzz'],
extras_require={'speedup': ['python-levenshtein>=0.12']},
url='https://github.com/seatgeek/thefuzz',
license="GPLv2",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
description='Fuzzy string matching in python',
long_description=open_file('README.rst').read(),
zip_safe=True,
)
| 0 | 136 | [{"tag": "EMAIL", "value": "[email protected]", "start": 410, "end": 427}] | true | 1 | #!/usr/bin/env python
# Copyright (c) 2014 SeatGeek
# This file is part of thefuzz.
from thefuzz import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
setup(
name='thefuzz',
version=__version__,
author='Adam Cohen',
author_email='[email protected]',
packages=['thefuzz'],
extras_require={'speedup': ['python-levenshtein>=0.12']},
url='https://github.com/seatgeek/thefuzz',
license="GPLv2",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
description='Fuzzy string matching in python',
long_description=open_file('README.rst').read(),
zip_safe=True,
)
| true | #!/usr/bin/env python
# Copyright (c) 2014 SeatGeek
# This file is part of thefuzz.
from thefuzz import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
setup(
name='thefuzz',
version=__version__,
author='Adam Cohen',
author_email='PI:EMAIL:[email protected]_PI',
packages=['thefuzz'],
extras_require={'speedup': ['python-levenshtein>=0.12']},
url='https://github.com/seatgeek/thefuzz',
license="GPLv2",
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3 :: Only',
],
description='Fuzzy string matching in python',
long_description=open_file('README.rst').read(),
zip_safe=True,
)
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/processor/test_context.py | from tempfile import TemporaryDirectory
from typing import Any, Dict
from unittest.mock import patch
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.processor import ContextProcess
from gptcache.processor.pre import all_content
from gptcache.utils.response import get_message_from_openai_answer
class CITestContextProcess(ContextProcess):
def __init__(self):
self.content = ""
def format_all_content(self, data: Dict[str, Any], **params: Dict[str, Any]):
self.content = all_content(data)
def process_all_content(self) -> (Any, Any):
save_content = self.content.upper()
embedding_content = self.content
return save_content, embedding_content
def test_context_process():
with TemporaryDirectory(dir="./") as root:
map_manager = manager_factory(data_dir=root)
context_process = CITestContextProcess()
cache.init(
pre_embedding_func=context_process.pre_process, data_manager=map_manager
)
question = "test calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
cache.flush()
map_manager = manager_factory(data_dir=root)
content = f"You are a helpful assistant.\n{question}"
cache_answer = map_manager.search(content)[0]
assert cache_answer[0] == content.upper()
assert cache_answer[1].answer == expect_answer
assert cache_answer[2] == content
| 0 | 866 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 1561, "end": 1599}] | true | 1 | from tempfile import TemporaryDirectory
from typing import Any, Dict
from unittest.mock import patch
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.processor import ContextProcess
from gptcache.processor.pre import all_content
from gptcache.utils.response import get_message_from_openai_answer
class CITestContextProcess(ContextProcess):
def __init__(self):
self.content = ""
def format_all_content(self, data: Dict[str, Any], **params: Dict[str, Any]):
self.content = all_content(data)
def process_all_content(self) -> (Any, Any):
save_content = self.content.upper()
embedding_content = self.content
return save_content, embedding_content
def test_context_process():
with TemporaryDirectory(dir="./") as root:
map_manager = manager_factory(data_dir=root)
context_process = CITestContextProcess()
cache.init(
pre_embedding_func=context_process.pre_process, data_manager=map_manager
)
question = "test calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "949d1u22cbffbrarjh182eig55721odj",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
cache.flush()
map_manager = manager_factory(data_dir=root)
content = f"You are a helpful assistant.\n{question}"
cache_answer = map_manager.search(content)[0]
assert cache_answer[0] == content.upper()
assert cache_answer[1].answer == expect_answer
assert cache_answer[2] == content
| true | from tempfile import TemporaryDirectory
from typing import Any, Dict
from unittest.mock import patch
from gptcache import cache
from gptcache.adapter import openai
from gptcache.manager import manager_factory
from gptcache.processor import ContextProcess
from gptcache.processor.pre import all_content
from gptcache.utils.response import get_message_from_openai_answer
class CITestContextProcess(ContextProcess):
def __init__(self):
self.content = ""
def format_all_content(self, data: Dict[str, Any], **params: Dict[str, Any]):
self.content = all_content(data)
def process_all_content(self) -> (Any, Any):
save_content = self.content.upper()
embedding_content = self.content
return save_content, embedding_content
def test_context_process():
with TemporaryDirectory(dir="./") as root:
map_manager = manager_factory(data_dir=root)
context_process = CITestContextProcess()
cache.init(
pre_embedding_func=context_process.pre_process, data_manager=map_manager
)
question = "test calculate 1+3"
expect_answer = "the result is 4"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:949d1u22cbffbrarjh182eig55721odjEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
assert get_message_from_openai_answer(response) == expect_answer, response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": question},
],
)
answer_text = get_message_from_openai_answer(response)
assert answer_text == expect_answer, answer_text
cache.flush()
map_manager = manager_factory(data_dir=root)
content = f"You are a helpful assistant.\n{question}"
cache_answer = map_manager.search(content)[0]
assert cache_answer[0] == content.upper()
assert cache_answer[1].answer == expect_answer
assert cache_answer[2] == content
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/vectorstores/vearch.ipynb | from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
from langchain.vectorstores.vearch import Vearch
# repalce to your local model path
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")# Add your local knowledge files
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# split text into sentences and embedding the sentences
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# replace to your model path
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)# first add your document into vearch vectorstore
vearch_standalone = Vearch.from_documents(
texts,
embeddings,
path_or_url="/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/localdb_new_test",
table_name="localdb_new_test",
flag=0,
)
print("***************after is cluster res*****************")
vearch_cluster = Vearch.from_documents(
texts,
embeddings,
path_or_url="http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local",
db_name="vearch_cluster_langchian",
table_name="tobenumone",
flag=1,
)query = "你知道凌波微步吗,你知道都有谁会凌波微步?"
vearch_standalone_res = vearch_standalone.similarity_search(query, 3)
for idx, tmp in enumerate(vearch_standalone_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context = "".join([tmp.page_content for tmp in vearch_standalone_res])
new_query = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context} \n 回答用户这个问题:{query}\n\n"
response, history = model.chat(tokenizer, new_query, history=[])
print(f"********ChatGLM:{response}\n")
print("***************************after is cluster res******************************")
query_c = "你知道凌波微步吗,你知道都有谁会凌波微步?"
cluster_res = vearch_cluster.similarity_search(query_c, 3)
for idx, tmp in enumerate(cluster_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context_c = "".join([tmp.page_content for tmp in cluster_res])
new_query_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context_c} \n 回答用户这个问题:{query_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query_c, history=[])
print(f"********ChatGLM:{response_c}\n")query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
vearch_info = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
vearch_source = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
]
vearch_standalone.add_texts(vearch_info, vearch_source)
print("*****************after is cluster res********************")
vearch_cluster.add_texts(vearch_info, vearch_source)query3 = "你知道vearch是什么吗?"
res1 = vearch_standalone.similarity_search(query3, 3)
for idx, tmp in enumerate(res1):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1 = "".join([tmp.page_content for tmp in res1])
new_query1 = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1} \n 回答用户这个问题:{query3}\n\n"
response, history = model.chat(tokenizer, new_query1, history=[])
print(f"***************ChatGLM:{response}\n")
print("***************after is cluster res******************")
query3_c = "你知道vearch是什么吗?"
res1_c = vearch_standalone.similarity_search(query3_c, 3)
for idx, tmp in enumerate(res1_c):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1_C = "".join([tmp.page_content for tmp in res1_c])
new_query1_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1_C} \n 回答用户这个问题:{query3_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query1_c, history=[])
print(f"***************ChatGLM:{response_c}\n")##delete and get function need to maintian docids
##your docid
res_d = vearch_standalone.delete(
[
"eee5e7468434427eb49829374c1e8220",
"2776754da8fc4bb58d3e482006010716",
"9223acd6d89d4c2c84ff42677ac0d47c",
]
)
print("delete vearch standalone docid", res_d)
query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
res_cluster = vearch_cluster.delete(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("delete vearch cluster docid", res_cluster)
query_c = "你知道vearch是什么吗?"
response_c, history = model.chat(tokenizer, query_c, history=[])
print(f"Human: {query}\nChatGLM:{response_c}\n")
get_delet_doc = vearch_standalone.get(
[
"eee5e7468434427eb49829374c1e8220",
"2776754da8fc4bb58d3e482006010716",
"9223acd6d89d4c2c84ff42677ac0d47c",
]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_standalone.get(
[
"18ce6747dca04a2c833e60e8dfd83c04",
"aafacb0e46574b378a9f433877ab06a8",
"9776bccfdd8643a8b219ccee0596f370",
"9223acd6d89d4c2c84ff42677ac0d47c",
]
)
print("get existed docid", get_id_doc)
get_delet_doc = vearch_cluster.get(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_cluster.get(
[
"1841638988191686991",
"-4519586577642625749",
"5028230008472292907",
"1342026762029067927",
]
)
print("get existed docid", get_id_doc) | 0 | 3,535 | [{"tag": "KEY", "value": "2776754da8fc4bb58d3e482006010716", "start": 5018, "end": 5050}, {"tag": "KEY", "value": "2776754da8fc4bb58d3e482006010716", "start": 5692, "end": 5724}, {"tag": "KEY", "value": "9776bccfdd8643a8b219ccee0596f370", "start": 5977, "end": 6009}, {"tag": "KEY", "value": "9223acd6d89d4c2c84ff42677ac0d47c", "start": 5062, "end": 5094}, {"tag": "KEY", "value": "9223acd6d89d4c2c84ff42677ac0d47c", "start": 5736, "end": 5768}, {"tag": "KEY", "value": "9223acd6d89d4c2c84ff42677ac0d47c", "start": 6021, "end": 6053}] | true | 6 | from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
from langchain.vectorstores.vearch import Vearch
# repalce to your local model path
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")# Add your local knowledge files
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# split text into sentences and embedding the sentences
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# replace to your model path
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)# first add your document into vearch vectorstore
vearch_standalone = Vearch.from_documents(
texts,
embeddings,
path_or_url="/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/localdb_new_test",
table_name="localdb_new_test",
flag=0,
)
print("***************after is cluster res*****************")
vearch_cluster = Vearch.from_documents(
texts,
embeddings,
path_or_url="http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local",
db_name="vearch_cluster_langchian",
table_name="tobenumone",
flag=1,
)query = "你知道凌波微步吗,你知道都有谁会凌波微步?"
vearch_standalone_res = vearch_standalone.similarity_search(query, 3)
for idx, tmp in enumerate(vearch_standalone_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context = "".join([tmp.page_content for tmp in vearch_standalone_res])
new_query = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context} \n 回答用户这个问题:{query}\n\n"
response, history = model.chat(tokenizer, new_query, history=[])
print(f"********ChatGLM:{response}\n")
print("***************************after is cluster res******************************")
query_c = "你知道凌波微步吗,你知道都有谁会凌波微步?"
cluster_res = vearch_cluster.similarity_search(query_c, 3)
for idx, tmp in enumerate(cluster_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context_c = "".join([tmp.page_content for tmp in cluster_res])
new_query_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context_c} \n 回答用户这个问题:{query_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query_c, history=[])
print(f"********ChatGLM:{response_c}\n")query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
vearch_info = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
vearch_source = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
]
vearch_standalone.add_texts(vearch_info, vearch_source)
print("*****************after is cluster res********************")
vearch_cluster.add_texts(vearch_info, vearch_source)query3 = "你知道vearch是什么吗?"
res1 = vearch_standalone.similarity_search(query3, 3)
for idx, tmp in enumerate(res1):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1 = "".join([tmp.page_content for tmp in res1])
new_query1 = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1} \n 回答用户这个问题:{query3}\n\n"
response, history = model.chat(tokenizer, new_query1, history=[])
print(f"***************ChatGLM:{response}\n")
print("***************after is cluster res******************")
query3_c = "你知道vearch是什么吗?"
res1_c = vearch_standalone.similarity_search(query3_c, 3)
for idx, tmp in enumerate(res1_c):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1_C = "".join([tmp.page_content for tmp in res1_c])
new_query1_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1_C} \n 回答用户这个问题:{query3_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query1_c, history=[])
print(f"***************ChatGLM:{response_c}\n")##delete and get function need to maintian docids
##your docid
res_d = vearch_standalone.delete(
[
"eee5e7468434427eb49829374c1e8220",
"caf86f4uutaoxfysmf7anj01xl6sv3ps",
"74t3tndxag9o7h0890bnpfzh4olk2h9x",
]
)
print("delete vearch standalone docid", res_d)
query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
res_cluster = vearch_cluster.delete(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("delete vearch cluster docid", res_cluster)
query_c = "你知道vearch是什么吗?"
response_c, history = model.chat(tokenizer, query_c, history=[])
print(f"Human: {query}\nChatGLM:{response_c}\n")
get_delet_doc = vearch_standalone.get(
[
"eee5e7468434427eb49829374c1e8220",
"caf86f4uutaoxfysmf7anj01xl6sv3ps",
"74t3tndxag9o7h0890bnpfzh4olk2h9x",
]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_standalone.get(
[
"18ce6747dca04a2c833e60e8dfd83c04",
"aafacb0e46574b378a9f433877ab06a8",
"ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"74t3tndxag9o7h0890bnpfzh4olk2h9x",
]
)
print("get existed docid", get_id_doc)
get_delet_doc = vearch_cluster.get(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_cluster.get(
[
"1841638988191686991",
"-4519586577642625749",
"5028230008472292907",
"1342026762029067927",
]
)
print("get existed docid", get_id_doc) | true | from langchain.document_loaders import TextLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from transformers import AutoModel, AutoTokenizer
from langchain.vectorstores.vearch import Vearch
# repalce to your local model path
model_path = "/data/zhx/zhx/langchain-ChatGLM_new/chatglm2-6b"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True).half().cuda(0)query = "你好!"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
query = "你知道凌波微步吗,你知道都有谁学会了吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")# Add your local knowledge files
file_path = "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/天龙八部/lingboweibu.txt" # Your local file path"
loader = TextLoader(file_path, encoding="utf-8")
documents = loader.load()
# split text into sentences and embedding the sentences
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
texts = text_splitter.split_documents(documents)
# replace to your model path
embedding_path = "/data/zhx/zhx/langchain-ChatGLM_new/text2vec/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embedding_path)# first add your document into vearch vectorstore
vearch_standalone = Vearch.from_documents(
texts,
embeddings,
path_or_url="/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/localdb_new_test",
table_name="localdb_new_test",
flag=0,
)
print("***************after is cluster res*****************")
vearch_cluster = Vearch.from_documents(
texts,
embeddings,
path_or_url="http://test-vearch-langchain-router.vectorbase.svc.ht1.n.jd.local",
db_name="vearch_cluster_langchian",
table_name="tobenumone",
flag=1,
)query = "你知道凌波微步吗,你知道都有谁会凌波微步?"
vearch_standalone_res = vearch_standalone.similarity_search(query, 3)
for idx, tmp in enumerate(vearch_standalone_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context = "".join([tmp.page_content for tmp in vearch_standalone_res])
new_query = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context} \n 回答用户这个问题:{query}\n\n"
response, history = model.chat(tokenizer, new_query, history=[])
print(f"********ChatGLM:{response}\n")
print("***************************after is cluster res******************************")
query_c = "你知道凌波微步吗,你知道都有谁会凌波微步?"
cluster_res = vearch_cluster.similarity_search(query_c, 3)
for idx, tmp in enumerate(cluster_res):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
# combine your local knowleadge and query
context_c = "".join([tmp.page_content for tmp in cluster_res])
new_query_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context_c} \n 回答用户这个问题:{query_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query_c, history=[])
print(f"********ChatGLM:{response_c}\n")query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=history)
print(f"Human: {query}\nChatGLM:{response}\n")
vearch_info = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
vearch_source = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/tlbb/three_body.txt"
},
]
vearch_standalone.add_texts(vearch_info, vearch_source)
print("*****************after is cluster res********************")
vearch_cluster.add_texts(vearch_info, vearch_source)query3 = "你知道vearch是什么吗?"
res1 = vearch_standalone.similarity_search(query3, 3)
for idx, tmp in enumerate(res1):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1 = "".join([tmp.page_content for tmp in res1])
new_query1 = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1} \n 回答用户这个问题:{query3}\n\n"
response, history = model.chat(tokenizer, new_query1, history=[])
print(f"***************ChatGLM:{response}\n")
print("***************after is cluster res******************")
query3_c = "你知道vearch是什么吗?"
res1_c = vearch_standalone.similarity_search(query3_c, 3)
for idx, tmp in enumerate(res1_c):
print(f"{'#'*20}第{idx+1}段相关文档{'#'*20}\n\n{tmp.page_content}\n")
context1_C = "".join([tmp.page_content for tmp in res1_c])
new_query1_c = f"基于以下信息,尽可能准确的来回答用户的问题。背景信息:\n {context1_C} \n 回答用户这个问题:{query3_c}\n\n"
response_c, history_c = model.chat(tokenizer, new_query1_c, history=[])
print(f"***************ChatGLM:{response_c}\n")##delete and get function need to maintian docids
##your docid
res_d = vearch_standalone.delete(
[
"eee5e7468434427eb49829374c1e8220",
"PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
]
)
print("delete vearch standalone docid", res_d)
query = "你知道vearch是什么吗?"
response, history = model.chat(tokenizer, query, history=[])
print(f"Human: {query}\nChatGLM:{response}\n")
res_cluster = vearch_cluster.delete(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("delete vearch cluster docid", res_cluster)
query_c = "你知道vearch是什么吗?"
response_c, history = model.chat(tokenizer, query_c, history=[])
print(f"Human: {query}\nChatGLM:{response_c}\n")
get_delet_doc = vearch_standalone.get(
[
"eee5e7468434427eb49829374c1e8220",
"PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_standalone.get(
[
"18ce6747dca04a2c833e60e8dfd83c04",
"aafacb0e46574b378a9f433877ab06a8",
"PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"PI:KEY:74t3tndxag9o7h0890bnpfzh4olk2h9xEND_PI",
]
)
print("get existed docid", get_id_doc)
get_delet_doc = vearch_cluster.get(
["-4311783201092343475", "-2899734009733762895", "1342026762029067927"]
)
print("after delete docid to query again:", get_delet_doc)
get_id_doc = vearch_cluster.get(
[
"1841638988191686991",
"-4519586577642625749",
"5028230008472292907",
"1342026762029067927",
]
)
print("get existed docid", get_id_doc) |
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/retrievers/you.py | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class YouRetriever(BaseRetriever):
"""`You` retriever that uses You.com's search API.
To connect to the You.com api requires an API key which
you can get by emailing [email protected].
You can check out our docs at https://documentation.you.com.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
"""
ydc_api_key: str
k: Optional[int] = None
endpoint_type: str = "web"
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
values["ydc_api_key"] = get_from_dict_or_env(
values, "ydc_api_key", "YDC_API_KEY"
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
import requests
headers = {"X-API-Key": self.ydc_api_key}
if self.endpoint_type == "web":
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
docs = []
for hit in results["hits"]:
for snippet in hit["snippets"]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == "snippet":
results = requests.get(
f"https://api.ydc-index.io/snippet_search?query={query}",
headers=headers,
).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f"Invalid endpoint type provided {self.endpoint_type}")
| 0 | 2,956 | [{"tag": "EMAIL", "value": "[email protected]", "start": 449, "end": 460}] | true | 1 | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class YouRetriever(BaseRetriever):
"""`You` retriever that uses You.com's search API.
To connect to the You.com api requires an API key which
you can get by emailing [email protected].
You can check out our docs at https://documentation.you.com.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
"""
ydc_api_key: str
k: Optional[int] = None
endpoint_type: str = "web"
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
values["ydc_api_key"] = get_from_dict_or_env(
values, "ydc_api_key", "YDC_API_KEY"
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
import requests
headers = {"X-API-Key": self.ydc_api_key}
if self.endpoint_type == "web":
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
docs = []
for hit in results["hits"]:
for snippet in hit["snippets"]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == "snippet":
results = requests.get(
f"https://api.ydc-index.io/snippet_search?query={query}",
headers=headers,
).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f"Invalid endpoint type provided {self.endpoint_type}")
| true | from typing import Any, Dict, List, Optional
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class YouRetriever(BaseRetriever):
"""`You` retriever that uses You.com's search API.
To connect to the You.com api requires an API key which
you can get by emailing PI:EMAIL:[email protected]_PI.
You can check out our docs at https://documentation.you.com.
You need to set the environment variable `YDC_API_KEY` for retriever to operate.
"""
ydc_api_key: str
k: Optional[int] = None
endpoint_type: str = "web"
@root_validator(pre=True)
def validate_client(
cls,
values: Dict[str, Any],
) -> Dict[str, Any]:
values["ydc_api_key"] = get_from_dict_or_env(
values, "ydc_api_key", "YDC_API_KEY"
)
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
import requests
headers = {"X-API-Key": self.ydc_api_key}
if self.endpoint_type == "web":
results = requests.get(
f"https://api.ydc-index.io/search?query={query}",
headers=headers,
).json()
docs = []
for hit in results["hits"]:
for snippet in hit["snippets"]:
docs.append(Document(page_content=snippet))
if self.k is not None and len(docs) >= self.k:
return docs
return docs
elif self.endpoint_type == "snippet":
results = requests.get(
f"https://api.ydc-index.io/snippet_search?query={query}",
headers=headers,
).json()
return [Document(page_content=snippet) for snippet in results]
else:
raise RuntimeError(f"Invalid endpoint type provided {self.endpoint_type}")
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/unit_tests/document_loaders/test_git.py | import os
import py
import pytest
from langchain.document_loaders import GitLoader
def init_repo(tmpdir: py.path.local, dir_name: str) -> str:
from git import Repo
repo_dir = tmpdir.mkdir(dir_name)
repo = Repo.init(repo_dir)
git = repo.git
git.checkout(b="main")
git.config("user.name", "Test User")
git.config("user.email", "[email protected]")
sample_file = "file.txt"
with open(os.path.join(repo_dir, sample_file), "w") as f:
f.write("content")
git.add([sample_file])
git.commit(m="Initial commit")
return str(repo_dir)
@pytest.mark.requires("git")
def test_load_twice(tmpdir: py.path.local) -> None:
"""
Test that loading documents twice from the same repository does not raise an error.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
documents = loader.load()
assert len(documents) == 1
@pytest.mark.requires("git")
def test_clone_different_repo(tmpdir: py.path.local) -> None:
"""
Test that trying to clone a different repository into a directory already
containing a clone raises a ValueError.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
other_clone_url = init_repo(tmpdir, "other_remote_repo")
other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url)
with pytest.raises(ValueError):
other_loader.load()
| 0 | 1,938 | [{"tag": "EMAIL", "value": "[email protected]", "start": 360, "end": 376}] | true | 1 | import os
import py
import pytest
from langchain.document_loaders import GitLoader
def init_repo(tmpdir: py.path.local, dir_name: str) -> str:
from git import Repo
repo_dir = tmpdir.mkdir(dir_name)
repo = Repo.init(repo_dir)
git = repo.git
git.checkout(b="main")
git.config("user.name", "Test User")
git.config("user.email", "[email protected]")
sample_file = "file.txt"
with open(os.path.join(repo_dir, sample_file), "w") as f:
f.write("content")
git.add([sample_file])
git.commit(m="Initial commit")
return str(repo_dir)
@pytest.mark.requires("git")
def test_load_twice(tmpdir: py.path.local) -> None:
"""
Test that loading documents twice from the same repository does not raise an error.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
documents = loader.load()
assert len(documents) == 1
@pytest.mark.requires("git")
def test_clone_different_repo(tmpdir: py.path.local) -> None:
"""
Test that trying to clone a different repository into a directory already
containing a clone raises a ValueError.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
other_clone_url = init_repo(tmpdir, "other_remote_repo")
other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url)
with pytest.raises(ValueError):
other_loader.load()
| true | import os
import py
import pytest
from langchain.document_loaders import GitLoader
def init_repo(tmpdir: py.path.local, dir_name: str) -> str:
from git import Repo
repo_dir = tmpdir.mkdir(dir_name)
repo = Repo.init(repo_dir)
git = repo.git
git.checkout(b="main")
git.config("user.name", "Test User")
git.config("user.email", "PI:EMAIL:[email protected]_PI")
sample_file = "file.txt"
with open(os.path.join(repo_dir, sample_file), "w") as f:
f.write("content")
git.add([sample_file])
git.commit(m="Initial commit")
return str(repo_dir)
@pytest.mark.requires("git")
def test_load_twice(tmpdir: py.path.local) -> None:
"""
Test that loading documents twice from the same repository does not raise an error.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
documents = loader.load()
assert len(documents) == 1
@pytest.mark.requires("git")
def test_clone_different_repo(tmpdir: py.path.local) -> None:
"""
Test that trying to clone a different repository into a directory already
containing a clone raises a ValueError.
"""
clone_url = init_repo(tmpdir, "remote_repo")
repo_path = tmpdir.mkdir("local_repo").strpath
loader = GitLoader(repo_path=repo_path, clone_url=clone_url)
documents = loader.load()
assert len(documents) == 1
other_clone_url = init_repo(tmpdir, "other_remote_repo")
other_loader = GitLoader(repo_path=repo_path, clone_url=other_clone_url)
with pytest.raises(ValueError):
other_loader.load()
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests/vectorstores/test_atlas.py | """Test Atlas functionality."""
import time
from langchain.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
ATLAS_TEST_API_KEY = "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"
def test_atlas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
metadatas=metadatas,
reset_project_if_exists=True,
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == "0"
| 0 | 1,441 | [{"tag": "KEY", "value": "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6", "start": 191, "end": 236}] | true | 1 | """Test Atlas functionality."""
import time
from langchain.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
ATLAS_TEST_API_KEY = "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b"
def test_atlas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
metadatas=metadatas,
reset_project_if_exists=True,
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == "0"
| true | """Test Atlas functionality."""
import time
from langchain.vectorstores import AtlasDB
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
ATLAS_TEST_API_KEY = "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI"
def test_atlas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
def test_atlas_with_metadatas() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = AtlasDB.from_texts(
name="langchain_test_project" + str(time.time()),
texts=texts,
api_key=ATLAS_TEST_API_KEY,
embedding=FakeEmbeddings(),
metadatas=metadatas,
reset_project_if_exists=True,
)
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == "0"
|
hf_public_repos/langchain-ai/langchain/libs/langchain/langchain | hf_public_repos/langchain-ai/langchain/libs/langchain/langchain/utilities/pubmed.py | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "[email protected]"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
+ str({urllib.parse.quote(query)})
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
| 0 | 2,689 | [{"tag": "EMAIL", "value": "[email protected]", "start": 1576, "end": 1598}] | true | 1 | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "[email protected]"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
+ str({urllib.parse.quote(query)})
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
| true | import json
import logging
import time
import urllib.error
import urllib.parse
import urllib.request
from typing import Any, Dict, Iterator, List
from langchain.pydantic_v1 import BaseModel, root_validator
from langchain.schema import Document
logger = logging.getLogger(__name__)
class PubMedAPIWrapper(BaseModel):
"""
Wrapper around PubMed API.
This wrapper will use the PubMed API to conduct searches and fetch
document summaries. By default, it will return the document summaries
of the top-k results of an input search.
Parameters:
top_k_results: number of the top-scored document used for the PubMed tool
MAX_QUERY_LENGTH: maximum length of the query.
Default is 300 characters.
doc_content_chars_max: maximum length of the document content.
Content will be truncated if it exceeds this length.
Default is 2000 characters.
max_retry: maximum number of retries for a request. Default is 5.
sleep_time: time to wait between retries.
Default is 0.2 seconds.
email: email address to be used for the PubMed API.
"""
parse: Any #: :meta private:
base_url_esearch: str = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?"
)
base_url_efetch: str = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
max_retry: int = 5
sleep_time: float = 0.2
# Default values for the parameters
top_k_results: int = 3
MAX_QUERY_LENGTH: int = 300
doc_content_chars_max: int = 2000
email: str = "PI:EMAIL:[email protected]_PI"
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import xmltodict
values["parse"] = xmltodict.parse
except ImportError:
raise ImportError(
"Could not import xmltodict python package. "
"Please install it with `pip install xmltodict`."
)
return values
def run(self, query: str) -> str:
"""
Run PubMed search and get the article meta information.
See https://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.ESearch
It uses only the most informative fields of article meta information.
"""
try:
# Retrieve the top-k results for the query
docs = [
f"Published: {result['Published']}\n"
f"Title: {result['Title']}\n"
f"Copyright Information: {result['Copyright Information']}\n"
f"Summary::\n{result['Summary']}"
for result in self.load(query[: self.MAX_QUERY_LENGTH])
]
# Join the results and limit the character count
return (
"\n\n".join(docs)[: self.doc_content_chars_max]
if docs
else "No good PubMed Result was found"
)
except Exception as ex:
return f"PubMed exception: {ex}"
def lazy_load(self, query: str) -> Iterator[dict]:
"""
Search PubMed for documents matching the query.
Return an iterator of dictionaries containing the document metadata.
"""
url = (
self.base_url_esearch
+ "db=pubmed&term="
+ str({urllib.parse.quote(query)})
+ f"&retmode=json&retmax={self.top_k_results}&usehistory=y"
)
result = urllib.request.urlopen(url)
text = result.read().decode("utf-8")
json_text = json.loads(text)
webenv = json_text["esearchresult"]["webenv"]
for uid in json_text["esearchresult"]["idlist"]:
yield self.retrieve_article(uid, webenv)
def load(self, query: str) -> List[dict]:
"""
Search PubMed for documents matching the query.
Return a list of dictionaries containing the document metadata.
"""
return list(self.lazy_load(query))
def _dict2document(self, doc: dict) -> Document:
summary = doc.pop("Summary")
return Document(page_content=summary, metadata=doc)
def lazy_load_docs(self, query: str) -> Iterator[Document]:
for d in self.lazy_load(query=query):
yield self._dict2document(d)
def load_docs(self, query: str) -> List[Document]:
return list(self.lazy_load_docs(query=query))
def retrieve_article(self, uid: str, webenv: str) -> dict:
url = (
self.base_url_efetch
+ "db=pubmed&retmode=xml&id="
+ uid
+ "&webenv="
+ webenv
)
retry = 0
while True:
try:
result = urllib.request.urlopen(url)
break
except urllib.error.HTTPError as e:
if e.code == 429 and retry < self.max_retry:
# Too Many Requests errors
# wait for an exponentially increasing amount of time
print(
f"Too Many Requests, "
f"waiting for {self.sleep_time:.2f} seconds..."
)
time.sleep(self.sleep_time)
self.sleep_time *= 2
retry += 1
else:
raise e
xml_text = result.read().decode("utf-8")
text_dict = self.parse(xml_text)
return self._parse_article(uid, text_dict)
def _parse_article(self, uid: str, text_dict: dict) -> dict:
try:
ar = text_dict["PubmedArticleSet"]["PubmedArticle"]["MedlineCitation"][
"Article"
]
except KeyError:
ar = text_dict["PubmedArticleSet"]["PubmedBookArticle"]["BookDocument"]
abstract_text = ar.get("Abstract", {}).get("AbstractText", [])
summaries = [
f"{txt['@Label']}: {txt['#text']}"
for txt in abstract_text
if "#text" in txt and "@Label" in txt
]
summary = (
"\n".join(summaries)
if summaries
else (
abstract_text
if isinstance(abstract_text, str)
else (
"\n".join(str(value) for value in abstract_text.values())
if isinstance(abstract_text, dict)
else "No abstract available"
)
)
)
a_d = ar.get("ArticleDate", {})
pub_date = "-".join(
[a_d.get("Year", ""), a_d.get("Month", ""), a_d.get("Day", "")]
)
return {
"uid": uid,
"Title": ar.get("ArticleTitle", ""),
"Published": pub_date,
"Copyright Information": ar.get("Abstract", {}).get(
"CopyrightInformation", ""
),
"Summary": summary,
}
|
hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests | hf_public_repos/langchain-ai/langchain/libs/langchain/tests/integration_tests/document_loaders/test_mastodon.py | """Tests for the Mastodon toots loader"""
from langchain.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
"""Test Mastodon toots loader with an external query."""
# Query the Mastodon CEO's account
loader = MastodonTootsLoader(
mastodon_accounts=["@[email protected]"], number_toots=1
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["user_info"]["id"] == 1
| 0 | 1,633 | [{"tag": "EMAIL", "value": "[email protected]", "start": 308, "end": 331}] | true | 1 | """Tests for the Mastodon toots loader"""
from langchain.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
"""Test Mastodon toots loader with an external query."""
# Query the Mastodon CEO's account
loader = MastodonTootsLoader(
mastodon_accounts=["@[email protected]"], number_toots=1
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["user_info"]["id"] == 1
| true | """Tests for the Mastodon toots loader"""
from langchain.document_loaders import MastodonTootsLoader
def test_mastodon_toots_loader() -> None:
"""Test Mastodon toots loader with an external query."""
# Query the Mastodon CEO's account
loader = MastodonTootsLoader(
mastodon_accounts=["@PI:EMAIL:[email protected]_PI"], number_toots=1
)
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["user_info"]["id"] == 1
|
hf_public_repos/zilliztech/GPTCache/tests/unit_tests | hf_public_repos/zilliztech/GPTCache/tests/unit_tests/adapter/test_langchain_models.py | import asyncio
import os
import random
from unittest.mock import patch
from gptcache import Cache, Config
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache, get
from gptcache.adapter.langchain_models import LangChainLLMs, LangChainChat, _cache_msg_data_convert
from gptcache.processor.pre import get_prompt, last_content_without_template, get_messages_last_content
from gptcache.utils import import_pydantic, import_langchain
from gptcache.utils.response import get_message_from_openai_answer
import_pydantic()
import_langchain()
from langchain import OpenAI, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def test_langchain_llms():
question = "test_langchain_llms"
expect_answer = "hello"
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = OpenAI(model_name="text-ada-001")
llm = LangChainLLMs(llm=langchain_openai,cache_obj=llm_cache)
assert str(langchain_openai) == str(llm)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = llm(prompt=question)
assert expect_answer == answer
answer = llm(prompt=question)
assert expect_answer == answer
def test_langchain_chats():
question = [HumanMessage(content="test_langchain_chats")]
question2 = [HumanMessage(content="test_langchain_chats2")]
msg = "chat models"
expect_answer = {
"role": "assistant",
"message": msg,
"content": msg,
}
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_messages_last_content,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = ChatOpenAI(temperature=0)
chat = LangChainChat(chat=langchain_openai,cache_obj=llm_cache)
assert chat.get_num_tokens("hello") == langchain_openai.get_num_tokens("hello")
assert chat.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")]) \
== langchain_openai.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")])
with patch("openai.ChatCompletion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
with patch("openai.ChatCompletion.acreate") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
answer = asyncio.run(chat.agenerate([question]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
def test_last_content_without_template():
string_prompt = PromptTemplate.from_template("tell me a joke about {subject}")
template = string_prompt.template
cache_obj = Cache()
data_dir = str(random.random())
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj, pre_func=last_content_without_template, config=Config(template=template))
subject_str = "animal"
expect_answer = "this is a joke"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": string_prompt.format(subject=subject_str)},
],
cache_obj=cache_obj,
)
assert get_message_from_openai_answer(response) == expect_answer, response
cache_obj.flush()
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj)
cache_res = get(str([subject_str]), cache_obj=cache_obj)
print(str([subject_str]))
assert cache_res == expect_answer, cache_res
| 0 | 859 | [{"tag": "KEY", "value": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD", "start": 5596, "end": 5634}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 1446, "end": 1484}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 3146, "end": 3184}, {"tag": "KEY", "value": "chatcmpl-6ptKqrhgRoVchm58Bby0UvJzq2ZuQ", "start": 3951, "end": 3989}] | true | 4 | import asyncio
import os
import random
from unittest.mock import patch
from gptcache import Cache, Config
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache, get
from gptcache.adapter.langchain_models import LangChainLLMs, LangChainChat, _cache_msg_data_convert
from gptcache.processor.pre import get_prompt, last_content_without_template, get_messages_last_content
from gptcache.utils import import_pydantic, import_langchain
from gptcache.utils.response import get_message_from_openai_answer
import_pydantic()
import_langchain()
from langchain import OpenAI, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def test_langchain_llms():
question = "test_langchain_llms"
expect_answer = "hello"
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = OpenAI(model_name="text-ada-001")
llm = LangChainLLMs(llm=langchain_openai,cache_obj=llm_cache)
assert str(langchain_openai) == str(llm)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = llm(prompt=question)
assert expect_answer == answer
answer = llm(prompt=question)
assert expect_answer == answer
def test_langchain_chats():
question = [HumanMessage(content="test_langchain_chats")]
question2 = [HumanMessage(content="test_langchain_chats2")]
msg = "chat models"
expect_answer = {
"role": "assistant",
"message": msg,
"content": msg,
}
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_messages_last_content,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = ChatOpenAI(temperature=0)
chat = LangChainChat(chat=langchain_openai,cache_obj=llm_cache)
assert chat.get_num_tokens("hello") == langchain_openai.get_num_tokens("hello")
assert chat.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")]) \
== langchain_openai.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")])
with patch("openai.ChatCompletion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
with patch("openai.ChatCompletion.acreate") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6b",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
answer = asyncio.run(chat.agenerate([question]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
def test_last_content_without_template():
string_prompt = PromptTemplate.from_template("tell me a joke about {subject}")
template = string_prompt.template
cache_obj = Cache()
data_dir = str(random.random())
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj, pre_func=last_content_without_template, config=Config(template=template))
subject_str = "animal"
expect_answer = "this is a joke"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "caf86f4uutaoxfysmf7anj01xl6sv3ps",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": string_prompt.format(subject=subject_str)},
],
cache_obj=cache_obj,
)
assert get_message_from_openai_answer(response) == expect_answer, response
cache_obj.flush()
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj)
cache_res = get(str([subject_str]), cache_obj=cache_obj)
print(str([subject_str]))
assert cache_res == expect_answer, cache_res
| true | import asyncio
import os
import random
from unittest.mock import patch
from gptcache import Cache, Config
from gptcache.adapter import openai
from gptcache.adapter.api import init_similar_cache, get
from gptcache.adapter.langchain_models import LangChainLLMs, LangChainChat, _cache_msg_data_convert
from gptcache.processor.pre import get_prompt, last_content_without_template, get_messages_last_content
from gptcache.utils import import_pydantic, import_langchain
from gptcache.utils.response import get_message_from_openai_answer
import_pydantic()
import_langchain()
from langchain import OpenAI, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
def test_langchain_llms():
question = "test_langchain_llms"
expect_answer = "hello"
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_prompt,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = OpenAI(model_name="text-ada-001")
llm = LangChainLLMs(llm=langchain_openai,cache_obj=llm_cache)
assert str(langchain_openai) == str(llm)
with patch("openai.Completion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"text": expect_answer,
}
],
"created": 1677825456,
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = llm(prompt=question)
assert expect_answer == answer
answer = llm(prompt=question)
assert expect_answer == answer
def test_langchain_chats():
question = [HumanMessage(content="test_langchain_chats")]
question2 = [HumanMessage(content="test_langchain_chats2")]
msg = "chat models"
expect_answer = {
"role": "assistant",
"message": msg,
"content": msg,
}
llm_cache = Cache()
llm_cache.init(
pre_embedding_func=get_messages_last_content,
)
os.environ["OPENAI_API_KEY"] = "API"
langchain_openai = ChatOpenAI(temperature=0)
chat = LangChainChat(chat=langchain_openai,cache_obj=llm_cache)
assert chat.get_num_tokens("hello") == langchain_openai.get_num_tokens("hello")
assert chat.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")]) \
== langchain_openai.get_num_tokens_from_messages(messages=[HumanMessage(content="test_langchain_chats")])
with patch("openai.ChatCompletion.create") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
with patch("openai.ChatCompletion.acreate") as mock_create:
mock_create.return_value = {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": expect_answer,
}
],
"delta": {"role": "assistant"},
"created": 1677825456,
"id": "PI:KEY:ax5kh6jaqkcd2tiexxs8v6xjo8yv8a6bEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion",
"usage": {
"completion_tokens": 301,
"prompt_tokens": 36,
"total_tokens": 337
}
}
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = chat(messages=question)
assert answer == _cache_msg_data_convert(msg).generations[0].message
answer = asyncio.run(chat.agenerate([question]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
answer = asyncio.run(chat.agenerate([question2]))
assert answer.generations[0][0].text == _cache_msg_data_convert(msg).generations[0].text
def test_last_content_without_template():
string_prompt = PromptTemplate.from_template("tell me a joke about {subject}")
template = string_prompt.template
cache_obj = Cache()
data_dir = str(random.random())
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj, pre_func=last_content_without_template, config=Config(template=template))
subject_str = "animal"
expect_answer = "this is a joke"
with patch("openai.ChatCompletion.create") as mock_create:
datas = {
"choices": [
{
"message": {"content": expect_answer, "role": "assistant"},
"finish_reason": "stop",
"index": 0,
}
],
"created": 1677825464,
"id": "PI:KEY:caf86f4uutaoxfysmf7anj01xl6sv3psEND_PI",
"model": "gpt-3.5-turbo-0301",
"object": "chat.completion.chunk",
}
mock_create.return_value = datas
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": string_prompt.format(subject=subject_str)},
],
cache_obj=cache_obj,
)
assert get_message_from_openai_answer(response) == expect_answer, response
cache_obj.flush()
init_similar_cache(data_dir=data_dir, cache_obj=cache_obj)
cache_res = get(str([subject_str]), cache_obj=cache_obj)
print(str([subject_str]))
assert cache_res == expect_answer, cache_res
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/document_loaders/mastodon.ipynb | from langchain.document_loaders import MastodonTootsLoader#!pip install Mastodon.pyloader = MastodonTootsLoader(
mastodon_accounts=["@[email protected]"],
number_toots=50, # Default value is 100
)
# Or set up access information to use a Mastodon app.
# Note that the access token can either be passed into
# constructor or you can set the environment "MASTODON_ACCESS_TOKEN".
# loader = MastodonTootsLoader(
# access_token="<ACCESS TOKEN OF MASTODON APP>",
# api_base_url="<API BASE URL OF MASTODON APP INSTANCE>",
# mastodon_accounts=["@[email protected]"],
# number_toots=50, # Default value is 100
# )documents = loader.load()
for doc in documents[:3]:
print(doc.page_content)
print("=" * 80) | 0 | 3,916 | [{"tag": "EMAIL", "value": "[email protected]", "start": 138, "end": 161}, {"tag": "EMAIL", "value": "[email protected]", "start": 566, "end": 589}] | true | 2 | from langchain.document_loaders import MastodonTootsLoader#!pip install Mastodon.pyloader = MastodonTootsLoader(
mastodon_accounts=["@[email protected]"],
number_toots=50, # Default value is 100
)
# Or set up access information to use a Mastodon app.
# Note that the access token can either be passed into
# constructor or you can set the environment "MASTODON_ACCESS_TOKEN".
# loader = MastodonTootsLoader(
# access_token="<ACCESS TOKEN OF MASTODON APP>",
# api_base_url="<API BASE URL OF MASTODON APP INSTANCE>",
# mastodon_accounts=["@[email protected]"],
# number_toots=50, # Default value is 100
# )documents = loader.load()
for doc in documents[:3]:
print(doc.page_content)
print("=" * 80) | true | from langchain.document_loaders import MastodonTootsLoader#!pip install Mastodon.pyloader = MastodonTootsLoader(
mastodon_accounts=["@PI:EMAIL:[email protected]_PI"],
number_toots=50, # Default value is 100
)
# Or set up access information to use a Mastodon app.
# Note that the access token can either be passed into
# constructor or you can set the environment "MASTODON_ACCESS_TOKEN".
# loader = MastodonTootsLoader(
# access_token="<ACCESS TOKEN OF MASTODON APP>",
# api_base_url="<API BASE URL OF MASTODON APP INSTANCE>",
# mastodon_accounts=["@PI:EMAIL:[email protected]_PI"],
# number_toots=50, # Default value is 100
# )documents = loader.load()
for doc in documents[:3]:
print(doc.page_content)
print("=" * 80) |
hf_public_repos/zilliztech | hf_public_repos/zilliztech/GPTCache/setup.py | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="gptcache",
packages=find_packages(),
version=find_version("gptcache", "__init__.py"),
author="SimFG",
author_email="[email protected]",
description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
"applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, "
"similar to how Redis works for traditional applications.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements('requirements.txt'),
url="https://github.com/zilliztech/GPTCache",
license='https://opensource.org/license/mit/',
python_requires='>=3.8.1',
entry_points={
'console_scripts': [
'gptcache_server=gptcache_server.server:main',
],
},
)
| 0 | 782 | [{"tag": "EMAIL", "value": "[email protected]", "start": 1000, "end": 1018}] | true | 1 | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="gptcache",
packages=find_packages(),
version=find_version("gptcache", "__init__.py"),
author="SimFG",
author_email="[email protected]",
description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
"applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, "
"similar to how Redis works for traditional applications.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements('requirements.txt'),
url="https://github.com/zilliztech/GPTCache",
license='https://opensource.org/license/mit/',
python_requires='>=3.8.1',
entry_points={
'console_scripts': [
'gptcache_server=gptcache_server.server:main',
],
},
)
| true | import codecs
import os
import re
from typing import List
import setuptools
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r") as fh:
long_description = fh.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
]
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name="gptcache",
packages=find_packages(),
version=find_version("gptcache", "__init__.py"),
author="SimFG",
author_email="PI:EMAIL:[email protected]_PI",
description="GPTCache, a powerful caching library that can be used to speed up and lower the cost of chat "
"applications that rely on the LLM service. GPTCache works as a memcache for AIGC applications, "
"similar to how Redis works for traditional applications.",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=parse_requirements('requirements.txt'),
url="https://github.com/zilliztech/GPTCache",
license='https://opensource.org/license/mit/',
python_requires='>=3.8.1',
entry_points={
'console_scripts': [
'gptcache_server=gptcache_server.server:main',
],
},
)
|
hf_public_repos/langchain-ai/langchain/libs/experimental/tests | hf_public_repos/langchain-ai/langchain/libs/experimental/tests/unit_tests/test_reversible_data_anonymizer.py | import os
from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is [email protected]"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "[email protected]"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 1
anonymized_name = persons[0]
assert anonymized_text.count(anonymized_name) == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count(anonymized_name) == 3
assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith"
text = "This is Jane Smith"
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 2
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
)
anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")
# ["PERSON", "PHONE_NUMBER"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"444 555 6666"
in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
)
text_to_anonymize = (
"And my name is Jane Doe, my email is [email protected] and "
"my credit card is 4929 5319 6292 5362."
)
anonymizer.anonymize(text_to_anonymize)
# ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"[email protected]"
in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
)
assert (
"4929 5319 6292 5362"
in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
)
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")
try:
anonymizer.save_deanonymizer_mapping("test_file.json")
assert os.path.isfile("test_file.json")
anonymizer = PresidioReversibleAnonymizer()
anonymizer.load_deanonymizer_mapping("test_file.json")
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
finally:
os.remove("test_file.json")
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
| 0 | 1,319 | [{"tag": "EMAIL", "value": "[email protected]", "start": 2213, "end": 2232}, {"tag": "EMAIL", "value": "[email protected]", "start": 2381, "end": 2400}, {"tag": "EMAIL", "value": "[email protected]", "start": 6241, "end": 6255}, {"tag": "EMAIL", "value": "[email protected]", "start": 6593, "end": 6607}] | true | 4 | import os
from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is [email protected]"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "[email protected]"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 1
anonymized_name = persons[0]
assert anonymized_text.count(anonymized_name) == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count(anonymized_name) == 3
assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith"
text = "This is Jane Smith"
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 2
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
)
anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")
# ["PERSON", "PHONE_NUMBER"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"444 555 6666"
in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
)
text_to_anonymize = (
"And my name is Jane Doe, my email is [email protected] and "
"my credit card is 4929 5319 6292 5362."
)
anonymizer.anonymize(text_to_anonymize)
# ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"[email protected]"
in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
)
assert (
"4929 5319 6292 5362"
in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
)
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")
try:
anonymizer.save_deanonymizer_mapping("test_file.json")
assert os.path.isfile("test_file.json")
anonymizer = PresidioReversibleAnonymizer()
anonymizer.load_deanonymizer_mapping("test_file.json")
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
finally:
os.remove("test_file.json")
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
| true | import os
from typing import Iterator, List
import pytest
from . import is_libcublas_available
@pytest.fixture(scope="module", autouse=True)
def check_spacy_model() -> Iterator[None]:
import spacy
if not spacy.util.is_package("en_core_web_lg"):
pytest.skip(reason="Spacy model 'en_core_web_lg' not installed")
yield
@pytest.fixture(scope="module", autouse=True)
def check_libcublas() -> Iterator[None]:
if not is_libcublas_available():
pytest.skip(reason="libcublas.so is not available")
yield
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], False), (["PHONE_NUMBER"], True), (None, False)],
)
def test_anonymize(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text)
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
@pytest.mark.parametrize(
"analyzed_fields,should_contain",
[(["PERSON"], True), (["PHONE_NUMBER"], True), (None, True)],
)
def test_anonymize_allow_list(analyzed_fields: List[str], should_contain: bool) -> None:
"""Test anonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=analyzed_fields)
anonymized_text = anonymizer.anonymize(text, allow_list=["John Doe"])
assert ("John Doe" in anonymized_text) == should_contain
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_multiple() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "John Smith's phone number is 313-666-7440 and email is PI:EMAIL:[email protected]_PI"
anonymizer = PresidioReversibleAnonymizer()
anonymized_text = anonymizer.anonymize(text)
for phrase in ["John Smith", "313-666-7440", "PI:EMAIL:[email protected]_PI"]:
assert phrase not in anonymized_text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_check_instances() -> None:
"""Test anonymizing multiple items in a sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"This is John Smith. John Smith works in a bakery." "John Smith is a good guy"
)
anonymizer = PresidioReversibleAnonymizer(["PERSON"], faker_seed=42)
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 1
anonymized_name = persons[0]
assert anonymized_text.count(anonymized_name) == 3
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text.count(anonymized_name) == 3
assert anonymizer.deanonymizer_mapping["PERSON"][anonymized_name] == "John Smith"
text = "This is Jane Smith"
anonymized_text = anonymizer.anonymize(text)
persons = list(anonymizer.deanonymizer_mapping["PERSON"].keys())
assert len(persons) == 2
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_anonymize_with_custom_operator() -> None:
"""Test anonymize a name with a custom operator"""
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
custom_operator = {"PERSON": OperatorConfig("replace", {"new_value": "NAME"})}
anonymizer = PresidioReversibleAnonymizer(operators=custom_operator)
text = "Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "NAME was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_add_recognizer_operator() -> None:
"""
Test add recognizer and anonymize a new type of entity and with a custom operator
"""
from presidio_analyzer import PatternRecognizer
from presidio_anonymizer.entities import OperatorConfig
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
titles_list = ["Sir", "Madam", "Professor"]
custom_recognizer = PatternRecognizer(
supported_entity="TITLE", deny_list=titles_list
)
anonymizer.add_recognizer(custom_recognizer)
# anonymizing with custom recognizer
text = "Madam Jane Doe was here."
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "<TITLE> Jane Doe was here."
# anonymizing with custom recognizer and operator
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=[])
anonymizer.add_recognizer(custom_recognizer)
custom_operator = {"TITLE": OperatorConfig("replace", {"new_value": "Dear"})}
anonymizer.add_operators(custom_operator)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == "Dear Jane Doe was here."
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymizer_mapping() -> None:
"""Test if deanonymizer mapping is correctly populated"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(
analyzed_fields=["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
)
anonymizer.anonymize("Hello, my name is John Doe and my number is 444 555 6666.")
# ["PERSON", "PHONE_NUMBER"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 2
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"444 555 6666"
in anonymizer.deanonymizer_mapping.get("PHONE_NUMBER", {}).values()
)
text_to_anonymize = (
"And my name is Jane Doe, my email is PI:EMAIL:[email protected]_PI and "
"my credit card is 4929 5319 6292 5362."
)
anonymizer.anonymize(text_to_anonymize)
# ["PERSON", "PHONE_NUMBER", "EMAIL_ADDRESS", "CREDIT_CARD"]
assert len(anonymizer.deanonymizer_mapping.keys()) == 4
assert "Jane Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
assert (
"PI:EMAIL:[email protected]_PI"
in anonymizer.deanonymizer_mapping.get("EMAIL_ADDRESS", {}).values()
)
assert (
"4929 5319 6292 5362"
in anonymizer.deanonymizer_mapping.get("CREDIT_CARD", {}).values()
)
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_deanonymize() -> None:
"""Test deanonymizing a name in a simple sentence"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = "Hello, my name is John Doe."
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymized_text = anonymizer.anonymize(text)
deanonymized_text = anonymizer.deanonymize(anonymized_text)
assert deanonymized_text == text
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_save_load_deanonymizer_mapping() -> None:
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
anonymizer = PresidioReversibleAnonymizer(analyzed_fields=["PERSON"])
anonymizer.anonymize("Hello, my name is John Doe.")
try:
anonymizer.save_deanonymizer_mapping("test_file.json")
assert os.path.isfile("test_file.json")
anonymizer = PresidioReversibleAnonymizer()
anonymizer.load_deanonymizer_mapping("test_file.json")
assert "John Doe" in anonymizer.deanonymizer_mapping.get("PERSON", {}).values()
finally:
os.remove("test_file.json")
@pytest.mark.requires("presidio_analyzer", "presidio_anonymizer", "faker")
def test_non_faker_values() -> None:
"""Test anonymizing multiple items in a sentence without faker values"""
from langchain_experimental.data_anonymizer import PresidioReversibleAnonymizer
text = (
"My name is John Smith. Your name is Adam Smith. Her name is Jane Smith."
"Our names are: John Smith, Adam Smith, Jane Smith."
)
expected_result = (
"My name is <PERSON>. Your name is <PERSON_2>. Her name is <PERSON_3>."
"Our names are: <PERSON>, <PERSON_2>, <PERSON_3>."
)
anonymizer = PresidioReversibleAnonymizer(add_default_faker_operators=False)
anonymized_text = anonymizer.anonymize(text)
assert anonymized_text == expected_result
|
hf_public_repos/langchain-ai/langchain/docs/docs/integrations | hf_public_repos/langchain-ai/langchain/docs/docs/integrations/vectorstores/atlas.ipynb | import time
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import SpacyTextSplitter
from langchain.vectorstores import AtlasDB
from langchain.document_loaders import TextLoaderATLAS_TEST_API_KEY = "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6"loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
texts = []
for doc in text_splitter.split_documents(documents):
texts.extend(doc.page_content.split("|"))
texts = [e.strip() for e in texts]db = AtlasDB.from_texts(
texts=texts,
name="test_index_" + str(time.time()), # unique name for your vector store
description="test_index", # a description for your vector store
api_key=ATLAS_TEST_API_KEY,
index_kwargs={"build_topic_model": True},
)db.project.wait_for_project_lock()db.project | 0 | 3,538 | [{"tag": "KEY", "value": "7xDPkYXSYDc1_ErdTPIcoAR9RNd8YDlkS3nVNXcVoIMZ6", "start": 237, "end": 282}] | true | 1 | import time
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import SpacyTextSplitter
from langchain.vectorstores import AtlasDB
from langchain.document_loaders import TextLoaderATLAS_TEST_API_KEY = "ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7"loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
texts = []
for doc in text_splitter.split_documents(documents):
texts.extend(doc.page_content.split("|"))
texts = [e.strip() for e in texts]db = AtlasDB.from_texts(
texts=texts,
name="test_index_" + str(time.time()), # unique name for your vector store
description="test_index", # a description for your vector store
api_key=ATLAS_TEST_API_KEY,
index_kwargs={"build_topic_model": True},
)db.project.wait_for_project_lock()db.project | true | import time
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import SpacyTextSplitter
from langchain.vectorstores import AtlasDB
from langchain.document_loaders import TextLoaderATLAS_TEST_API_KEY = "PI:KEY:ngw6fo1pu3tjgnp9jnlp7vnwvfqb9yn7END_PI"loader = TextLoader("../../modules/state_of_the_union.txt")
documents = loader.load()
text_splitter = SpacyTextSplitter(separator="|")
texts = []
for doc in text_splitter.split_documents(documents):
texts.extend(doc.page_content.split("|"))
texts = [e.strip() for e in texts]db = AtlasDB.from_texts(
texts=texts,
name="test_index_" + str(time.time()), # unique name for your vector store
description="test_index", # a description for your vector store
api_key=ATLAS_TEST_API_KEY,
index_kwargs={"build_topic_model": True},
)db.project.wait_for_project_lock()db.project |
End of preview. Expand
in Dataset Viewer.
- Downloads last month
- 31