jet-taekyo committed on
Commit
ed83bfd
1 Parent(s): 9f1b514

add custom langchain wrappers

Browse files
.gitignore CHANGED
@@ -1 +1,2 @@
1
  __pycache__/
 
 
1
  __pycache__/
2
+ *.env
aimakerspace/langchain_wrappers/__init__.py ADDED
File without changes
aimakerspace/langchain_wrappers/langchain_chat_models.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import inspect
import os
from typing import Optional

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI

# Load environment variables (e.g. OPENAI_API_KEY, LangSmith settings) from a
# .env file sitting next to this module. At module level, __file__ is exactly
# what inspect.getfile(inspect.currentframe()) resolved to, minus the frame
# gymnastics.
load_dotenv(os.path.join(os.path.dirname(os.path.abspath(__file__)), '.env'))
8
+
9
class MyChatOpenAI:
    """Factory for pre-configured ``ChatOpenAI`` clients.

    Not meant to be instantiated; use the classmethods directly.
    """

    @classmethod
    def from_model(
        cls,
        model: str = 'gpt-4o-mini',
        *,
        langsmith_project: str = 'default',
        temperature: float = 0.7,
        max_tokens: Optional[int] = 4096,
        max_retries: int = 1,
        **kwargs
    ) -> 'ChatOpenAI':
        """Build a ``ChatOpenAI`` client for a supported model.

        Args:
            model: Model name, matched case-insensitively with hyphens
                optional (e.g. ``'GPT-4o'``, ``'gpt4omini'`` both work).
            langsmith_project: Exported as ``LANGCHAIN_PROJECT`` so LangSmith
                traces land in this project.
            temperature: Sampling temperature forwarded to ``ChatOpenAI``.
            max_tokens: Completion token cap forwarded to ``ChatOpenAI``.
            max_retries: Retry count forwarded to ``ChatOpenAI``.
            **kwargs: Any extra ``ChatOpenAI`` constructor arguments.

        Returns:
            A configured ``ChatOpenAI`` instance.

        Raises:
            ValueError: If ``model`` does not normalize to a supported model.
        """
        # Side effect: route LangSmith tracing to the requested project.
        os.environ['LANGCHAIN_PROJECT'] = langsmith_project

        # Normalize instead of enumerating every capitalization/hyphen
        # variant: lowercase and drop hyphens, so 'GPT-4O-mini' -> 'gpt4omini'.
        normalized = model.strip().lower().replace('-', '')
        if normalized == 'gpt4o':
            model = 'gpt-4o'
        elif normalized == 'gpt4omini':
            model = 'gpt-4o-mini'
        else:
            raise ValueError(f"Model {model} is currently not supported. Supported models are: ['gpt-4o', 'gpt-4o-mini']")

        return ChatOpenAI(
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            max_retries=max_retries,
            **kwargs
        )

    @classmethod
    def get_model_price(cls) -> dict:
        """Return ``{model_name: (input_cost, output_cost)}`` in $ per 1M tokens."""
        return {
            'gpt-4o': (5, 15),           # $5 in / $15 out per 1M tokens
            'gpt-4o-mini': (0.15, 0.6),  # $0.15 in / $0.6 out per 1M tokens
        }
aimakerspace/langchain_wrappers/langchain_embedding_models.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import inspect
import os
from typing import Optional

from dotenv import load_dotenv
from langchain_openai.embeddings import OpenAIEmbeddings

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file sitting
# next to this module. At module level, __file__ is exactly what
# inspect.getfile(inspect.currentframe()) resolved to, minus the frame
# gymnastics.
load_dotenv(os.path.join(os.path.dirname(os.path.abspath(__file__)), '.env'))
+
9
class MyOpenAIEmbeddings:
    """Factory for pre-configured ``OpenAIEmbeddings`` clients.

    Not meant to be instantiated; use the classmethods directly.
    """

    @classmethod
    def from_model(
        cls,
        model: str = 'small',
        *,
        dimensions: Optional[int] = None,
        max_retries: int = 1,
        **kwargs
    ) -> 'OpenAIEmbeddings':
        """Build an ``OpenAIEmbeddings`` client for a supported model.

        Args:
            model: Model name or shorthand, matched case-insensitively
                (``'small'`` / ``'text-embedding-3-small'`` or ``'large'`` /
                ``'text-embedding-3-large'``).
            dimensions: Embedding dimensionality; defaults to the model's
                native size (1536 for small, 3072 for large) when ``None``.
            max_retries: Retry count forwarded to ``OpenAIEmbeddings``.
            **kwargs: Any extra ``OpenAIEmbeddings`` constructor arguments.

        Returns:
            A configured ``OpenAIEmbeddings`` instance.

        Raises:
            ValueError: If ``model`` does not normalize to a supported model.
        """
        # Normalize instead of enumerating capitalization variants.
        normalized = model.strip().lower()
        if normalized in ('text-embedding-3-small', 'small'):
            model = 'text-embedding-3-small'
            dimensions = 1536 if dimensions is None else dimensions
        elif normalized in ('text-embedding-3-large', 'large'):
            model = 'text-embedding-3-large'
            dimensions = 3072 if dimensions is None else dimensions
        else:
            raise ValueError(f"Model {model} is currently not supported. Supported models are: ['text-embedding-3-small', 'text-embedding-3-large']")

        return OpenAIEmbeddings(
            openai_api_key=os.getenv("OPENAI_API_KEY"),
            model=model,
            dimensions=dimensions,
            max_retries=max_retries,
            **kwargs
        )

    @classmethod
    def get_model_price(cls) -> dict:
        """Return ``{model_name: cost}`` in $ per 1M tokens."""
        return {
            'text-embedding-3-small': 0.02,  # $0.02 per 1M tokens
            'text-embedding-3-large': 0.13,  # $0.13 per 1M tokens
        }
+