Spaces:
Running
Running
DeL-TaiseiOzaki
committed on
Commit
•
fac50d3
1
Parent(s):
20c757d
- services/llm_service.py +8 -6
services/llm_service.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
from typing import Optional, List, Dict, Any
|
2 |
-
import
|
3 |
import anthropic
|
4 |
from dataclasses import dataclass
|
5 |
from config.llm_settings import LLMSettings
|
@@ -22,7 +22,7 @@ class LLMService:
|
|
22 |
if self.settings.anthropic_api_key:
|
23 |
self.claude_client = anthropic.Anthropic(api_key=self.settings.anthropic_api_key)
|
24 |
if self.settings.openai_api_key:
|
25 |
-
|
26 |
|
27 |
self.conversation_history: List[Message] = []
|
28 |
|
@@ -87,17 +87,19 @@ class LLMService:
|
|
87 |
prompt = self.create_prompt(content, query)
|
88 |
self._add_to_history("user", prompt)
|
89 |
|
90 |
-
if self.current_model == '
|
91 |
response = self.claude_client.messages.create(
|
92 |
model="claude-3-5-sonnet-latest",
|
93 |
-
messages=self._format_messages_for_claude()
|
|
|
94 |
)
|
95 |
answer = response.content[0].text
|
96 |
|
97 |
else: # gpt
|
98 |
-
response =
|
99 |
model="gpt-4o",
|
100 |
-
messages=self._format_messages_for_gpt()
|
|
|
101 |
)
|
102 |
answer = response.choices[0].message.content
|
103 |
|
|
|
1 |
from typing import Optional, List, Dict, Any
|
2 |
+
from openai import OpenAI
|
3 |
import anthropic
|
4 |
from dataclasses import dataclass
|
5 |
from config.llm_settings import LLMSettings
|
|
|
22 |
if self.settings.anthropic_api_key:
|
23 |
self.claude_client = anthropic.Anthropic(api_key=self.settings.anthropic_api_key)
|
24 |
if self.settings.openai_api_key:
|
25 |
+
self.openai_client = OpenAI(api_key=self.settings.openai_api_key)
|
26 |
|
27 |
self.conversation_history: List[Message] = []
|
28 |
|
|
|
87 |
prompt = self.create_prompt(content, query)
|
88 |
self._add_to_history("user", prompt)
|
89 |
|
90 |
+
if self.current_model == 'claude':
|
91 |
response = self.claude_client.messages.create(
|
92 |
model="claude-3-5-sonnet-latest",
|
93 |
+
messages=self._format_messages_for_claude(),
|
94 |
+
max_tokens=1024
|
95 |
)
|
96 |
answer = response.content[0].text
|
97 |
|
98 |
else: # gpt
|
99 |
+
response = self.openai_client.chat.completions.create(
|
100 |
model="gpt-4o",
|
101 |
+
messages=self._format_messages_for_gpt(),
|
102 |
+
max_tokens=1024
|
103 |
)
|
104 |
answer = response.choices[0].message.content
|
105 |
|