Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
File size: 1,799 Bytes
0cc999a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 |
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from ..presets import *
from ..utils import *
from .base_model import BaseLLMModel
class Claude_Client(BaseLLMModel):
    """Chat client backed by Anthropic's legacy text-completions API.

    Uses the ``Anthropic().completions`` endpoint, which expects a single
    prompt string alternating ``HUMAN_PROMPT`` / ``AI_PROMPT`` turns and
    ending with ``AI_PROMPT``.
    """

    def __init__(self, model_name, api_secret) -> None:
        """Create a client for *model_name* authenticated with *api_secret*.

        Raises:
            Exception: if no API secret was configured.
        """
        super().__init__(model_name=model_name)
        self.api_secret = api_secret
        # Fail fast when no credential is configured.
        # (idiom fix: was `if None in [self.api_secret]`)
        if self.api_secret is None:
            raise Exception("请在配置文件或者环境变量中设置Claude的API Secret")
        self.claude_client = Anthropic(api_key=self.api_secret)

    def _build_prompt(self):
        """Serialize the conversation into Claude's legacy prompt string.

        Bug fix: the previous code interpolated the raw history *list* into
        an f-string, so the API received the Python repr of the list
        (e.g. ``[{'role': ...}, ...]``) instead of a readable transcript.

        Returns:
            str: ``HUMAN_PROMPT``/``AI_PROMPT``-formatted transcript,
            terminated with ``AI_PROMPT`` as the completions API requires.
        """
        history = self.history
        if self.system_prompt is not None:
            history = [construct_system(self.system_prompt), *history]
        parts = []
        for message in history:
            # NOTE(review): assumes OpenAI-style {"role": ..., "content": ...}
            # dicts, as construct_system and BaseLLMModel conventionally
            # produce — TODO confirm against the base class.
            if isinstance(message, dict):
                role = message.get("role")
                content = message.get("content", "")
            else:
                role, content = None, str(message)
            if role == "assistant":
                parts.append(f"{AI_PROMPT} {content}")
            else:
                # User and system turns are both presented as human turns.
                parts.append(f"{HUMAN_PROMPT} {content}")
        return "".join(parts) + AI_PROMPT

    def get_answer_stream_iter(self):
        """Yield the growing partial answer as streamed chunks arrive."""
        completion = self.claude_client.completions.create(
            model=self.model_name,
            max_tokens_to_sample=300,  # hard-coded cap kept from original
            prompt=self._build_prompt(),
            stream=True,
        )
        if completion is not None:
            partial_text = ""
            for chunk in completion:
                partial_text += chunk.completion
                yield partial_text
        else:
            yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG

    def get_answer_at_once(self):
        """Return the full answer in one call.

        Returns:
            tuple[str, int]: the completion text and its length, or an
            error message and 0 on failure.
        """
        completion = self.claude_client.completions.create(
            model=self.model_name,
            max_tokens_to_sample=300,  # hard-coded cap kept from original
            prompt=self._build_prompt(),
        )
        if completion is not None:
            return completion.completion, len(completion.completion)
        else:
            return "获取资源错误", 0
|