syglobal1203 committed on
Commit
2d597ae
1 Parent(s): edfa641

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -3
app.py CHANGED
@@ -1,5 +1,6 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
  import os
4
 
5
  MODELS = {
@@ -11,16 +12,33 @@ MODELS = {
11
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
12
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
13
  "Cohere Command R+": "CohereForAI/c4ai-command-r-plus",
14
- "Cohere Aya-23-35B": "CohereForAI/aya-23-35B"
 
15
  }
16
 
17
def get_client(model_name):
    """Resolve a display name from MODELS to an authenticated InferenceClient.

    Raises:
        ValueError: if the HF_TOKEN environment variable is not set.
    """
    repo_id = MODELS[model_name]
    token = os.getenv("HF_TOKEN")
    if not token:
        raise ValueError("HF_TOKEN environment variable is required")
    return InferenceClient(repo_id, token=token)
23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
  def respond(
25
  message,
26
  chat_history,
@@ -31,7 +49,16 @@ def respond(
31
  system_message,
32
  ):
33
  try:
34
- client = get_client(model_name)
 
 
 
 
 
 
 
 
 
35
  except ValueError as e:
36
  chat_history.append((message, str(e)))
37
  return chat_history
@@ -115,4 +142,4 @@ with gr.Blocks() as demo:
115
  clear_button.click(clear_conversation, outputs=chatbot, queue=False)
116
 
117
# Entry point: launch the Gradio app only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import openai # OpenAI API 추가
4
  import os
5
 
6
  MODELS = {
 
12
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
13
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
14
  "Cohere Command R+": "CohereForAI/c4ai-command-r-plus",
15
+ "Cohere Aya-23-35B": "CohereForAI/aya-23-35B",
16
+ "OpenAI gpt-4o-mini": "openai/gpt-4o-mini" # 새로운 모델 추가
17
  }
18
 
19
def get_client(model_name):
    """Return an InferenceClient for `model_name`, or None for the OpenAI model.

    The OpenAI model is served through call_openai_api() rather than the HF
    Inference API, so no client object exists for it.

    Raises:
        ValueError: if `model_name` is not in MODELS, or HF_TOKEN is unset.
            (respond() catches ValueError and shows it in the chat, so an
            unknown name must not escape as a raw KeyError.)
    """
    if model_name == "OpenAI gpt-4o-mini":
        # Handled by the OpenAI code path; no HF client needed.
        return None
    if model_name not in MODELS:
        # Explicit ValueError instead of a KeyError from the dict lookup,
        # so respond()'s error handler can surface it to the user.
        raise ValueError(f"Unknown model: {model_name}")
    model_id = MODELS[model_name]
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise ValueError("HF_TOKEN environment variable is required")
    return InferenceClient(model_id, token=hf_token)
27
 
28
def call_openai_api(content, system_message, max_tokens, temperature, top_p):
    """Send a single-turn chat request to OpenAI and return the reply text.

    Parameters mirror the HF inference path: `content` is the user message,
    `system_message` seeds the system role, and `max_tokens` / `temperature` /
    `top_p` are forwarded to the API unchanged.

    Raises:
        ValueError: if OPENAI_API_KEY is not set — consistent with the
            HF_TOKEN check in get_client(), so respond() can surface the
            message in the chat instead of an opaque SDK auth error.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OPENAI_API_KEY environment variable is required")
    openai.api_key = api_key
    # NOTE(review): openai.ChatCompletion was removed in openai>=1.0; this
    # call requires openai<1.0 (or a migration to openai.OpenAI().chat
    # .completions.create). Confirm the pinned SDK version before deploying.
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": content},
        ],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message["content"]
41
+
42
  def respond(
43
  message,
44
  chat_history,
 
49
  system_message,
50
  ):
51
  try:
52
+ if model_name == "OpenAI gpt-4o-mini":
53
+ # OpenAI 모델에 대한 응답 처리
54
+ assistant_message = call_openai_api(message, system_message, max_tokens, temperature, top_p)
55
+ chat_history.append((message, assistant_message))
56
+ yield chat_history
57
+ else:
58
+ client = get_client(model_name)
59
+ if client is None:
60
+ raise ValueError("Model client initialization failed")
61
+
62
  except ValueError as e:
63
  chat_history.append((message, str(e)))
64
  return chat_history
 
142
  clear_button.click(clear_conversation, outputs=chatbot, queue=False)
143
 
144
# Script entry point — start the Gradio server only under direct execution.
if __name__ == "__main__":
    demo.launch()