import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage

from llm import DeepSeekLLM
from config import settings

# Single shared provider wrapper holding the API key, base URL and defaults.
deep_seek_llm = DeepSeekLLM(api_key=settings.deep_seek_api_key)


def _build_chat(model: str, temperature: float, max_tokens: int):
    """Construct a ChatOpenAI client pointed at the DeepSeek endpoint.

    Shared by ``init_chat`` and ``update_chat`` so the endpoint wiring
    (api_key / base_url) is defined in exactly one place.
    """
    return ChatOpenAI(
        model=model,
        api_key=deep_seek_llm.api_key,
        base_url=deep_seek_llm.base_url,
        temperature=temperature,
        max_tokens=max_tokens,
    )


def init_chat():
    """Return a chat client configured with the provider's defaults."""
    return _build_chat(
        deep_seek_llm.default_model,
        deep_seek_llm.default_temperature,
        deep_seek_llm.default_max_tokens,
    )


def predict(message, history, chat):
    """Stream an assistant reply for the Gradio ChatInterface.

    Args:
        message: Multimodal textbox payload; only ``message.text`` is used
            (attached files, if any, are currently ignored).
        history: List of ``(user, assistant)`` text pairs from the UI.
        chat: ChatOpenAI instance from session state, or ``None`` before
            any model-settings change has fired.

    Yields:
        The progressively accumulated response text, so the UI renders
        the stream incrementally.
    """
    if chat is None:
        # No settings change has populated the state yet — fall back to a
        # default-configured client for this request.
        chat = init_chat()

    # Replay the visible conversation as alternating Human/AI messages.
    history_messages = []
    for human, assistant in history:
        history_messages.append(HumanMessage(content=human))
        history_messages.append(AIMessage(content=assistant))
    history_messages.append(HumanMessage(content=message.text))

    response_message = ''
    for chunk in chat.stream(history_messages):
        response_message += chunk.content
        yield response_message


def update_chat(_chat, _model: str, _temperature: float, _max_tokens: int):
    """Rebuild the chat client whenever a model setting changes in the UI.

    The previous client (``_chat``) is intentionally discarded; a fresh
    instance is returned and written back into the ``chat_engine`` state.
    """
    return _build_chat(_model, _temperature, _max_tokens)


with gr.Blocks() as app:
    with gr.Tab('聊天'):
        # Per-session chat client; stays None until a settings change fires.
        chat_engine = gr.State(value=None)
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                chatbot = gr.ChatInterface(
                    predict,
                    multimodal=True,
                    chatbot=gr.Chatbot(elem_id="chatbot", height=600, show_share_button=False),
                    textbox=gr.MultimodalTextbox(lines=1),
                    additional_inputs=[chat_engine],
                )
            with gr.Column(scale=1, min_width=300):
                with gr.Accordion('Select Model', open=True):
                    with gr.Column():
                        model = gr.Dropdown(
                            label='模型',
                            choices=deep_seek_llm.support_models,
                            value=deep_seek_llm.default_model,
                        )
                        temperature = gr.Slider(
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            value=deep_seek_llm.default_temperature,
                            label="Temperature",
                            key="temperature",
                        )
                        max_tokens = gr.Number(
                            minimum=1024,
                            maximum=1024 * 20,
                            step=128,
                            value=deep_seek_llm.default_max_tokens,
                            label="Max Tokens",
                            key="max_tokens",
                        )
model.change(fn=update_chat, inputs=[chat_engine, model, temperature, max_tokens], outputs=[chat_engine]) temperature.change(fn=update_chat, inputs=[chat_engine, model, temperature, max_tokens], outputs=[chat_engine]) max_tokens.change(fn=update_chat, inputs=[chat_engine, model, temperature, max_tokens], outputs=[chat_engine]) with gr.Tab('画图'): with gr.Row(): with gr.Column(scale=2, min_width=600): gr.Image(label="Input Image") with gr.Column(scale=1, min_width=300): gr.Textbox(label="LoRA") app.launch(debug=settings.debug, show_api=False)