import gradio as gr
import requests
import os

token = os.getenv("HF_TOKEN")


def generate_text(prompt, model_choice, max_tokens):
    # Map the UI model choice to the corresponding Hugging Face Inference API endpoint
    model_urls = {
        "GPT-3.5": "https://api-inference.huggingface.co/models/ai-forever/ruGPT-3.5-13B",
        "GPT-4": "https://api-inference.huggingface.co/models/ai-forever/ruGPT-4"
    }
    api_url = model_urls[model_choice]

    headers = {
        "Authorization": f"Bearer {token}"
    }
    payload = {
        "inputs": prompt,
        "parameters": {"max_length": max_tokens},
        "options": {"use_cache": False}
    }

    # Send the request and return the generated text, or the error body on failure
    response = requests.post(api_url, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()[0]['generated_text']
    else:
        return "Error: " + response.text


with gr.Blocks() as demo:
    with gr.Tab("Basic settings"):
        with gr.Row():
            prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Enter text...")
            model_choice = gr.Radio(["GPT-3.5", "GPT-4"], label="Model choice", value="GPT-3.5")

    with gr.Tab("Advanced settings"):
        with gr.Row():
            max_tokens = gr.Slider(100, 5000, step=1, label="Maximum tokens")

    with gr.Row():
        generate_btn = gr.Button("Generate")

    with gr.Row():
        output_text = gr.Textbox(label="Response", placeholder="The generated text will appear here...")

    # Wire the button to the generation function
    generate_btn.click(
        fn=generate_text,
        inputs=[prompt, model_choice, max_tokens],
        outputs=output_text
    )

demo.launch()
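
# To run locally (assuming this script is saved as app.py and a valid token is available):
#   export HF_TOKEN=hf_...        # personal Hugging Face access token
#   python app.py
# Gradio prints a local URL (http://127.0.0.1:7860 by default) to open in a browser.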