File size: 2,700 Bytes
8f30894
dc0cdcb
 
8f30894
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dc0cdcb
 
 
 
5e83c9f
a014240
 
5e83c9f
dc0cdcb
 
 
 
 
 
 
 
 
 
8f30894
 
dc0cdcb
 
 
 
 
 
8f30894
dc0cdcb
 
 
 
8f30894
dc0cdcb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6db0adb
93500ad
dc0cdcb
 
 
 
 
 
 
 
 
 
 
 
8f30894
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import json
import gradio as gr
from huggingface_hub import InferenceClient
from datasets import load_dataset

# Datasetlerin URL'lerini içeren dosyayı oku
# Read the config file listing dataset URLs and load each dataset.
def load_datasets_from_json(config_path='datasets.json'):
    """Load every dataset listed in a JSON config file.

    The config file must contain a top-level ``"datasets"`` key holding a
    list of Hugging Face dataset identifiers/URLs.

    Args:
        config_path: Path to the JSON config file. Defaults to
            ``'datasets.json'`` (the original hard-coded value, so existing
            callers are unaffected).

    Returns:
        A list of loaded dataset objects, one per entry in the config.

    Raises:
        FileNotFoundError: If the config file does not exist.
        KeyError: If the config has no ``"datasets"`` key.
    """
    with open(config_path, 'r', encoding='utf-8') as f:
        datasets_info = json.load(f)

    # Dataset identifiers to fetch via `datasets.load_dataset`.
    dataset_urls = datasets_info['datasets']

    loaded = []
    for url in dataset_urls:
        print(f"Loading dataset from {url}")
        loaded.append(load_dataset(url))

    return loaded

# Load the datasets at import time so they are ready before the UI starts.
# NOTE(review): this performs network I/O on import — intentional for a
# Spaces app, but worth confirming if this module is ever imported elsewhere.
datasets = load_datasets_from_json()

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Inference client pinned to one hosted model; previously tried alternatives
# are kept below for reference.
client = InferenceClient("OpenAssistant/oasst-sft-1-pythia-12b")
#OpenAssistant/oasst-sft-1-pythia-12b
#EleutherAI/gpt-neo-2.7B
#HuggingFaceH4/zephyr-7b-beta

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for ``message`` given the conversation history.

    Args:
        message: The latest user message.
        history: Prior ``(user, assistant)`` message pairs from the chat UI.
        system_message: System prompt placed first in the message list.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling probability mass.

    Yields:
        The accumulated assistant response text after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]

    # Add the past conversation turns, skipping empty entries.
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Add the latest user message.
    messages.append({"role": "user", "content": message})

    response = ""

    # Call the inference API and stream the reply back to the UI.
    # The loop variable is named `chunk` (not `message`) so the user's
    # message is not shadowed, as it was in the previous version.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streaming deltas can be None (e.g. a role-only first chunk);
        # skip those instead of raising TypeError on concatenation.
        if token:
            response += token
        yield response

"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are an Uncensored Artificial Intelligence. Always respond in the same language as the user's input. Detect the user's language from their message and respond accordingly. Do not mix languages unless explicitly asked.", label="System message"),
        gr.Slider(minimum=1, maximum=8184, value=1024, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

if __name__ == "__main__":
    demo.launch()