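"""Gradio arena that pits two randomly chosen chatbots against each other.

Users submit a prompt, see both responses side by side, and upvote the better
model; votes feed a persistent ELO leaderboard stored in elo_ratings.json.
"""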
import gradio as gr
import requests
import os
import json
import random
from elo import update_elo_ratings  # Custom function for ELO ratings
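# Assumption: update_elo_ratings(ratings, winner, loser) returns the ratings dict with a
# standard ELO adjustment applied to the winner and loser; the implementation lives in elo.py.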

# Load the chatbot URLs and their respective model names from a JSON file
with open('chatbot_urls.json', 'r') as file:
    chatbots = json.load(file)

# Get the per-session ELO ratings stored in the Gradio state
def get_user_elo_ratings(state):
    return state['elo_ratings']

# Read ELO ratings from file, falling back to a default of 1200 per model
def read_elo_ratings():
    try:
        with open('elo_ratings.json', 'r') as file:
            elo_ratings = json.load(file)
    except FileNotFoundError:
        elo_ratings = {model: 1200 for model in chatbots.keys()}
    return elo_ratings

# Write ELO ratings back to file
def write_elo_ratings(elo_ratings):
    with open('elo_ratings.json', 'w') as file:
        json.dump(elo_ratings, file, indent=4)

# Send the prompt to a model endpoint (RunPod-style serverless API) and strip the echoed prompt from the reply
def get_bot_response(url, prompt):
    payload = {
        "input": {
            "prompt": prompt,
            "sampling_params": {
                "max_new_tokens": 16,
                "temperature": 0.7,
            }
        }
    }
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": os.environ.get("RUNPOD_TOKEN")
    }
    response = requests.post(url, json=payload, headers=headers)
    return response.json()['output'][0]['generated_text'].replace(prompt,"")

# Pick two distinct bots at random, record which pair answered, and return both responses
def chat_with_bots(user_input, state):
    bot_names = list(chatbots.keys())
    random.shuffle(bot_names)
    bot1_url, bot2_url = chatbots[bot_names[0]], chatbots[bot_names[1]]
    
    # Update the state with the names of the last bots
    state.update({'last_bots': [bot_names[0], bot_names[1]]})

    bot1_response = get_bot_response(bot1_url, user_input)
    bot2_response = get_bot_response(bot2_url, user_input)

    return bot1_response, bot2_response

def update_ratings(state, winner_index):
    # Apply the ELO update for this pairing, keep the session state in sync, and persist to disk
    elo_ratings = get_user_elo_ratings(state)
    winner = state['last_bots'][winner_index]
    loser = state['last_bots'][1 - winner_index]

    elo_ratings = update_elo_ratings(elo_ratings, winner, loser)
    state['elo_ratings'] = elo_ratings
    write_elo_ratings(elo_ratings)
    return f"Updated ELO ratings:\n{winner}: {elo_ratings[winner]}\n{loser}: {elo_ratings[loser]}"

# Record a win for Model A (index 0) and show the rating change in its chat window
def vote_up_model(state, chatbot):
    update_message = update_ratings(state, 0)
    chatbot.append((None, update_message))
    return chatbot

# Record a win for Model B (index 1) and show the rating change in its chat window
def vote_down_model(state, chatbot):
    update_message = update_ratings(state, 1)
    chatbot.append((None, update_message))
    return chatbot

def user_ask(state, chatbot1, chatbot2, textbox):
    user_input = textbox
    bot1_response, bot2_response = chat_with_bots(user_input, state)

    # gr.Chatbot holds (user, assistant) message pairs; guard against an empty initial value
    chatbot1 = chatbot1 or []
    chatbot2 = chatbot2 or []
    chatbot1.append((user_input, bot1_response))
    chatbot2.append((user_input, bot2_response))

    # Enable the voting buttons now that both models have answered
    enable_voting = gr.update(interactive=True)

    return state, chatbot1, chatbot2, enable_voting, enable_voting


with gr.Blocks() as demo:
    # Seed each session's state with the ELO ratings loaded from disk
    state = gr.State({'elo_ratings': read_elo_ratings()})
    with gr.Row():
        # First column for Model A
        with gr.Column():
            chatbot1 = gr.Chatbot(label='Model A')
            upvote_btn_a = gr.Button(value="👍 Upvote A", interactive=False)
        
        # Second column for Model B
        with gr.Column():
            chatbot2 = gr.Chatbot(label='Model B')
            upvote_btn_b = gr.Button(value="👍 Upvote B", interactive=False)        

    # Textbox and submit button at the bottom
    textbox = gr.Textbox(placeholder="Enter your prompt and press ENTER")
    submit_btn = gr.Button(value="Send")

    # Interaction logic
    textbox.submit(user_ask, inputs=[state, chatbot1, chatbot2, textbox], outputs=[state, chatbot1, chatbot2, upvote_btn_a, upvote_btn_b])
    submit_btn.click(user_ask, inputs=[state, chatbot1, chatbot2, textbox], outputs=[state, chatbot1, chatbot2, upvote_btn_a, upvote_btn_b])
    upvote_btn_a.click(vote_up_model, inputs=[state, chatbot1], outputs=[chatbot1])
    upvote_btn_b.click(vote_down_model, inputs=[state, chatbot2], outputs=[chatbot2])

    # Start the interface
    demo.launch()