nyanko7 committed on
Commit
f060dfc
0 Parent(s):
Files changed (4) hide show
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +245 -0
  4. requirements.txt +1 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Text To Anime Arena
3
+ emoji: 👀
4
+ colorFrom: blue
5
+ colorTo: gray
6
+ sdk: streamlit
7
+ sdk_version: 1.39.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import streamlit as st
3
+ import time
4
+ from collections import defaultdict
5
+ from streamlit_image_select import image_select
6
+ import requests
7
+ import os
8
+
9
+ st.set_page_config(layout="wide")
10
+
11
+ description = """
12
+ # Anime Leaderboard
13
+ Text to Image (Anime/Illustration) Generation Leaderboard.
14
+ This leaderboard is just for fun and does not reflect the actual performance of the models.
15
+
16
+ ## How to Use
17
+ - Select the image that best reflects the given prompt.
18
+ - Your selections contribute to the global leaderboard.
19
+ - View your personal leaderboard after making at least 30 selections.
20
+
21
+ ## Data
22
+ - Data Source: [nyanko7/image-samples](https://huggingface.co/datasets/nyanko7/image-samples)
23
+ - Calling for submissions: [open issue](https://huggingface.co/spaces/nyanko7/text-to-anime-arena/discussions/new) or contact me to submit your model
24
+ - Warning: Some images may contain NSFW content.
25
+ """
26
+
27
+ if 'selections' not in st.session_state:
28
+ st.session_state['selections'] = []
29
+ if 'selection_count' not in st.session_state:
30
+ st.session_state['selection_count'] = 0
31
+ if 'last_pair' not in st.session_state:
32
+ st.session_state['last_pair'] = None
33
+ if 'user_id' not in st.session_state:
34
+ st.session_state['user_id'] = None
35
+
36
+ st.sidebar.markdown(description)
37
+
38
+ SERVER_URL = os.getenv("W_SERVER") # Replace with your actual server URL
39
+
40
def get_next_pair():
    """Fetch the next image pair to vote on from the backend.

    Returns the decoded JSON payload (expected keys: "image1", "image2",
    "model_a", "model_b", "prompt" — see arena()) on success, or None after
    surfacing a Streamlit error when the request fails or returns non-200.
    """
    try:
        # Bounded timeout so a hung backend cannot block the Streamlit
        # worker thread indefinitely (requests has no default timeout).
        response = requests.get(f"{SERVER_URL}/next_pair", timeout=10)
        if response.status_code == 200:
            return response.json()
        else:
            print(response)  # server-side log for debugging the bad status
            st.error("Failed to fetch next pair from server")
            return None
    except Exception as e:
        print(e)  # server-side log; the UI only sees the generic message
        st.error("Failed to fetch next pair from server")
        return None
53
+
54
+ if "pair" not in st.session_state:
55
+ st.session_state["pair"] = get_next_pair()
56
+
57
def submit_selection(selection_result):
    """POST one vote record to the backend.

    Sends the session's server-assigned user id (if any) in a ``User-ID``
    header, and stores the id the server returns so later submissions are
    attributed to the same user. Errors are reported via st.error and never
    raised to the caller.
    """
    headers = {}
    if st.session_state['user_id']:
        headers['User-ID'] = st.session_state['user_id']
    try:
        # Bounded timeout so a hung backend cannot block the Streamlit
        # worker thread indefinitely (requests has no default timeout).
        response = requests.post(f"{SERVER_URL}/submit_selection", json=selection_result, headers=headers, timeout=10)
        if response.status_code == 200:
            response_data = response.json()
            if 'user_id' in response_data:
                st.session_state['user_id'] = response_data['user_id']
        else:
            st.error("Failed to submit selection to server")
    except Exception as e:
        print(e)  # log the cause server-side instead of swallowing it silently
        st.error("Failed to submit selection to server")
71
+
72
def get_leaderboard_data():
    """Fetch global leaderboard data from the backend.

    Returns the decoded JSON payload (with a "leaderboard" key — see
    leaderboard()) on success, or None after surfacing a Streamlit error.
    """
    try:
        # Bounded timeout so a hung backend cannot block the Streamlit
        # worker thread indefinitely (requests has no default timeout).
        response = requests.get(f"{SERVER_URL}/leaderboard", timeout=10)
        if response.status_code == 200:
            return response.json()
        else:
            st.error("Failed to fetch leaderboard data from server")
            return None
    except Exception as e:
        print(e)  # log the cause server-side instead of swallowing it silently
        st.error("Failed to fetch leaderboard data from server")
        return None
83
+
84
import io
from PIL import Image

def open_image_from_url(image_url):
    """Download an image over HTTP and return it as a PIL Image.

    Raises requests.HTTPError on a non-2xx response. The body is buffered
    into memory (BytesIO) because PIL needs a seekable file-like object.
    """
    # Bounded timeout: image CDNs can stall, and requests has no default.
    response = requests.get(image_url, stream=True, timeout=30)
    response.raise_for_status()
    return Image.open(io.BytesIO(response.content))
91
+
92
@st.fragment
def arena():
    """Main voting page: show one image pair for a prompt and record the pick.

    Runs as a Streamlit fragment so votes and skips rerun only this section
    (st.rerun(scope="fragment")) rather than the whole app.
    """
    # Pair payload fetched from the backend (set at startup / after each vote).
    pair = st.session_state["pair"]
    image_url1, model_a = pair["image1"], pair["model_a"]
    image_url2, model_b = pair["image2"], pair["model_b"]
    prompt = pair["prompt"]

    st.markdown(f"**Which image best reflects this prompt?**")
    st.info(
        f"""
        Prompt: {prompt}
        """,
        icon="⏳",
    )
    # Read image data from the URLs returned by the backend.
    image_a = open_image_from_url(image_url1)
    image_b = open_image_from_url(image_url2)

    images = [image_a, image_b]
    models = [model_a, model_b]
    # index=-1 means "nothing selected yet"; idx stays -1 until the user clicks.
    idx = image_select(
        label="Select the image you prefer",
        images=images,
        index=-1,
        center=True,
        height=700,
        return_value="index"
    )
    if st.button("Skip"):
        # Discard this pair without recording a vote.
        st.session_state["pair"] = get_next_pair()
        st.rerun(scope="fragment")

    # Echo the previous vote's status line (written below) across the rerun.
    if "last_state" in st.session_state and st.session_state["last_state"] is not None:
        st.markdown(st.session_state["last_state"])

    if idx != -1:
        # idx 0/1 maps to the first/second image, i.e. model_a/model_b.
        selection_result = {
            "model_a": model_a,
            "model_b": model_b,
            "winner": "model_a" if idx == 0 else "model_b",
            "time": time.time()
        }
        st.session_state["selections"].append(selection_result)
        st.session_state["selection_count"] += 1
        st.session_state["last_state"] = f"[Selection #{st.session_state['selection_count']}] You selected Image `#{idx+1}` - Model: {models[idx]}"
        submit_selection(selection_result)
        st.session_state["pair"] = get_next_pair()
        st.rerun(scope="fragment")
140
+
141
@st.fragment
def leaderboard():
    """Global leaderboard page: render server-computed model rankings."""
    data = get_leaderboard_data()
    if data is None:
        # Fetch failed; get_leaderboard_data already surfaced the error.
        return

    st.markdown("## Global Leaderboard")
    st.markdown("""
    This leaderboard shows the performance of different models based on user selections.
    - **Elo Rating**: A relative rating system. Higher scores indicate better performance.
    - **Win Rate**: The percentage of times a model was chosen when presented.
    - **#Selections**: Total number of times this model was presented in a pair.
    """)
    st.warning("This leaderboard is just for fun and **does not reflect the actual performance of the models.**")

    # Fix the column order and discard the server-provided index.
    columns = ["Model", "Elo Rating", "Win Rate", "#Selections"]
    table = pd.DataFrame(data["leaderboard"])
    table = table[columns].reset_index(drop=True)
    st.dataframe(table, hide_index=True)
158
+
159
@st.fragment
def my_leaderboard():
    """Personal stats page: Elo / win-rate / counts over this user's own votes.

    Requires at least 30 recorded selections; until then it offers a CSV
    upload so a returning user can restore selections exported earlier via
    the download button at the bottom of this page.
    """
    if "selections" not in st.session_state or len(st.session_state["selections"]) < 30:
        st.markdown("Select over 30 images to see your personal leaderboard")
        # accept_multiple_files=False -> a single UploadedFile (or None).
        uploaded_files = st.file_uploader("Or load your previous selections:", accept_multiple_files=False)
        if uploaded_files:
            logs = pd.read_csv(uploaded_files)
            # Drop the unnamed index column that DataFrame.to_csv() emits on export.
            if "Unnamed: 0" in logs.columns:
                logs.drop(columns=["Unnamed: 0"], inplace=True)
            st.session_state["selections"] = logs.to_dict(orient="records")
            st.rerun()
        return

    selections = pd.DataFrame(st.session_state["selections"])

    st.markdown("## Personal Leaderboard")
    st.markdown("""
    This leaderboard is based on your personal selections.
    - **Elo Rating**: Calculated from your choices. Higher scores indicate models you prefer.
    - **Win Rate**: The percentage of times you chose each model when it was presented.
    - **#Selections**: Number of times you've seen this model in a pair.
    """)

    # Recompute all stats locally from the raw vote log on every rerun.
    elo_ratings = compute_elo(selections.to_dict('records'))
    win_rates = compute_win_rates(selections.to_dict('records'))
    selection_counts = compute_selection_counts(selections.to_dict('records'))

    data = []
    # Union of both columns so models that only appeared on one side are included.
    for model in set(selections['model_a'].unique()) | set(selections['model_b'].unique()):
        data.append({
            "Model": model,
            "Elo Rating": round(elo_ratings[model], 2),
            "Win Rate": f"{win_rates[model]*100:.2f}%",
            "#Selections": selection_counts[model]
        })

    df = pd.DataFrame(data)
    df = df.sort_values("Elo Rating", ascending=False)
    df = df[["Model", "Elo Rating", "Win Rate", "#Selections"]].reset_index(drop=True)
    st.dataframe(df, hide_index=True)

    st.markdown("## Your Recent Selections")
    st.dataframe(selections.tail(20))

    # Export the raw selection log (re-importable via the uploader in this page).
    st.download_button('Download your selection data as CSV', selections.to_csv().encode('utf-8'), "my_selections.csv", "text/csv")
205
+
206
def compute_elo(battles, K=4, SCALE=400, BASE=10, INIT_RATING=1000):
    """Compute Elo ratings from an ordered sequence of battle records.

    Each battle is a dict with 'model_a', 'model_b' and 'winner'
    ('model_a', 'model_b', or any other value to count as a tie).
    Returns a defaultdict mapping model -> rating; unseen models
    default to INIT_RATING.
    """
    ratings = defaultdict(lambda: INIT_RATING)
    for record in battles:
        name_a = record['model_a']
        name_b = record['model_b']
        # Actual score for model_a: win = 1, loss = 0, anything else = tie.
        score_a = {"model_a": 1, "model_b": 0}.get(record['winner'], 0.5)
        # Snapshot both ratings before updating either side.
        r_a, r_b = ratings[name_a], ratings[name_b]
        expected_a = 1 / (1 + BASE ** ((r_b - r_a) / SCALE))
        expected_b = 1 / (1 + BASE ** ((r_a - r_b) / SCALE))
        ratings[name_a] = r_a + K * (score_a - expected_a)
        ratings[name_b] = r_b + K * ((1 - score_a) - expected_b)
    return ratings
217
+
218
def compute_win_rates(battles):
    """Return each model's fraction of wins among the battles it appeared in.

    Ties (any 'winner' other than 'model_a'/'model_b') count as an
    appearance for both sides but a win for neither. Models that never
    appear get rate 0.
    """
    wins = defaultdict(int)
    appearances = defaultdict(int)
    for record in battles:
        name_a, name_b = record['model_a'], record['model_b']
        appearances[name_a] += 1
        appearances[name_b] += 1
        outcome = record['winner']
        if outcome == "model_a":
            wins[name_a] += 1
        elif outcome == "model_b":
            wins[name_b] += 1
    every_model = set(wins) | set(appearances)
    return {
        model: (wins[model] / appearances[model]) if appearances[model] > 0 else 0
        for model in every_model
    }
231
+
232
def compute_selection_counts(battles):
    """Count how many battles each model appeared in, on either side.

    Returns a defaultdict(int) mapping model -> appearance count, so
    lookups for unseen models yield 0.
    """
    counts = defaultdict(int)
    for record in battles:
        for side in ('model_a', 'model_b'):
            counts[record[side]] += 1
    return counts
238
+
239
# Multipage navigation: each function above becomes its own page,
# titled after the function name by default.
pages = [
    st.Page(arena),
    st.Page(leaderboard),
    st.Page(my_leaderboard)
]

st.navigation(pages).run()
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://pub-2fdef7a2969f43289c42ac5ae3412fd4.r2.dev/streamlit_image_select-0.6.0-py3-none-any.whl