XufengDuan committed on
Commit e157bd5
1 Parent(s): dcbdce4

update scripts

app.py CHANGED
@@ -457,7 +457,7 @@ with demo:
     def background_init_and_process():
         global original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df
         original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space()
-        process_pending_evals()
+        #process_pending_evals()
 
     scheduler = BackgroundScheduler()
     scheduler.add_job(background_init_and_process, 'date', run_date=datetime.datetime.now())  # run immediately
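For context, the hunk above keeps the heavy initialization off the main thread so the Space UI stays responsive. A minimal standalone sketch of that one-shot 'date' job pattern (the function body is a placeholder, not repo code):

```python
# Minimal sketch: run a function exactly once, immediately, in a background thread.
import datetime

from apscheduler.schedulers.background import BackgroundScheduler


def background_init_and_process():
    # Placeholder for init_space() and any follow-up processing.
    print("loading leaderboard data in the background...")


scheduler = BackgroundScheduler()
# A 'date' trigger with run_date=now fires the job a single time as soon as the scheduler starts.
scheduler.add_job(background_init_and_process, 'date', run_date=datetime.datetime.now())
scheduler.start()
```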
src/backend/evaluate_model.py CHANGED
@@ -95,23 +95,39 @@ class Evaluator:
             '''Start evaluating the model results'''
             self.humanlike = self.eval_model.evaluate_humanlike(self.generated_summaries_df, envs.HUMAN_DATA, f"./generation_results/{self.model}.csv")
 
-
+            all_results = self.humanlike
+            # Prepare individual experiment scores and CIs
+            experiment_results = {}
+            for exp, data in all_results['per_experiment'].items():
+                experiment_results[f'{exp}'] = data['average_js_divergence']
+                experiment_results[f'{exp}_ci'] = data['confidence_interval']
+
+            # Write the results into the leaderboard entry using util.format_results
+            results = util.format_results(
+                model_name=self.model,
+                revision=self.revision,
+                precision=self.precision,
+                overall_js=all_results['overall']['average_js_divergence'],
+                overall_ci=all_results['overall']['confidence_interval'],
+                **experiment_results  # Unpack the experiment results
+            )
 
             '''Original metrics'''
+
             # self.hallucination_scores, self.eval_results = self.eval_model.evaluate_hallucination(
             #     self.generated_summaries_df)
             # factual_consistency_rate = self.eval_model.compute_factual_consistency_rate()
             # hallucination_rate = self.eval_model.hallucination_rate
-            factual_consistency_rate = 0
-            answer_rate = 0
-            avg_summary_len = 0
-
-            results = util.format_results(model_name=self.model, revision=self.revision,
-                                          precision=self.precision,
-                                          factual_consistency_rate=factual_consistency_rate,
-                                          hallucination_rate=self.humanlike,
-                                          answer_rate=answer_rate,
-                                          avg_summary_len=avg_summary_len)
+            # factual_consistency_rate = 0
+            # answer_rate = 0
+            # avg_summary_len = 0
+            #
+            # results = util.format_results(model_name=self.model, revision=self.revision,
+            #                               precision=self.precision,
+            #                               factual_consistency_rate=factual_consistency_rate,
+            #                               hallucination_rate=self.humanlike,
+            #                               answer_rate=answer_rate,
+            #                               avg_summary_len=avg_summary_len)
             return results
         except FileNotFoundError:
             logging.error(f"File not found: {envs.DATASET_PATH}")
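The hunk above assumes `evaluate_humanlike` returns a nested dict with an overall score plus per-experiment scores and confidence intervals. A small illustrative sketch of that assumed shape and of the flattening into keyword arguments (all values invented):

```python
# Illustrative only: assumed shape of the dict returned by evaluate_humanlike().
all_results = {
    "overall": {"average_js_divergence": 0.81, "confidence_interval": (0.78, 0.84)},
    "per_experiment": {
        "E1": {"average_js_divergence": 0.90, "confidence_interval": (0.85, 0.93)},
        "E2": {"average_js_divergence": 0.72, "confidence_interval": (0.66, 0.77)},
    },
}

# Flatten per-experiment entries into E1, E1_ci, E2, E2_ci, ... keyword arguments.
experiment_results = {}
for exp, data in all_results["per_experiment"].items():
    experiment_results[exp] = data["average_js_divergence"]
    experiment_results[f"{exp}_ci"] = data["confidence_interval"]

print(experiment_results)
# {'E1': 0.9, 'E1_ci': (0.85, 0.93), 'E2': 0.72, 'E2_ci': (0.66, 0.77)}
```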
src/backend/model_operations.py CHANGED
@@ -28,6 +28,7 @@ import src.envs as envs
 # # import pandas as pd
 # import scipy
 from scipy.spatial.distance import jensenshannon
+from scipy.stats import bootstrap
 import numpy as np
 import spacy_transformers
 
@@ -238,7 +239,6 @@ class SummaryGenerator:
 
        def extract_responses(text, trigger_words=None):
            if trigger_words is None:
-               # If no specific trigger word list is provided, use the default values
                trigger_words = ["sure", "okay", "yes"]
 
            try:
@@ -248,7 +248,7 @@ class SummaryGenerator:
 
                sentences = [sentence.split(':', 1)[-1].strip() if ':' in sentence else sentence for
                             sentence in sentences]
-               if any(sentences[0].lower().startswith(word) for word in trigger_words):
+               if any(sentences[0].lower().startswith(word) for word in trigger_words) and len(sentences) > 2:
                    _response1 = sentences[1].strip() if len(sentences) > 1 else None
                    _response2 = sentences[2].strip() if len(sentences) > 2 else None
                else:
@@ -279,10 +279,8 @@ class SummaryGenerator:
                        Experiment_ID.append(ID)
                        Questions_ID.append(q_column[j])
                        User_prompt.append(_user_prompt)
-
                        Response.append(_response2)
-
-                       Factor_2.append(V2_column[j])
+                       Factor_2.append(_response)
                        Stimuli_1.append(Stimuli_2_column[j])
                        Item_ID.append(Item_column[j])
                        Condition.append(Condition_column[j])
@@ -292,10 +290,7 @@ class SummaryGenerator:
                        Questions_ID.append(str(q_column[j]) + '1')
                        User_prompt.append(_user_prompt)
                        Response.append(_response1)
-
-
-
-                       Factor_2.append(V2_column[j])
+                       Factor_2.append(_response)
                        Stimuli_1.append(Stimuli_1_column[j])
                        Item_ID.append(Item_column[j])
                        Condition.append(Condition_column[j])
@@ -343,7 +338,7 @@ class SummaryGenerator:
        together_ai_api_models = ['mixtral', 'dbrx', 'wizardlm']
        for together_ai_api_model in together_ai_api_models:
            if together_ai_api_model in self.model_id.lower():
-               using_together_api = True
+               #using_together_api = True
                break
        # print('which kind of LLM applies', together_ai_api_model, using_together_api)
        # print(self.model_id.lower())  # meta-llama/llama-2-7b-chat-hf
@@ -358,7 +353,7 @@ class SummaryGenerator:
            payload = {
                "model": self.model_id,
                # "max_tokens": 4096,
-               'max_new_tokens': 50,
+               'max_new_tokens': 100,
                # "temperature": 0.0,
                # 'repetition_penalty': 1.1 if 'mixtral' in self.model_id.lower() else 1
            }
@@ -408,7 +403,7 @@ class SummaryGenerator:
            )
 
            generation_args = {
-               "max_new_tokens": 50,
+               "max_new_tokens": 100,
                "return_full_text": False,
                #"temperature": 0.0,
                "do_sample": False,
@@ -422,7 +417,7 @@ class SummaryGenerator:
            print(prompt)
            input_ids = self.tokenizer(prompt, return_tensors="pt").to('cuda')
            with torch.no_grad():
-               outputs = self.local_model.generate(**input_ids, max_new_tokens=50, do_sample=True, pad_token_id=self.tokenizer.eos_token_id)
+               outputs = self.local_model.generate(**input_ids, max_new_tokens=100, do_sample=True, pad_token_id=self.tokenizer.eos_token_id)
            result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            result = result.replace(prompt[0], '')
            print(result)
@@ -430,45 +425,83 @@ class SummaryGenerator:
 
 
        elif self.local_model is None:
-           # print(self.model_id)
-           # print(self.api_base)
-           # mistralai/Mistral-7B-Instruct-v0.1
-           # https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1
-           # Using HF API or download checkpoints
-           try:  # try use HuggingFace API
-               from huggingface_hub import InferenceClient
-               print("token_for_request:", envs.TOKEN)
-               print(self.model_id)
-               client = InferenceClient(self.model_id, api_key=envs.TOKEN, headers={"X-use-cache": "false"})
-               messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
-               # outputs = client.chat_completion(messages, max_tokens=50)
-               result = None
-               while result is None:
-                   outputs = client.chat_completion(messages, max_tokens=50)
-                   result = outputs['choices'][0]['message']['content']
-
-                   if result is None:
-                       time.sleep(1)  # Optional: Add a small delay before retrying
-
-               return result
-
-           except Exception as e:
-               print(f"Error with TOKEN: {envs.TOKEN}, trying with TOKEN1")
+           import random
+
+           def get_random_token():
+               i = random.randint(1, 20)
+               token = getattr(envs, f"TOKEN{i}")
+               return token, i
+
+           tokens_tried = set()
+
+           while len(tokens_tried) < 10:
+               token, i = get_random_token()
+
+               if token in tokens_tried:
+                   continue
+
+               tokens_tried.add(token)
+               print(f"Trying with token: TOKEN{i}")
+
                try:
-                   client = InferenceClient(self.model_id, api_key=envs.TOKEN1, headers={"X-use-cache": "false"})
+                   from huggingface_hub import InferenceClient
+                   client = InferenceClient(self.model_id, api_key=token, headers={"X-use-cache": "false"})
                    messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
                    result = None
+
                    while result is None:
-                       outputs = client.chat_completion(messages, max_tokens=50)
+                       outputs = client.chat_completion(messages, max_tokens=100)
                        result = outputs['choices'][0]['message']['content']
 
                        if result is None:
                            time.sleep(1)  # Optional: Add a small delay before retrying
 
                    return result
-               except Exception as ee:
-                   print(f"Error with TOKEN1: {envs.TOKEN1}")
-                   raise ee
+
+               except Exception as e:
+                   print(f"Error with token: {token}, trying another token...")
+                   continue
+
+           raise Exception("All tokens failed.")
+           # print(self.model_id)
+           # print(self.api_base)
+           # mistralai/Mistral-7B-Instruct-v0.1
+           # https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1
+           # Using HF API or download checkpoints
+           # try:  # try use HuggingFace API
+           #     from huggingface_hub import InferenceClient
+           #     print("token_for_request:", envs.TOKEN)
+           #     print(self.model_id)
+           #     client = InferenceClient(self.model_id, api_key=envs.TOKEN, headers={"X-use-cache": "false"})
+           #     messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+           #     # outputs = client.chat_completion(messages, max_tokens=100)
+           #     result = None
+           #     while result is None:
+           #         outputs = client.chat_completion(messages, max_tokens=100)
+           #         result = outputs['choices'][0]['message']['content']
+           #
+           #         if result is None:
+           #             time.sleep(1)  # Optional: Add a small delay before retrying
+           #
+           #     return result
+           #
+           # except Exception as e:
+           #     print(f"Error with TOKEN: {envs.TOKEN}, trying with TOKEN1")
+           #     try:
+           #         client = InferenceClient(self.model_id, api_key=envs.TOKEN1, headers={"X-use-cache": "false"})
+           #         messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
+           #         result = None
+           #         while result is None:
+           #             outputs = client.chat_completion(messages, max_tokens=100)
+           #             result = outputs['choices'][0]['message']['content']
+           #
+           #             if result is None:
+           #                 time.sleep(1)  # Optional: Add a small delay before retrying
+           #
+           #         return result
+           #     except Exception as ee:
+           #         print(f"Error with TOKEN1: {envs.TOKEN1}")
+           #         raise ee
+
 
        # except:  # fail to call api. run it locally.
        #     self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, trust_remote_code=True)
@@ -505,7 +538,7 @@ class SummaryGenerator:
                "temperature": 0,
                "top_p": 0.95,  # cannot change
                "top_k": 0,
-               "max_output_tokens": 50,
+               "max_output_tokens": 100,
                # "response_mime_type": "application/json",
            }
            safety_settings = [
@@ -545,7 +578,7 @@ class SummaryGenerator:
                messages=[{"role": "system", "content": system_prompt},
                          {"role": "user", "content": user_prompt}],
                # temperature=0.0,
-               max_tokens=50,
+               max_tokens=100,
                api_key = os.getenv('OpenAI_key')
            )
            result = response['choices'][0]['message']['content']
@@ -643,8 +676,10 @@ class EvaluationModel:
            sentences = [sentence.split(':', 1)[-1].strip() if ':' in sentence else sentence
                         for sentence in sentences]
            rs = [sentence.strip() for sentence in sentences if sentence.strip()]
-
+           rs = '\n'.join(rs)
+           rs = rs.replace("[", '').replace("]", '')
            '''Exp1'''
+           # period and comma will affect the result
            if summaries_df["Experiment"][i] == "E1":
                print("E1", rs)
                rs = rs.replace('"', '')
@@ -658,7 +693,7 @@ class EvaluationModel:
 
 
                '''Exp2'''
-
+           # not the first pronoun
            elif summaries_df["Experiment"][i] == "E2":
                # rs = summaries_df["Response"][i].strip()
                rs = rs.split(' ')
@@ -677,6 +712,7 @@ class EvaluationModel:
                    output.append("Other")
 
                '''Exp3'''
+           #
            elif summaries_df["Experiment"][i] == "E3":
                # rs = summaries_df["Response"][i].strip()
                print("E3", rs)
@@ -906,18 +942,28 @@ class EvaluationModel:
        for i in range(len(summaries_df["Experiment"])):
            # vote_1_1, vote_1_2, vote_1_3 = 0, 0, 0
            # print()
+           # data cleaning
            if pd.isna(summaries_df["Response"][i]):
                output.append("Other")
                continue
            rs = summaries_df["Response"][i].strip().lower()
+           sentences = rs.split('\n')
+           sentences = [sentence.split(':', 1)[-1].strip() if ':' in sentence else sentence
+                        for sentence in sentences]
+           rs = [sentence.strip() for sentence in sentences if sentence.strip()]
+           rs = '\n'.join(rs)
+           rs = rs.replace('[', '').replace(']', '').replace('.', '')
            '''Exp1'''
+           # the period and comma will affect the result
            if summaries_df["Experiment"][i] == "E1":
                print("E1", rs)
-               rs = rs.replace('"', '')
-               if rs == "round":
-                   # vote_1_1 += 1
+               rs = rs.replace('"', '')  # Remove any unnecessary quotation marks
+               rs_cleaned = rs.replace(',', '')  # Remove periods and commas
+
+               # Use 'contains' instead of 'equals' for keyword matching to avoid issues caused by punctuation
+               if "round" in rs_cleaned:
                    output.append("Round")
-               elif rs == "spiky":
+               elif "spiky" in rs_cleaned:
                    output.append("Spiky")
                else:
                    output.append("Other")
@@ -926,7 +972,6 @@ class EvaluationModel:
 
                '''Exp2'''
 
            elif summaries_df["Experiment"][i] == "E2":
-               # rs = summaries_df["Response"][i].strip()
                rs = rs.split(' ')
                print("E2", rs)
                male, female = 0, 0
@@ -946,7 +991,7 @@ class EvaluationModel:
            elif summaries_df["Experiment"][i] == "E3":
                # rs = summaries_df["Response"][i].strip()
                print("E3", rs)
-               rs = rs.replace('"', '')
+               rs = rs.replace('"', '').lower().replace(".", "")
                pair = summaries_df["Factor 2"][i]
                word1, word2 = pair.split('_')
 
@@ -975,7 +1020,8 @@ class EvaluationModel:
                    print(f"Unexpected error: {e}")
                    output.append("Other")
                    continue
-
+               meaning_word = meaning_word.replace('.', '')
+               meaning_word = meaning_word.replace(';', '')
                target = summaries_df["Factor 2"][i].strip().lower()
                pair = target + "_" + meaning_word
                print("E4:", pair)
@@ -1053,7 +1099,6 @@ class EvaluationModel:
                doc = nlp1(sentence)
                subject = "None"
                obj = "None"
-               # Traverse the dependency relations to find the subject and object
                for token in doc:
                    if token.dep_ == "nsubj":
                        subject = token.text
@@ -1078,14 +1123,24 @@ class EvaluationModel:
 
                '''Exp7'''
            elif summaries_df["Experiment"][i] == "E7":
-               # rs = summaries_df["Response"][i].strip().lower()
-               rs = rs.replace(".", "").replace(",", "")
-               print("E7", rs)
-               if rs == "no":
-                   output.append("0")
-               elif rs == "yes":
-                   output.append("1")
-               else:
+               # Remove periods and commas, then convert to lowercase
+               rs = rs.replace(".", "").replace(",", "").lower()
+               print("E7", rs)
+
+               # Split the response into words
+               words = rs.split(' ')
+               found = False
+
+               for word in words:
+                   if word == "no":
+                       output.append("0")
+                       found = True
+                       break
+                   elif word == "yes":
+                       output.append("1")
+                       found = True
+                       break
+               if not found:
                    output.append("Other")
 
                '''Exp8'''
@@ -1136,14 +1191,17 @@ class EvaluationModel:
 
                '''Exp10'''
            elif summaries_df["Experiment"][i] == "E10":
-               # rs = summaries_df["Response"][i].strip()
-               rs = rs.replace(".", "")
-               if rs == "yes":
+               # Remove periods from the response
+               rs = rs.replace(".", "").lower()  # Convert to lowercase to ensure case-insensitivity
+               print("E10", rs)
+
+               # Check if the response contains "yes"
+               if "yes" in rs:
                    output.append("1")
                else:
                    output.append("0")
            else:
-               print("can;t find the Exp:", summaries_df["Experiment"][i])
+               print("can't find the Exp:", summaries_df["Experiment"][i])
                output.append("NA")
            # print(output)
            # exit()
@@ -1207,6 +1265,7 @@ class EvaluationModel:
        human_df = pd.concat([human_df, human_e5], ignore_index=True)
        llm_df = pd.concat([llm_df, llm_e5], ignore_index=True)
 
+
        ### Calculate Average JS Divergence ###
 
        # Extract the relevant columns for JS divergence calculation
@@ -1216,14 +1275,14 @@ class EvaluationModel:
        # Get unique Question_IDs present in both datasets
        common_question_ids = set(human_responses['Question_ID']).intersection(set(llm_responses['Question_ID']))
 
-       # Initialize a list to store JS divergence for each Question_ID
-       js_divergence_list = []
-       js_divergence = {}
+       # Initialize a dictionary to store JS divergence for each experiment
+       js_divergence = {}
 
        # Calculate JS divergence for each common Question_ID
        for q_id in common_question_ids:
            # Get response distributions for the current Question_ID in both datasets
-           human_dist = human_responses[human_responses['Question_ID'] == q_id]['Coding'].value_counts(normalize=True)
+           human_dist = human_responses[human_responses['Question_ID'] == q_id]['Coding'].value_counts(
+               normalize=True)
            llm_dist = llm_responses[llm_responses['Question_ID'] == q_id]['Coding'].value_counts(normalize=True)
 
            # Reindex the distributions to have the same index, filling missing values with 0
@@ -1231,28 +1290,94 @@ class EvaluationModel:
            human_dist = human_dist.reindex(all_responses, fill_value=0)
            llm_dist = llm_dist.reindex(all_responses, fill_value=0)
 
-           # Calculate JS divergence and add to the list
+           # Calculate JS divergence
            js_div = jensenshannon(human_dist, llm_dist, base=2)
            experiment_id = q_id.split('_')[1]
+
            if experiment_id not in js_divergence:
                js_divergence[experiment_id] = []
            js_divergence[experiment_id].append(js_div)
 
-           js_divergence_list.append(js_div)
-           #js_divergence[q_id] = js_div
-
-
-
-       # Calculate the average JS divergence
-       # JS per experiment
-       avg_js_divergence_per_experiment = {exp: 1 - np.nanmean(divs) for exp, divs in js_divergence.items()}
-       print(avg_js_divergence_per_experiment)
-
-       # JS overall
-       avg_js_divergence = 1 - np.nanmean(js_divergence_list)
-       print("avg_js_divergence:", avg_js_divergence)
-
-       return avg_js_divergence
+       # Calculate the average JS divergence per experiment and the confidence interval
+       results = {}
+       for exp, divs in js_divergence.items():
+           avg_js_divergence = 1 - np.nanmean(divs)
+           ci_lower, ci_upper = bootstrap((divs,), np.nanmean, confidence_level=0.95,
+                                          n_resamples=1000).confidence_interval
+           results[exp] = {
+               'average_js_divergence': avg_js_divergence,
+               'confidence_interval': (1 - ci_upper, 1 - ci_lower)  # Adjust for 1 - score
+           }
+
+       # Calculate the overall average JS divergence and confidence interval
+       overall_js_divergence = 1 - np.nanmean([js for divs in js_divergence.values() for js in divs])
+       flattened_js_divergence = np.concatenate([np.array(divs) for divs in js_divergence.values()])
+
+       # Compute the overall confidence interval
+       overall_ci_lower, overall_ci_upper = bootstrap(
+           (flattened_js_divergence,),
+           np.nanmean,
+           confidence_level=0.95,
+           n_resamples=1000
+       ).confidence_interval
+
+       # Combine all results into one dictionary
+       all_results = {
+           'overall': {
+               'average_js_divergence': overall_js_divergence,
+               'confidence_interval': (1 - overall_ci_upper, 1 - overall_ci_lower)
+           },
+           'per_experiment': results
+       }
+
+       return all_results
+
+       # ### Calculate Average JS Divergence ###
+       #
+       # # Extract the relevant columns for JS divergence calculation
+       # human_responses = human_df[['Question_ID', 'Coding']]
+       # llm_responses = llm_df[['Question_ID', 'Coding']]
+       #
+       # # Get unique Question_IDs present in both datasets
+       # common_question_ids = set(human_responses['Question_ID']).intersection(set(llm_responses['Question_ID']))
+       #
+       # # Initialize a list to store JS divergence for each Question_ID
+       # js_divergence_list = []
+       # js_divergence = {}
+       #
+       # # Calculate JS divergence for each common Question_ID
+       # for q_id in common_question_ids:
+       #     # Get response distributions for the current Question_ID in both datasets
+       #     human_dist = human_responses[human_responses['Question_ID'] == q_id]['Coding'].value_counts(normalize=True)
+       #     llm_dist = llm_responses[llm_responses['Question_ID'] == q_id]['Coding'].value_counts(normalize=True)
+       #
+       #     # Reindex the distributions to have the same index, filling missing values with 0
+       #     all_responses = set(human_dist.index).union(set(llm_dist.index))
+       #     human_dist = human_dist.reindex(all_responses, fill_value=0)
+       #     llm_dist = llm_dist.reindex(all_responses, fill_value=0)
+       #
+       #     # Calculate JS divergence and add to the list
+       #     js_div = jensenshannon(human_dist, llm_dist, base=2)
+       #     experiment_id = q_id.split('_')[1]
+       #     if experiment_id not in js_divergence:
+       #         js_divergence[experiment_id] = []
+       #     js_divergence[experiment_id].append(js_div)
+       #
+       #     js_divergence_list.append(js_div)
+       #     # js_divergence[q_id] = js_div
+       #
+       #     # Calculate the average JS divergence
+       #     # JS per experiment
+       #     avg_js_divergence_per_experiment = {exp: 1 - np.nanmean(divs) for exp, divs in js_divergence.items()}
+       #     print(avg_js_divergence_per_experiment)
+       #
+       #     # JS overall
+       #     avg_js_divergence = 1 - np.nanmean(js_divergence_list)
+       #     print("avg_js_divergence:", avg_js_divergence)
+       #
+       #     return avg_js_divergence
 
 
    def evaluate_humanlike(self, summaries_df: object, human_data_path: object, result_save_path: object) -> object:
@@ -1271,7 +1396,7 @@ class EvaluationModel:
        # print(f'Save human coding results to {save_path}')
        # fpath = Path(save_path)
        # fpath.parent.mkdir(parents=True, exist_ok=True)
-       # self.data.to_csv(fpath)
+       # self.data.to_csv(fpath)
 
 
        '''coding llm data'''
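To summarize the scoring logic added above in isolation: each question contributes one Jensen-Shannon divergence between the human and LLM answer distributions, the score is reported as 1 - mean JSD, and `scipy.stats.bootstrap` supplies a 95% confidence interval. A toy, self-contained sketch (random data, illustrative only, not repo code):

```python
# Toy sketch of the humanlikeness score: 1 - mean Jensen-Shannon divergence, with a bootstrap CI.
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import bootstrap

rng = np.random.default_rng(0)

# Pretend we have 30 questions; each has a human and an LLM answer distribution over 3 options.
human_dists = rng.dirichlet(alpha=[2, 2, 2], size=30)
llm_dists = rng.dirichlet(alpha=[2, 2, 2], size=30)

# One JS divergence per question (base-2, so values lie in [0, 1]).
js_divs = [jensenshannon(h, l, base=2) for h, l in zip(human_dists, llm_dists)]

score = 1 - np.nanmean(js_divs)  # higher = closer to the human response distributions
ci = bootstrap((js_divs,), np.nanmean, confidence_level=0.95, n_resamples=1000).confidence_interval
# Flip the interval because the reported score is 1 - mean JSD.
print(round(score, 3), (round(1 - ci.high, 3), round(1 - ci.low, 3)))
```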
src/backend/util.py CHANGED
@@ -35,9 +35,49 @@ def create_pairs(df):
     return pairs
 
 
-def format_results(model_name: str, revision: str, precision: str,
-                   factual_consistency_rate: float, hallucination_rate: float,
-                   answer_rate: float, avg_summary_len: float) -> dict:
+# def format_results(model_name: str, revision: str, precision: str,
+#                    factual_consistency_rate: float, hallucination_rate: float,
+#                    answer_rate: float, avg_summary_len: float) -> dict:
+#     """
+#     Formats the evaluation results into a structured dictionary.
+#
+#     Args:
+#         model_name (str): The name of the evaluated model.
+#         revision (str): The revision hash of the model.
+#         precision (str): The precision with which the evaluation was run.
+#         factual_consistency_rate (float): The factual consistency rate.
+#         hallucination_rate (float): The hallucination rate.
+#         answer_rate (float): The answer rate.
+#         avg_summary_len (float): The average summary length.
+#
+#     Returns:
+#         dict: A dictionary containing the structured evaluation results.
+#     """
+#     results = {
+#         "config": {
+#             "model_dtype": precision,  # Precision with which you ran the evaluation
+#             "model_name": model_name,  # Name of the model
+#             "model_sha": revision  # Hash of the model
+#         },
+#         "results": {
+#             "hallucination_rate": {
+#                 "hallucination_rate": round(hallucination_rate, 3)
+#             },
+#             "factual_consistency_rate": {
+#                 "factual_consistency_rate": round(factual_consistency_rate, 1)
+#             },
+#             "answer_rate": {
+#                 "answer_rate": round(answer_rate * 100, 1)
+#             },
+#             "average_summary_length": {
+#                 "average_summary_length": round(avg_summary_len, 1)
+#             },
+#         }
+#     }
+#
+#     return results
+
+def format_results(model_name: str, revision: str, precision: str, overall_js: float, overall_ci: tuple, **experiment_scores) -> dict:
     """
     Formats the evaluation results into a structured dictionary.
 
@@ -45,34 +85,28 @@ def format_results(model_name: str, revision: str, precision: str,
        model_name (str): The name of the evaluated model.
        revision (str): The revision hash of the model.
        precision (str): The precision with which the evaluation was run.
-       factual_consistency_rate (float): The factual consistency rate.
-       hallucination_rate (float): The hallucination rate.
-       answer_rate (float): The answer rate.
-       avg_summary_len (float): The average summary length.
+       overall_js (float): The overall average JS divergence.
+       overall_ci (tuple): The confidence interval for the overall JS divergence.
+       experiment_scores: Experiment-specific scores and confidence intervals (E1, E1_ci, E2, E2_ci, ...).
 
    Returns:
        dict: A dictionary containing the structured evaluation results.
    """
+   # Initialize the base structure
    results = {
        "config": {
-           "model_dtype": precision,  # Precision with which you ran the evaluation
-           "model_name": model_name,  # Name of the model
-           "model_sha": revision  # Hash of the model
+           "model_dtype": precision,  # Precision with which you ran the evaluation
+           "model_name": model_name,  # Name of the model
+           "model_sha": revision  # Hash of the model
        },
        "results": {
-           "hallucination_rate": {
-               "hallucination_rate": round(hallucination_rate, 3)
-           },
-           "factual_consistency_rate": {
-               "factual_consistency_rate": round(factual_consistency_rate, 1)
-           },
-           "answer_rate": {
-               "answer_rate": round(answer_rate * 100, 1)
-           },
-           "average_summary_length": {
-               "average_summary_length": round(avg_summary_len, 1)
-           },
+           "overall_js_divergence": overall_js,  # Overall JS divergence
+           "overall_confidence_interval": overall_ci,  # Confidence interval for the overall JS divergence
        }
    }
 
+   # Add experiment-specific results to the dictionary
+   for exp_name, score in experiment_scores.items():
+       results["results"][exp_name] = score  # Add each experiment score and its CI
+
    return results
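A hedged usage sketch of the new `format_results` signature, with invented numbers, showing the leaderboard entry it builds:

```python
# Illustrative call; the model name, revision, and scores are made up.
entry = format_results(
    model_name="org/model",
    revision="abc123",
    precision="float16",
    overall_js=0.81,
    overall_ci=(0.78, 0.84),
    E1=0.90, E1_ci=(0.85, 0.93),
    E2=0.72, E2_ci=(0.66, 0.77),
)
# entry == {
#     "config": {"model_dtype": "float16", "model_name": "org/model", "model_sha": "abc123"},
#     "results": {
#         "overall_js_divergence": 0.81,
#         "overall_confidence_interval": (0.78, 0.84),
#         "E1": 0.90, "E1_ci": (0.85, 0.93),
#         "E2": 0.72, "E2_ci": (0.66, 0.77),
#     },
# }
```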
src/display/about.py CHANGED
@@ -10,8 +10,29 @@ class Task:
 
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    hallucination_rate = Task("hallucination_rate",
-                              "hallucination_rate", "Humanlike Score (%)")
+    Overall = Task("overall_js", "overall_js", "Overall Humanlike %")
+    Overall_ci = Task("overall_ci", "overall_ci", "Overall Humanlike %")
+    E1 = Task("E1", "E1", "E1 Humanlike %")
+    E1_ci = Task("E1", "E1_ci", "E1 CI")
+    E2 = Task("E2", "E2", "E2 Humanlike %")
+    E2_ci = Task("E2", "E2_ci", "E2 CI")
+    E3 = Task("E3", "E3", "E3 Humanlike %")
+    E3_ci = Task("E3", "E3_ci", "E3 CI")
+    E4 = Task("E4", "E4", "E4 Humanlike %")
+    E4_ci = Task("E4", "E4_ci", "E4 CI")
+    E5 = Task("E5", "E5", "E5 Humanlike %")
+    E5_ci = Task("E5", "E5_ci", "E5 CI")
+    E6 = Task("E6", "E6", "E6 Humanlike %")
+    E6_ci = Task("E6", "E6_ci", "E6 CI")
+    E7 = Task("E7", "E7", "E7 Humanlike %")
+    E7_ci = Task("E7", "E7_ci", "E7 CI")
+    E8 = Task("E8", "E8", "E8 Humanlike %")
+    E8_ci = Task("E8", "E8_ci", "E8 CI")
+    E9 = Task("E9", "E9", "E9 Humanlike %")
+    E9_ci = Task("E9", "E9_ci", "E9 CI")
+    E10 = Task("E10", "E10", "E10 Humanlike %")
+    E10_ci = Task("E10", "E10_ci", "E10 CI")
+
     # factual_consistency_rate = Task("factual_consistency_rate", "factual_consistency_rate", "Factual Consistency Rate (%)")
     # answer_rate = Task("answer_rate", "answer_rate", "Answer Rate (%)")
     # average_summary_length = Task("average_summary_length",
@@ -23,10 +44,7 @@ TITLE = """<h1 align="center" id="space-title">Humanlike Evaluation Model (HEM)
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-This leaderboard (by [Vectara](https://vectara.com)) evaluates how often an LLM introduces hallucinations when summarizing a document. <br>
-The leaderboard utilizes [HHEM](https://huggingface.co/vectara/hallucination_evaluation_model), an open source hallucination detection model.<br>
-An improved version (HHEM v2) is integrated into the [Vectara platform](https://console.vectara.com/signup/?utm_source=huggingface&utm_medium=space&utm_term=integration&utm_content=console&utm_campaign=huggingface-space-integration-console).
-
+This leaderboard (by [Xufeng Duan](https://xufengduan.github.io/)) evaluates the similarities between human and model responses in language use <br>
 """
 
 # Which evaluations are you running? how can people reproduce what you have?
@@ -105,49 +123,24 @@ The results are structured in JSON as follows:
     }
 }
 ```
-For additional queries or model submissions, please contact minseok@vectara.com.
+For additional queries or model submissions, please contact xufeng.duan@link.cuhk.edu.hk.
 """
 
 EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card
 
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
 """
 
 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 CITATION_BUTTON_TEXT = r"""
 @dataset{HughesBae2023,
-  author = {Simon Hughes and Minseok Bae},
-  title = {Vectara Hallucination Leaderboard},
-  year = {2023},
-  month = {11},
-  publisher = {Vectara, Inc},
+  author = {Xufeng Duan, Bei Xiao, Xuemei Tang, Zhenguang Cai},
+  title = {Humanlike Leaderboard},
+  year = {2024},
+  month = {8},
+  publisher = {},
   doi = {},
-  url = {https://github.com/vectara/hallucination-leaderboard},
-  abstract = {A leaderboard comparing LLM performance at maintaining factual consistency when summarizing a set of facts.},
-  keywords = {nlp, llm, hallucination, nli, machine learning},
+  url = {https://huggingface.co/spaces/Simondon/HumanLikeness},
+  abstract = {A leaderboard comparing LLM performance at humanlikeness in language use.},
+  keywords = {nlp, llm, psycholinguistics, nli, machine learning},
   license = {Apache-2.0},
 }"""
src/envs.py CHANGED
@@ -5,8 +5,29 @@ from huggingface_hub import HfApi
 
 # replace this with our token
 # TOKEN = os.environ.get("HF_TOKEN", None)
-TOKEN = os.getenv("H4_TOKEN")
+TOKEN = os.getenv("H4_TOKEN1")
 TOKEN1 = os.getenv("H4_TOKEN1")
+TOKEN2 = os.getenv("H4_TOKEN2")
+TOKEN3 = os.getenv("H4_TOKEN3")
+TOKEN4 = os.getenv("H4_TOKEN4")
+TOKEN5 = os.getenv("H4_TOKEN5")
+TOKEN6 = os.getenv("H4_TOKEN6")
+TOKEN7 = os.getenv("H4_TOKEN7")
+TOKEN8 = os.getenv("H4_TOKEN8")
+TOKEN9 = os.getenv("H4_TOKEN9")
+TOKEN10 = os.getenv("H4_TOKEN10")
+TOKEN11 = os.getenv("H4_TOKEN11")
+TOKEN12 = os.getenv("H4_TOKEN12")
+TOKEN13 = os.getenv("H4_TOKEN13")
+TOKEN14 = os.getenv("H4_TOKEN14")
+TOKEN15 = os.getenv("H4_TOKEN15")
+TOKEN16 = os.getenv("H4_TOKEN16")
+TOKEN17 = os.getenv("H4_TOKEN17")
+TOKEN18 = os.getenv("H4_TOKEN18")
+TOKEN19 = os.getenv("H4_TOKEN19")
+TOKEN20 = os.getenv("H4_TOKEN20")
+
+
 # print("H4_token:", TOKEN)
 
 
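These TOKEN1..TOKEN20 variables feed the random token rotation added in model_operations.py. A standalone sketch of that pattern (the `envs` namespace, `call_with_token_rotation`, and the dummy request below are stand-ins for illustration, not repo code):

```python
# Sketch: pick a random TOKEN<i> attribute and retry with another one on failure.
import random
from types import SimpleNamespace

# Stand-in for src.envs, where the real tokens come from environment variables.
envs = SimpleNamespace(**{f"TOKEN{i}": f"hf_dummy_{i}" for i in range(1, 21)})


def call_with_token_rotation(request_fn, max_attempts=10):
    """Try request_fn(token) with randomly chosen tokens until one succeeds."""
    tried = set()
    while len(tried) < max_attempts:
        i = random.randint(1, 20)
        token = getattr(envs, f"TOKEN{i}")
        if token in tried:
            continue
        tried.add(token)
        try:
            return request_fn(token)
        except Exception:
            print(f"TOKEN{i} failed, trying another token...")
    raise RuntimeError("All tokens failed.")


# Example usage with a dummy request that always succeeds.
print(call_with_token_rotation(lambda token: f"called API with {token}"))
```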
src/populate.py CHANGED
@@ -18,11 +18,15 @@ def get_leaderboard_df(results_path: str, requests_path: str, cols: list, benchmark_cols
     df = pd.DataFrame.from_records(all_data_json)
     print("all results:", df)
     # exit()
-    df = df.sort_values(by=[utils.AutoEvalColumn.hallucination_rate.name], ascending=True)
-    df = df[cols].round(decimals=2)
+    try:
+        df = df.sort_values(by=[utils.AutoEvalColumn.hallucination_rate.name], ascending=True)
+        df = df[cols].round(decimals=2)
+        # filter out if any of the benchmarks have not been produced
+        df = df[formatting.has_no_nan_values(df, benchmark_cols)]
+    except:
+        pass
+
 
-    # filter out if any of the benchmarks have not been produced
-    df = df[formatting.has_no_nan_values(df, benchmark_cols)]
     return df
 
 