lyx97 commited on
Commit
3e4a9fd
1 Parent(s): f8a3804
Files changed (1) hide show
  1. src/about.py +10 -2
src/about.py CHANGED
@@ -21,11 +21,13 @@ NUM_FEWSHOT = 0 # Change with your few shot
21
 
22
 
23
  # Your leaderboard name
24
- TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
25
 
26
  # What does your leaderboard evaluate?
27
  INTRODUCTION_TEXT = """
28
- Intro text
 
 
29
  """
30
 
31
  # Which evaluations are you running? how can people reproduce what you have?
@@ -69,4 +71,10 @@ If everything is done, check you can launch the EleutherAIHarness on your model
69
 
70
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
71
  CITATION_BUTTON_TEXT = r"""
 
 
 
 
 
 
72
  """
 
21
 
22
 
23
  # Your leaderboard name
24
+ TITLE = """<h1 align="center" id="space-title">TempCompass leaderboard</h1>"""
25
 
26
  # What does your leaderboard evaluate?
27
  INTRODUCTION_TEXT = """
28
+ Welcome to the TempCompass leaderboard! 🏆
29
+
30
+ TempCompass is a benchmark to evaluate the temporal perception ability of Video LLMs. It consists of 410 videos and 7,540 task instructions, covering 11 temporal aspects and 4 task types. Please refer to [our paper](https://arxiv.org/abs/2403.00476) for more details.
31
  """
32
 
33
  # Which evaluations are you running? how can people reproduce what you have?
 
71
 
72
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
73
  CITATION_BUTTON_TEXT = r"""
74
+ @article{liu2024tempcompass,
75
+ title = {TempCompass: Do Video LLMs Really Understand Videos?},
76
+ author = {Yuanxin Liu and Shicheng Li and Yi Liu and Yuxiang Wang and Shuhuai Ren and Lei Li and Sishuo Chen and Xu Sun and Lu Hou},
77
+ year = {2024},
78
+ journal = {arXiv preprint arXiv:2403.00476}
79
+ }
80
  """