lyx97 committed on
Commit a07085a
1 Parent(s): ba19895

commit files to HF hub

constants.py.bak ADDED
@@ -0,0 +1,98 @@
+ # this .py file stores the leaderboard constants
+ MODEL_INFO = ["Model"]
+
+ TASK_INFO = ["Avg. All", "Avg. Multi-Choice", "Avg. Yes/No", "Avg. Caption Matching", "Avg. Caption Generation",
+              "Action. Multi-Choice", "Action. Yes/No", "Action. Caption Matching", "Action. Caption Generation",
+              "Direction. Multi-Choice", "Direction. Yes/No", "Direction. Caption Matching", "Direction. Caption Generation",
+              "Speed. Multi-Choice", "Speed. Yes/No", "Speed. Caption Matching", "Speed. Caption Generation",
+              "Event Order. Multi-Choice", "Event Order. Yes/No", "Event Order. Caption Matching", "Event Order. Caption Generation",
+              "Attribute Change. Multi-Choice", "Attribute Change. Yes/No", "Attribute Change. Caption Matching", "Attribute Change. Caption Generation"]
+
+ AVG_INFO = ["Avg. All", "Avg. Multi-Choice", "Avg. Yes/No", "Avg. Caption Matching", "Avg. Caption Generation"]
+ DATA_TITILE_TYPE = ["markdown",
+                     "number", "number", "number", "number", "number",
+                     "number", "number", "number", "number",
+                     "number", "number", "number", "number",
+                     "number", "number", "number", "number",
+                     "number", "number", "number", "number",
+                     "number", "number", "number", "number",]
+ CSV_DIR = "./file/result.csv"
+
+ # COLUMN_NAMES = MODEL_INFO + TASK_INFO
+ COLUMN_NAMES = MODEL_INFO + TASK_INFO
+
+ LEADERBORAD_INTRODUCTION = """
+ Welcome to the leaderboard of TempCompass! 🏆
+
+ TempCompass is a benchmark to evaluate the temporal perception ability of Video LLMs. It consists of 410 videos and 7,540 task instructions, covering 11 temporal aspects and 4 task types. Please refer to [our paper](https://arxiv.org/abs/2403.00476) for more details.
+ """
+
+ SUBMIT_INTRODUCTION = """
+ # TempCompass Leaderboard
+
+ Welcome to the TempCompass leaderboard! 🏆
+
+ ## Submission Instructions
+ Run inference and automatic evaluation according to our [github repository](https://github.com/llyx97/TempCompass?tab=readme-ov-file#-quick-start).
+
+ You will obtain the JSON file `<task_type>.json`, where `<task_type>` corresponds to one of the four categories: `multi-choice`, `yes_no`, `caption_matching` and `captioning`. (Example files can be found [here](https://github.com/llyx97/TempCompass/tree/main/auto_eval_results/video-llava).)
+
+ For `multi-choice`, `yes_no` and `caption_matching`, the evaluation result of each question contains five keys. A specific example is as follows:
+ ```python
+ {
+     "question": "What activity is the monkey engaged in?\\nA. swimming\\nB. running\\nC. climbing\\nD. fighting",
+     "gt-answer": "D. fighting",
+     "video-llm-prediction": "D",
+     "match_success": true,  # whether the video-llm-prediction can be assessed by rule-based matching
+     "rating": 1
+ }
+ ```
+
+ For `captioning`, we prompt ChatGPT to answer the multi-choice question, using the caption generated by the Video LLM as context. An example of an evaluation result is as follows:
+ ```python
+ {
+     "chatgpt-reasoning": "The video description specifically mentions that the man is dribbling a basketball, dunking a basketball, and passing a basketball.",
+     "chatgpt-answer": "B. dribbling a basketball, C. passing a basketball",
+     "video-llm-prediction": "The video showcases a man dribbling a basketball, dunking a basketball, and passing a basketball. The man is seen moving around the court while performing these actions. The video captures the man's movements and the sound of the ball bouncing on the court. The man's dribbling skills are impressive, and he seems to be in control of the ball at all times. The dunking and passing actions are also executed with precision, and the man's movements are fluid and graceful. Overall, the video is a great display of basketball skills and is sure to impress any basketball",
+     "gt-answer": "A. dunking a basketball",
+     "rating": 0
+ }
+ ```
+
+ ### Submission Example
+ For example, to submit Video-LLaVA's results to the leaderboard, you need to:
+ 1. Fill in ‘Video-LLaVA’ in ‘Model Name’ if this is your first submission (you can leave ‘Revision Model Name’ blank).
+ 2. Fill in ‘Video-LLaVA’ in ‘Revision Model Name’ if you want to update an existing result (you can leave ‘Model Name’ blank).
+ 3. Select ‘ImageLLM’ in ‘Model Type’.
+ 4. Fill in ‘https://github.com/x/x’ in ‘Model Link’.
+ 5. Fill in ‘7B’ in ‘Model size’.
+ 6. Upload `<task_type>.json`.
+ 7. Click the ‘Submit Eval’ button.
+ 8. Click ‘Refresh’ to view the updated leaderboard.
+
+ """
+
+ TABLE_INTRODUCTION = """In the table below, we summarize the per-task performance of all models.
+ We use accuracy (%) as the primary evaluation metric for each task.
+ """
+
+ LEADERBORAD_INFO = """
+ Based on powerful Large Language Models (LLMs), recent generative Multimodal Large Language Models (MLLMs) have gained prominence as a pivotal research area, exhibiting remarkable capability for both comprehension and generation.
+ In this work, we address the evaluation of generative comprehension in MLLMs as a preliminary step towards a comprehensive assessment of generative models, by introducing a benchmark named SEED-Bench.
+ SEED-Bench consists of 19K multiple-choice questions with accurate human annotations (6x larger than existing benchmarks), spanning 12 evaluation dimensions that cover the comprehension of both the image and video modality.
+ We develop an advanced pipeline for generating multiple-choice questions that target specific evaluation dimensions, integrating both automatic filtering and manual verification processes.
+ Multiple-choice questions with groundtruth options derived from human annotation enable an objective and efficient assessment of model performance, eliminating the need for human or GPT intervention during evaluation.
+ We further evaluate the performance of 18 models across all 12 dimensions, covering both spatial and temporal understanding.
+ By revealing the limitations of existing MLLMs through evaluation results, we aim for SEED-Bench to provide insights that motivate future research.
+ """
+
+ CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
+ CITATION_BUTTON_TEXT = r"""
+ @article{liu2024tempcompass,
+   title   = {TempCompass: Do Video LLMs Really Understand Videos?},
+   author  = {Yuanxin Liu and Shicheng Li and Yi Liu and Yuxiang Wang and Shuhuai Ren and Lei Li and Sishuo Chen and Xu Sun and Lu Hou},
+   year    = {2024},
+   journal = {arXiv preprint arXiv: 2403.00476}
+ }
+ """
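For orientation, here is a minimal, hypothetical sketch of how constants like `CSV_DIR` and `COLUMN_NAMES` are typically consumed by a leaderboard app. It is not part of the commit; it assumes a `constants.py` mirroring the `.bak` file above and a result CSV whose columns match `COLUMN_NAMES`.

```python
# Hypothetical illustration, not from the commit: load the result table,
# keep only the leaderboard columns, and sort by the overall average.
import pandas as pd

from constants import COLUMN_NAMES, CSV_DIR  # assumes constants.py mirrors the .bak file above


def get_leaderboard_df() -> pd.DataFrame:
    df = pd.read_csv(CSV_DIR)
    df = df[COLUMN_NAMES]  # "Model" plus the 25 task columns
    return df.sort_values(by="Avg. All", ascending=False).reset_index(drop=True)


if __name__ == "__main__":
    print(get_leaderboard_df().head())
```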
src/about.py CHANGED
@@ -40,33 +40,49 @@ To reproduce our results, here is the commands you can run:
  """
 
  EVALUATION_QUEUE_TEXT = """
- ## Some good practices before submitting a model
- 
- ### 1) Make sure you can load your model and tokenizer using AutoClasses:
- ```python
- from transformers import AutoConfig, AutoModel, AutoTokenizer
- config = AutoConfig.from_pretrained("your model name", revision=revision)
- model = AutoModel.from_pretrained("your model name", revision=revision)
- tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
- ```
- If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
- 
- Note: make sure your model is public!
- Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
- 
- ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
- It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
- 
- ### 3) Make sure your model has an open license!
- This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
- 
- ### 4) Fill up your model card
- When we add extra information about models to the leaderboard, it will be automatically taken from the model card
- 
- ## In case of model failure
- If your model is displayed in the `FAILED` category, its execution stopped.
- Make sure you have followed the above steps first.
- If everything is done, check you can launch the EleutherAIHarness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+ # TempCompass Leaderboard
+ 
+ Welcome to the TempCompass leaderboard! 🏆
+ 
+ ## Submission Instructions
+ Run inference and automatic evaluation according to our [github repository](https://github.com/llyx97/TempCompass?tab=readme-ov-file#-quick-start).
+ 
+ You will obtain the JSON file `<task_type>.json`, where `<task_type>` corresponds to one of the four categories: `multi-choice`, `yes_no`, `caption_matching` and `captioning`. (Example files can be found [here](https://github.com/llyx97/TempCompass/tree/main/auto_eval_results/video-llava).)
+ 
+ For `multi-choice`, `yes_no` and `caption_matching`, the evaluation result of each question contains five keys. A specific example is as follows:
+ ```python
+ {
+     "question": "What activity is the monkey engaged in?\\nA. swimming\\nB. running\\nC. climbing\\nD. fighting",
+     "gt-answer": "D. fighting",
+     "video-llm-prediction": "D",
+     "match_success": true,  # whether the video-llm-prediction can be assessed by rule-based matching
+     "rating": 1
+ }
+ ```
+ 
+ For `captioning`, we prompt ChatGPT to answer the multi-choice question, using the caption generated by the Video LLM as context. An example of an evaluation result is as follows:
+ ```python
+ {
+     "chatgpt-reasoning": "The video description specifically mentions that the man is dribbling a basketball, dunking a basketball, and passing a basketball.",
+     "chatgpt-answer": "B. dribbling a basketball, C. passing a basketball",
+     "video-llm-prediction": "The video showcases a man dribbling a basketball, dunking a basketball, and passing a basketball. The man is seen moving around the court while performing these actions. The video captures the man's movements and the sound of the ball bouncing on the court. The man's dribbling skills are impressive, and he seems to be in control of the ball at all times. The dunking and passing actions are also executed with precision, and the man's movements are fluid and graceful. Overall, the video is a great display of basketball skills and is sure to impress any basketball",
+     "gt-answer": "A. dunking a basketball",
+     "rating": 0
+ }
+ ```
+ 
+ ### Submission Example
+ For example, to submit Video-LLaVA's results to the leaderboard, you need to:
+ 1. Fill in ‘Video-LLaVA’ in ‘Model Name’ if this is your first submission (you can leave ‘Revision Model Name’ blank).
+ 2. Fill in ‘Video-LLaVA’ in ‘Revision Model Name’ if you want to update an existing result (you can leave ‘Model Name’ blank).
+ 3. Select ‘ImageLLM’ in ‘Model Type’.
+ 4. Fill in ‘https://github.com/x/x’ in ‘Model Link’.
+ 5. Fill in ‘7B’ in ‘Model size’.
+ 6. Upload `<task_type>.json`.
+ 7. Click the ‘Submit Eval’ button.
+ 8. Click ‘Refresh’ to view the updated leaderboard.
+ 
  """
 
  CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
src/submission/check_validity.py CHANGED
@@ -97,3 +97,10 @@ def already_submitted_models(requested_models_dir: str) -> set[str]:
          users_to_submission_dates[organisation].append(info["submitted_time"])
 
      return set(file_names), users_to_submission_dates
+ 
+ def validate_model_size(s):
+     pattern = r'^\d+B$|^-$'
+     if re.match(pattern, s):
+         return s
+     else:
+         return '-'
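A quick, self-contained check of the new validator's behavior (the helper is copied from the hunk above; it assumes `re` is imported at the top of `check_validity.py`, which is outside this hunk):

```python
import re


def validate_model_size(s):
    # Accept "<digits>B" (e.g. "7B", "13B") or the placeholder "-"; anything else becomes "-".
    pattern = r'^\d+B$|^-$'
    if re.match(pattern, s):
        return s
    else:
        return '-'


print(validate_model_size("7B"))     # -> "7B"
print(validate_model_size("13B"))    # -> "13B"
print(validate_model_size("7b"))     # -> "-" (lowercase rejected)
print(validate_model_size("seven"))  # -> "-"
```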
src/submission/submit.py CHANGED
@@ -6,21 +6,19 @@ from src.display.formatting import styled_error, styled_message, styled_warning
  from src.envs import API, EVAL_REQUESTS_PATH, TOKEN, QUEUE_REPO
  from src.submission.check_validity import (
      already_submitted_models,
-     check_model_card,
-     get_model_size,
-     is_model_on_hub,
+     validate_model_size,
  )
 
  REQUESTED_MODELS = None
  USERS_TO_SUBMISSION_DATES = None
 
+ 
  def add_new_eval(
      model: str,
-     base_model: str,
      revision: str,
-     precision: str,
-     weight_type: str,
+     model_size: str,
      model_type: str,
+     model_link: str,
  ):
      global REQUESTED_MODELS
      global USERS_TO_SUBMISSION_DATES
@@ -33,7 +31,6 @@ def add_new_eval(
      user_name = model.split("/")[0]
      model_path = model.split("/")[1]
 
-     precision = precision.split(" ")[0]
      current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
 
      if model_type is None or model_type == "":
@@ -43,61 +40,34 @@ def add_new_eval(
      if revision == "":
          revision = "main"
 
-     # Is the model on the hub?
-     if weight_type in ["Delta", "Adapter"]:
-         base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
-         if not base_model_on_hub:
-             return styled_error(f'Base model "{base_model}" {error}')
- 
-     if not weight_type == "Adapter":
-         model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
-         if not model_on_hub:
-             return styled_error(f'Model "{model}" {error}')
- 
-     # Is the model info correctly filled?
-     try:
-         model_info = API.model_info(repo_id=model, revision=revision)
-     except Exception:
-         return styled_error("Could not get your model information. Please fill it up properly.")
- 
-     model_size = get_model_size(model_info=model_info, precision=precision)
- 
-     # Were the model card and license filled?
-     try:
-         license = model_info.cardData["license"]
-     except Exception:
-         return styled_error("Please select a license for your model")
- 
-     modelcard_OK, error_msg = check_model_card(model)
-     if not modelcard_OK:
-         return styled_error(error_msg)
+     model_size = validate_model_size(model_size)
+ 
+     if model_link == '':
+         model_name = model  # no url
+     else:
+         model_name = '[' + model + '](' + model_link + ')'
 
      # Seems good, creating the eval
      print("Adding new eval")
 
      eval_entry = {
-         "model": model,
-         "base_model": base_model,
+         "model_name": model_name,
          "revision": revision,
-         "precision": precision,
-         "weight_type": weight_type,
          "status": "PENDING",
          "submitted_time": current_time,
          "model_type": model_type,
-         "likes": model_info.likes,
          "params": model_size,
-         "license": license,
          "private": False,
      }
 
      # Check for duplicate submission
-     if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+     if f"{model}_{revision}" in REQUESTED_MODELS:
          return styled_warning("This model has been already submitted.")
 
      print("Creating eval file")
      OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
      os.makedirs(OUT_DIR, exist_ok=True)
-     out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
+     out_path = f"{OUT_DIR}/{model_path}_eval_request_False.json"
 
      with open(out_path, "w") as f:
          f.write(json.dumps(eval_entry))
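To make the effect of the simplified flow concrete, here is a hypothetical example of the request record the new `add_new_eval` writes; all field values are illustrative, and the placeholder link mirrors the one in the submission instructions.

```python
import json

# Hypothetical eval-request entry, as written to
# {EVAL_REQUESTS_PATH}/{user_name}/{model_path}_eval_request_False.json
eval_entry = {
    "model_name": "[Video-LLaVA](https://github.com/x/x)",  # markdown link built from model + model_link
    "revision": "main",
    "status": "PENDING",
    "submitted_time": "2024-03-20T12:00:00Z",  # made-up timestamp
    "model_type": "ImageLLM",
    "params": "7B",  # already normalized by validate_model_size
    "private": False,
}

print(json.dumps(eval_entry, indent=2))
```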
src/utils_display.py ADDED
@@ -0,0 +1,99 @@
+ from dataclasses import dataclass
+
+ # These classes are for user-facing column names, to avoid having to change them
+ # all around the code when a modification is needed
+ @dataclass
+ class ColumnContent:
+     name: str
+     type: str
+     displayed_by_default: bool
+     hidden: bool = False
+
+ def fields(raw_class):
+     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
+
+ @dataclass(frozen=True)
+ class AutoEvalColumn:  # Auto evals column
+     model_type_symbol = ColumnContent("T", "str", True)
+     model = ColumnContent("Model", "markdown", True)
+     average = ColumnContent("Average ⬆️", "number", True)
+     arc = ColumnContent("ARC", "number", True)
+     hellaswag = ColumnContent("HellaSwag", "number", True)
+     mmlu = ColumnContent("MMLU", "number", True)
+     truthfulqa = ColumnContent("TruthfulQA", "number", True)
+     model_type = ColumnContent("Type", "str", False)
+     precision = ColumnContent("Precision", "str", False, True)
+     license = ColumnContent("Hub License", "str", False)
+     params = ColumnContent("#Params (B)", "number", False)
+     likes = ColumnContent("Hub ❤️", "number", False)
+     revision = ColumnContent("Model sha", "str", False, False)
+     dummy = ColumnContent("model_name_for_query", "str", True)  # dummy col to implement the search bar (hidden by custom CSS)
+
+ @dataclass(frozen=True)
+ class EloEvalColumn:  # Elo evals column
+     model = ColumnContent("Model", "markdown", True)
+     gpt4 = ColumnContent("GPT-4 (all)", "number", True)
+     human_all = ColumnContent("Human (all)", "number", True)
+     human_instruct = ColumnContent("Human (instruct)", "number", True)
+     human_code_instruct = ColumnContent("Human (code-instruct)", "number", True)
+
+
+ @dataclass(frozen=True)
+ class EvalQueueColumn:  # Queue column
+     model = ColumnContent("model", "markdown", True)
+     revision = ColumnContent("revision", "str", True)
+     private = ColumnContent("private", "bool", True)
+     precision = ColumnContent("precision", "bool", True)
+     weight_type = ColumnContent("weight_type", "str", "Original")
+     status = ColumnContent("status", "str", True)
+
+ LLAMAS = ["huggingface/llama-7b", "huggingface/llama-13b", "huggingface/llama-30b", "huggingface/llama-65b"]
+
+
+ KOALA_LINK = "https://huggingface.co/TheBloke/koala-13B-HF"
+ VICUNA_LINK = "https://huggingface.co/lmsys/vicuna-13b-delta-v1.1"
+ OASST_LINK = "https://huggingface.co/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5"
+ DOLLY_LINK = "https://huggingface.co/databricks/dolly-v2-12b"
+ MODEL_PAGE = "https://huggingface.co/models"
+ LLAMA_LINK = "https://ai.facebook.com/blog/large-language-model-llama-meta-ai/"
+ VICUNA_LINK = "https://huggingface.co/CarperAI/stable-vicuna-13b-delta"
+ ALPACA_LINK = "https://crfm.stanford.edu/2023/03/13/alpaca.html"
+
+
+ def model_hyperlink(link, model_name):
+     return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'
+
+
+ def make_clickable_model(model_name):
+     link = f"https://huggingface.co/{model_name}"
+
+     if model_name in LLAMAS:
+         link = LLAMA_LINK
+         model_name = model_name.split("/")[1]
+     elif model_name == "HuggingFaceH4/stable-vicuna-13b-2904":
+         link = VICUNA_LINK
+         model_name = "stable-vicuna-13b"
+     elif model_name == "HuggingFaceH4/llama-7b-ift-alpaca":
+         link = ALPACA_LINK
+         model_name = "alpaca-13b"
+     if model_name == "dolly-12b":
+         link = DOLLY_LINK
+     elif model_name == "vicuna-13b":
+         link = VICUNA_LINK
+     elif model_name == "koala-13b":
+         link = KOALA_LINK
+     elif model_name == "oasst-12b":
+         link = OASST_LINK
+     # else:
+     #     link = MODEL_PAGE
+
+     return model_hyperlink(link, model_name)
+
+ def styled_error(error):
+     return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+ def styled_warning(warn):
+     return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+ def styled_message(message):
+     return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"
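For context, here is a small hypothetical snippet showing how these display helpers are typically used; it is not part of the commit, and the import path simply assumes the file lives at `src/utils_display.py` as added here.

```python
from src.utils_display import AutoEvalColumn, fields, make_clickable_model

# Headers and Gradio datatypes for the columns shown by default.
headers = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]
types = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden]

print(headers)  # e.g. ['T', 'Model', 'Average ⬆️', 'ARC', ...]
print(make_clickable_model("huggingface/llama-7b"))  # -> '<a target="_blank" href="...">llama-7b</a>'
```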