from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard 
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")

NUM_FEWSHOT = 0  # Number of few-shot examples; change to match your setup
# ---------------------------------------------------
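
# Example (illustrative): how the Task entries above are typically consumed
# when parsing a results file. The results-dict layout
# (results[benchmark][metric]) is an assumption based on the standard
# Hugging Face leaderboard template; adjust the keys if your result files
# are organized differently.
def extract_scores(results: dict) -> dict:
    """Map each Task to its score in a parsed results JSON (assumed layout)."""
    scores = {}
    for task in Tasks:
        cfg = task.value  # the Task dataclass instance
        # e.g. results["anli_r1"]["acc"] feeds the "ANLI" leaderboard column
        scores[cfg.col_name] = results[cfg.benchmark][cfg.metric]
    return scores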



# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">UnlearnDiffAtk Benchmark</h1>"""

# subtitle
SUB_TITLE = """<h2 align="center" id="space-title">An effective and efficient adversarial prompt generation approach for diffusion models</h2>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This benchmark evaluates the robustness of safety-driven unlearned diffusion models (DMs) 
(i.e., DMs after unlearning undesirable concepts, styles, or objects) across a variety of tasks. For more details, please visit the [project page](https://www.optml-group.com/posts/mu_attack), 
check the [code](https://github.com/OPTML-Group/Diffusion-MU-Attack), and read the [paper](https://arxiv.org/abs/2310.11868).\\
Demo of our offensive method: [UnlearnDiffAtk](https://huggingface.co/spaces/Intel/UnlearnDiffAtk)\\
Demo of our defensive method: [AdvUnlearn](https://huggingface.co/spaces/Intel/AdvUnlearn)
"""

# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
For more details on the unlearning methods used in this benchmark:\\
(1) [Erased Stable Diffusion (ESD)](https://github.com/rohitgandikota/erasing);\\
(2) [Forget-Me-Not (FMN)](https://github.com/SHI-Labs/Forget-Me-Not);\\
(3) [Ablating Concepts (AC)](https://github.com/nupurkmr9/concept-ablation);\\
(4) [Unified Concept Editing (UCE)](https://github.com/rohitgandikota/unified-concept-editing);\\
(5) [concept-SemiPermeable Membrane (SPM)](https://github.com/Con6924/SPM); \\
(6) [Saliency Unlearning (SalUn)](https://github.com/OPTML-Group/Unlearn-Saliency); \\
(7) [EraseDiff (ED)](https://github.com/JingWu321/EraseDiff); \\
(8) [ScissorHands (SH)](https://github.com/JingWu321/Scissorhands).

"""

EVALUATION_QUEUE_TEXT = """
Evaluation Metrics: \\
(1) Pre-attack success rate (pre-ASR); lower is better. \\
(2) Post-attack success rate (post-ASR); lower is better. \\
(3) Fréchet inception distance (FID) of images generated by the unlearned models; lower is better. \\
(4) CLIP (Contrastive Language-Image Pretraining) score, which measures contextual alignment with the prompt description; higher is better.
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@article{zhang2023generate,
  title={To Generate or Not? Safety-Driven Unlearned Diffusion Models Are Still Easy To Generate Unsafe Images... For Now},
  author={Zhang, Yimeng and Jia, Jinghan and Chen, Xin and Chen, Aochuan and Zhang, Yihua and Liu, Jiancheng and Ding, Ke and Liu, Sijia},
  journal={arXiv preprint arXiv:2310.11868},
  year={2023}
}

@article{zhang2024defensive,
  title={Defensive Unlearning with Adversarial Training for Robust Concept Erasure in Diffusion Models},
  author={Zhang, Yimeng and Chen, Xin and Jia, Jinghan and Zhang, Yihua and Fan, Chongyu and Liu, Jiancheng and Hong, Mingyi and Ding, Ke and Liu, Sijia},
  journal={arXiv preprint arXiv:2405.15234},
  year={2024}
}
"""