RamAnanth1 committed 8d29e07 (parent: ee26531): Modify pgd to handle inpainting

app.py CHANGED
@@ -58,6 +58,29 @@ def pgd(X, model, eps=0.1, step_size=0.015, iters=40, clamp_min=0, clamp_max=1,
             X_adv.data *= mask
 
     return X_adv
+
+def pgd_inpaint(X, target, model, criterion, eps=0.1, step_size=0.015, iters=40, clamp_min=0, clamp_max=1, mask=None):
+    X_adv = X.clone().detach() + (torch.rand(*X.shape)*2*eps-eps).cuda()
+    pbar = tqdm(range(iters))
+    for i in pbar:
+        actual_step_size = step_size - (step_size - step_size / 100) / iters * i
+        X_adv.requires_grad_(True)
+
+        loss = (model(X_adv).latent_dist.mean - target).norm()
+
+        pbar.set_description(f"[Running attack]: Loss {loss.item():.5f} | step size: {actual_step_size:.4}")
+
+        grad, = torch.autograd.grad(loss, [X_adv])
+
+        X_adv = X_adv - grad.detach().sign() * actual_step_size
+        X_adv = torch.minimum(torch.maximum(X_adv, X - eps), X + eps)
+        X_adv.data = torch.clamp(X_adv, min=clamp_min, max=clamp_max)
+        X_adv.grad = None
+
+        if mask is not None:
+            X_adv.data *= mask
+
+    return X_adv
 
 def process_image_img2img(raw_image,prompt):
     resize = T.transforms.Resize(512)
@@ -115,7 +138,7 @@ def process_image_inpaint(raw_image,mask, prompt):
     # Here we attack towards the embedding of a random target image. You can also simply attack towards an embedding of zeros!
     target = pipe_inpaint.vae.encode(preprocess(target_image).half().cuda()).latent_dist.mean
 
-    adv_X = pgd(X,
+    adv_X = pgd_inpaint(X,
                 target = target,
                 model=pipe_inpaint.vae.encode,
                 criterion=torch.nn.MSELoss(),