RohitGandikota committed
Commit d3a1ab0 • 1 Parent(s): 8c8d68c
fixing markdown issue
app.py
CHANGED
@@ -105,8 +105,7 @@ class Demo:
 
         with gr.Row():
 
-            self.explain_train= gr.Markdown(
-                value='In this part you can train a concept slider for Stable Diffusion XL. Enter a target concept you wish to make an edit on. Next, enter a enhance prompt of the attribute you wish to edit (for controlling age of a person, enter "person, old"). Then, type the supress prompt of the attribute (for our example, enter "person, young"). Then press "train" button. With default settings, it takes about 15 minutes to train a slider; then you can try inference above or download the weights. Code and details are at [github link](https://github.com/rohitgandikota/sliders).')
+            self.explain_train= gr.Markdown(value='In this part you can train a concept slider for Stable Diffusion XL. Enter a target concept you wish to make an edit on. Next, enter a enhance prompt of the attribute you wish to edit (for controlling age of a person, enter "person, old"). Then, type the supress prompt of the attribute (for our example, enter "person, young"). Then press "train" button. With default settings, it takes about 15 minutes to train a slider; then you can try inference above or download the weights. Code and details are at [github link](https://github.com/rohitgandikota/sliders).')
 
         with gr.Row():
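Note: the change above inlines the long help string into the gr.Markdown(...) call. As background (an assumption about the "markdown issue", not stated in the commit), Markdown treats lines indented by four or more spaces as code blocks, so help text assembled across indented source lines is a common cause of broken rendering. A minimal sketch of one way to keep such a string readable in source while stripping the indentation, using the standard textwrap module (names are illustrative, not from app.py):

import textwrap

import gradio as gr

# Illustrative help text; textwrap.dedent removes the common leading
# indentation so the Markdown renderer does not treat it as a code block.
EXPLAIN_TRAIN = textwrap.dedent("""\
    Train a concept slider for Stable Diffusion XL: enter a target concept,
    an enhance prompt (e.g. "person, old"), a suppress prompt
    (e.g. "person, young"), and press "train".
    """)

with gr.Blocks() as blocks:
    explain_train = gr.Markdown(value=EXPLAIN_TRAIN)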
@@ -183,40 +182,40 @@ class Demo:
 
     def train(self, prompt, train_method, neg_guidance, iterations, lr, pbar = gr.Progress(track_tqdm=True)):
 
-        torch.cuda.empty_cache()
+        # if self.training:
+        #     return [gr.update(interactive=True, value='Train'), gr.update(value='Someone else is training... Try again soon'), None, gr.update()]
+        # if train_method == 'ESD-x':
+        #     modules = ".*attn2$"
+        #     frozen = []
+        # elif train_method == 'ESD-u':
+        #     modules = "unet$"
+        #     frozen = [".*attn2$", "unet.time_embedding$", "unet.conv_out$"]
+        # elif train_method == 'ESD-self':
+        #     modules = ".*attn1$"
+        #     frozen = []
+        # randn = torch.randint(1, 10000000, (1,)).item()
+        # save_path = f"models/{randn}_{prompt.lower().replace(' ', '')}.pt"
+        # self.training = True
+        # train(prompt, modules, frozen, iterations, neg_guidance, lr, save_path)
+        # self.training = False
+        # torch.cuda.empty_cache()
+        # model_map['Custom'] = save_path
+        # return [gr.update(interactive=True, value='Train'), gr.update(value='Done Training! \n Try your custom model in the "Test" tab'), save_path, gr.Dropdown.update(choices=list(model_map.keys()), value='Custom')]
+        return None
 
     def inference(self, prompt, seed, model_name, pbar = gr.Progress(track_tqdm=True)):
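Note: the commented-out body above maps each training variant to the UNet modules that receive the slider update and those kept frozen: ESD-x targets cross-attention (attn2), ESD-u the rest of the UNet, and ESD-self self-attention (attn1). A small standalone sketch of that mapping, distilled from the comments (the helper name select_modules is illustrative, not part of the repo):

from typing import List, Tuple

def select_modules(train_method: str) -> Tuple[str, List[str]]:
    """Return (trainable-module regex, frozen-module regexes) for an ESD variant."""
    if train_method == 'ESD-x':
        return ".*attn2$", []                  # cross-attention only
    elif train_method == 'ESD-u':
        return "unet$", [".*attn2$", "unet.time_embedding$", "unet.conv_out$"]
    elif train_method == 'ESD-self':
        return ".*attn1$", []                  # self-attention only
    raise ValueError(f"unknown train_method: {train_method}")

# e.g. modules, frozen = select_modules('ESD-x')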
@@ -227,38 +226,40 @@ class Demo:
 
         model_path = model_map[model_name]
 
         checkpoint = torch.load(model_path)
+
+        return None
+        # finetuner = FineTunedModel.from_checkpoint(self.diffuser, checkpoint).eval().half()
+        # torch.cuda.empty_cache()
+        # images = self.diffuser(
+        #     prompt,
+        #     n_steps=50,
+        #     generator=generator
+        # )
+        # orig_image = images[0][0]
+        # torch.cuda.empty_cache()
+        # generator = torch.manual_seed(seed)
+        # with finetuner:
+        #     images = self.diffuser(
+        #         prompt,
+        #         n_steps=50,
+        #         generator=generator
+        #     )
+        # edited_image = images[0][0]
+        # del finetuner
+        # torch.cuda.empty_cache()
+        # return edited_image, orig_image
 
 demo = Demo()
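Note: the disabled inference path above loads the slider checkpoint, renders the prompt once with the base diffuser and once with the fine-tuned weights applied, and returns both images. A sketch of that flow, assuming FineTunedModel and the diffuser wrapper behave as the comments suggest (their import paths are not shown in this diff); unlike the commented code, the generator is re-seeded before each call so both images share the same noise:

import torch

# FineTunedModel comes from the sliders repo; its import path is not part of
# this diff, so it is assumed to already be in scope.

def run_inference(diffuser, model_path, prompt, seed, n_steps=50):
    """Sketch: return (slider-edited image, original image) for one prompt."""
    checkpoint = torch.load(model_path)
    finetuner = FineTunedModel.from_checkpoint(diffuser, checkpoint).eval().half()

    generator = torch.manual_seed(seed)
    orig_image = diffuser(prompt, n_steps=n_steps, generator=generator)[0][0]

    generator = torch.manual_seed(seed)   # re-seed so both runs match
    with finetuner:                       # slider weights are patched in here
        edited_image = diffuser(prompt, n_steps=n_steps, generator=generator)[0][0]

    del finetuner
    torch.cuda.empty_cache()
    return edited_image, orig_image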