ldkong committed
Commit 3476eae • 1 Parent(s): a1d3429

Update app.py

Files changed (1)
1. app.py  +6 -36
app.py CHANGED

@@ -211,40 +211,6 @@ def name2seq(file_name):
     images = torch.Tensor(images).unsqueeze(dim=0)
     return images
 
-
-def display_gif(file_name, save_name):
-    images = []
-
-    for frame in range(8):
-        frame_name = '%d' % (frame)
-        image_filename = file_name + frame_name + '.png'
-        images.append(imageio.imread(image_filename))
-
-    gif_filename = 'avatar_source.gif'
-    return imageio.mimsave(gif_filename, images)
-
-
-def display_gif_pad(file_name, save_name):
-    images = []
-
-    for frame in range(8):
-        frame_name = '%d' % (frame)
-        image_filename = file_name + frame_name + '.png'
-        image = imageio.imread(image_filename)
-        image = image[:, :, :3]
-        image_pad = cv2.copyMakeBorder(image, 0, 0, 125, 125, cv2.BORDER_CONSTANT, value=0)
-        images.append(image_pad)
-
-    return imageio.mimsave(save_name, images)
-
-
-def display_image(file_name):
-
-    image_filename = file_name + '0' + '.png'
-    print(image_filename)
-    image = imageio.imread(image_filename)
-    imageio.imwrite('image.png', image)
-
 
 def concat(file_name):
     images = []
@@ -304,7 +270,7 @@ model.load_state_dict(torch.load('TransferVAE.pth.tar', map_location=torch.devic
 model.eval()
 
 
-def run(domain_source, action_source, hair_source, top_source, bottom_source, domain_target, action_target, hair_target, top_target, bottom_target):
+def run(action_source, hair_source, top_source, bottom_source, domain_target, action_target, hair_target, top_target, bottom_target):
 
     # == Source Avatar ==
     # body
@@ -422,7 +388,11 @@ def run(domain_source, action_source, hair_source, top_source, bottom_source, do
 gr.Interface(
     fn=run,
     inputs=[
-        gr.Textbox(value="Source Avatar - Human 😢", show_label=False, interactive=False),
+        gr.Markdown(
+            """
+            Source Avatar - Human 😢
+            """
+        ),
         gr.Radio(choices=["slash", "spellcard", "walk"], value="slash"),
         gr.Radio(choices=["green", "yellow", "rose", "red", "wine"], value="green"),
         gr.Radio(choices=["brown", "blue", "white"], value="brown"),
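
The last two hunks change in step because gr.Interface passes the current value of each component in inputs, in order, as a positional argument to fn, so the parameter list of run() and the contents of inputs have to match. The snippet below is not part of this commit; it is a minimal, self-contained sketch (the two-parameter run and the outputs="text" choice are placeholders) illustrating that positional mapping with the same Radio components the app uses.

import gradio as gr

# Placeholder fn: the real app builds an avatar animation instead.
def run(action_source, hair_source):
    return f"action={action_source}, hair={hair_source}"

# Each component in `inputs` supplies one positional argument to `fn`.
demo = gr.Interface(
    fn=run,
    inputs=[
        gr.Radio(choices=["slash", "spellcard", "walk"], value="slash"),
        gr.Radio(choices=["green", "yellow", "rose", "red", "wine"], value="green"),
    ],
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()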