bryandts committed on
Commit
67fe846
1 Parent(s): 62a0b05

Update app.py

Files changed (1)
  1. app.py +18 -1
app.py CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import numpy as np
 import torch
 import torchvision.transforms as transforms
 from sentence_transformers import SentenceTransformer, util
@@ -9,6 +10,22 @@ import random
 import torch
 import torch.nn as nn
 from generatorModel import Generator
+import cv2
+
+def upscale_and_sharpen_image(input_array):
+    # Upscale the image to 256x256
+    upscaled_img = cv2.resize(input_array, (256, 256), interpolation=cv2.INTER_LANCZOS4)
+
+    # Define a sharpening kernel
+    sharpening_kernel = np.array([[-1, -1, -1],
+                                  [-1, 9, -1],
+                                  [-1, -1, -1]])
+
+    # Apply the sharpening kernel using filter2D
+    sharpened_img = cv2.filter2D(upscaled_img, -1, sharpening_kernel)
+
+    # Return the processed array
+    return sharpened_img
 
 def load_embedding(model):
     # Load your model and other components here
@@ -52,7 +69,7 @@ def generate_image(caption):
     fake_images = generator(noise, embeddings[sampled_item].unsqueeze(0).unsqueeze(0))
     img = fake_images.squeeze(0).permute(1, 2, 0).cpu().detach().numpy()
     img = (img - img.min()) / (img.max() - img.min())
-
+    img = upscale_and_sharpen_image(img)
     return img
 
 noise_dim = 16
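
For context, a minimal standalone sketch of the post-processing path this commit adds, assuming a normalized float image in [0, 1]; the 64x64 input size and the dummy array are illustrative assumptions, not taken from app.py:

import cv2
import numpy as np

# Assumed stand-in for the normalized generator output (the size is an assumption).
dummy = np.random.rand(64, 64, 3).astype(np.float32)

# Same steps as upscale_and_sharpen_image(): Lanczos upscale to 256x256,
# then a 3x3 sharpening kernel applied with cv2.filter2D.
upscaled = cv2.resize(dummy, (256, 256), interpolation=cv2.INTER_LANCZOS4)
kernel = np.array([[-1, -1, -1],
                   [-1,  9, -1],
                   [-1, -1, -1]])
sharpened = cv2.filter2D(upscaled, -1, kernel)
print(sharpened.shape)  # (256, 256, 3)

The kernel sums to 1, so overall brightness is roughly preserved while edges are boosted; sharpening can push values slightly outside [0, 1], so clipping with np.clip before display may be worth considering.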