ysharma (HF staff) committed on
Commit a463433
1 Parent(s): efe97a9

Update app.py

Files changed (1)
  1. app.py +3 -17
app.py CHANGED
@@ -8,18 +8,16 @@ import spaces
 import torch
 from PIL import Image
 from diffusers import FluxInpaintPipeline
-
 from gradio_client import Client, handle_file
-from PIL import Image
 
 # Set an environment variable
 HF_TOKEN = os.environ.get("HF_TOKEN", None)
 
 MARKDOWN = """
 # FLUX.1 Inpainting with Text guided Mask🔥
-Shoutout to [Black Forest Labs](https://huggingface.co/black-forest-labs) team for FLUX,
-[Piotr Skalski](https://huggingface.co/SkalskiP),and [Gothos](https://github.com/Gothos)
-for enabling and [showcasing inpainting](https://huggingface.co/spaces/SkalskiP/FLUX.1-inpaint) with the FLUX.
+Shoutout to [Black Forest Labs](https://huggingface.co/black-forest-labs) team for FLUX!
+Special thanks to [Piotr Skalski](https://huggingface.co/SkalskiP) and [Gothos](https://github.com/Gothos)
+for their work on enabling and [showcasing inpainting](https://huggingface.co/spaces/SkalskiP/FLUX.1-inpaint) with the FLUX.
 """
 
 MAX_SEED = np.iinfo(np.int32).max
@@ -29,7 +27,6 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 # Using Gradio Python Client to query EVF-SAM demo, hosted on SPaces, as an endpoint
 client = Client("ysharma/evf-sam", hf_token=HF_TOKEN)
 
-
 pipe = FluxInpaintPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
 
@@ -60,17 +57,12 @@ def resize_image_dimensions(
 
 
 def evf_sam_mask(image, prompt):
-    print(image)
     images = client.predict(
         image_np=handle_file(image),
         prompt=prompt,
         api_name="/predict")
-    print(images)
     # Open the mask image
     pil_image = Image.open(images[1])
-    print(pil_image)
-    print(type(pil_image))
-
     return pil_image
 
 @spaces.GPU(duration=150)
@@ -88,13 +80,7 @@ def process(
         gr.Info("Please enter a text prompt.")
         return None
 
-    #image = input_image_editor['background']
-    #mask = input_image_editor['layers'][0]
-    print(f"type of image: {type(input_image)}")
     mask = evf_sam_mask(input_image, input_text)
-    print(f"type of mask: {type(mask)}")
-    print(f"inpaint_text: {inpaint_text}")
-    print(f"input_text: {input_text}")
 
     if not input_image:
         gr.Info("Please upload an image.")
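
For reference, here is a minimal standalone sketch of the mask lookup this commit cleans up: it queries the same EVF-SAM Space through gradio_client and opens the returned mask, mirroring evf_sam_mask() after the change. The get_mask name and the example path/prompt are illustrative; the Space name, the /predict signature, and the assumption that the mask is the second returned file all come from the diff above.

# Sketch of the EVF-SAM call used by evf_sam_mask() after this commit.
# Assumes HF_TOKEN grants access to the ysharma/evf-sam Space and that
# /predict returns file paths whose second entry is the mask image.
import os
from PIL import Image
from gradio_client import Client, handle_file

client = Client("ysharma/evf-sam", hf_token=os.environ.get("HF_TOKEN", None))

def get_mask(image_path: str, prompt: str) -> Image.Image:
    # handle_file() uploads the local image to the Space endpoint.
    outputs = client.predict(
        image_np=handle_file(image_path),
        prompt=prompt,
        api_name="/predict",
    )
    # outputs[1] is the text-guided segmentation mask, per app.py.
    return Image.open(outputs[1])

# Example usage (illustrative path and prompt):
# mask = get_mask("dog.png", "the dog's collar")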