John6666 committed on
Commit b8f8e7c
1 Parent(s): 9b07f4f

Upload app.py

Files changed (1)
  1. app.py +24 -7
app.py CHANGED
@@ -24,9 +24,11 @@ from diffusers import AutoencoderKL
  from torch import Tensor, nn
  from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
  from safetensors.torch import load_file
+ from diffusers import FluxTransformer2DModel
+ from pipeline_flux_de_distill import FluxPipeline
  dtype = torch.bfloat16
- from huggingface_hub import snapshot_download
- model_path = snapshot_download(repo_id="nyanko7/flux-dev-de-distill")
+ #from huggingface_hub import snapshot_download
+ #model_path = snapshot_download(repo_id="nyanko7/flux-dev-de-distill")
  device = "cuda" if torch.cuda.is_available() else "cpu"
  # ---------------- Encoders ----------------

@@ -65,10 +67,14 @@ class HFEmbedder(nn.Module):
          return outputs[self.output_key]


+ model_path = "camenduru/FLUX.1-dev-diffusers"
+ transformer = FluxTransformer2DModel.from_pretrained("InstantX/flux-dev-de-distill-diffusers", torch_dtype=torch.bfloat16)
+ pipeline = FluxPipeline.from_pretrained(model_path, transformer=transformer, torch_dtype=torch.bfloat16).to(device)
+
  #device = "cuda"
- t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
- clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
- ae = AutoencoderKL.from_pretrained("camenduru/FLUX.1-dev-diffusers", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
+ #t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
+ #clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
+ #ae = AutoencoderKL.from_pretrained("camenduru/FLUX.1-dev-diffusers", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
  # quantize(t5, weights=qfloat8)
  # freeze(t5)

@@ -655,9 +661,10 @@ def generate_image(
  ):
      if seed == 0:
          seed = int(random.random() * 1000000)
+     generator = torch.Generator(device=device).manual_seed(seed)

      #device = "cuda" if torch.cuda.is_available() else "cpu"
-     torch_device = torch.device(device)
+     """torch_device = torch.device(device)

      if do_img2img and init_image is not None:
          init_image = get_image(init_image)
@@ -696,8 +703,18 @@

      x = x.clamp(-1, 1)
      x = rearrange(x[0], "c h w -> h w c")
-     img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
+     img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())"""

+     img = pipeline(
+         prompt=prompt,
+         negative_prompt=neg_prompt,
+         guidance_scale=guidance,
+         num_inference_steps=num_steps,
+         width=width,
+         height=height,
+         generator=generator,
+     ).images[0]
+
      return img, seed

  def create_demo():
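
For reference, the generation path introduced by this commit can be exercised standalone roughly as sketched below. This is a minimal sketch, assuming the Space's local pipeline_flux_de_distill.py module sits next to the script; the prompt, negative prompt, step count, guidance scale, and resolution are illustrative values, not the app's defaults.

import torch
from diffusers import FluxTransformer2DModel
from pipeline_flux_de_distill import FluxPipeline  # local pipeline module used by the Space

device = "cuda" if torch.cuda.is_available() else "cpu"

# De-distilled Flux transformer swapped into the stock FLUX.1-dev components,
# mirroring the model IDs used in the diff above.
transformer = FluxTransformer2DModel.from_pretrained(
    "InstantX/flux-dev-de-distill-diffusers", torch_dtype=torch.bfloat16
)
pipeline = FluxPipeline.from_pretrained(
    "camenduru/FLUX.1-dev-diffusers", transformer=transformer, torch_dtype=torch.bfloat16
).to(device)

# Seeded generator, matching the seed handling added in generate_image().
generator = torch.Generator(device=device).manual_seed(42)

image = pipeline(
    prompt="a watercolor fox in a snowy forest",  # illustrative
    negative_prompt="blurry, low quality",        # illustrative
    guidance_scale=3.5,                           # illustrative
    num_inference_steps=28,                       # illustrative
    width=1024,
    height=1024,
    generator=generator,
).images[0]
image.save("out.png")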