AlekseyCalvin committed on
Commit
fbdebbe
1 Parent(s): eb7660d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -0
app.py CHANGED
@@ -29,6 +29,11 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
29
 
30
  torch.backends.cuda.matmul.allow_tf32 = True
31
 
 
 
 
 
 
32
  dtype = torch.bfloat16
33
  pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype
34
  ).to("cuda")
@@ -54,6 +59,12 @@ pipe.tokenizer_max_length = maxtokens
54
  pipe.text_encoder.dtype = torch.bfloat16
55
  torch.cuda.empty_cache()
56
 
 
 
 
 
 
 
57
  # Load LoRAs from JSON file
58
  with open('loras.json', 'r') as f:
59
  loras = json.load(f)
 
29
 
30
  torch.backends.cuda.matmul.allow_tf32 = True
31
 
32
+ torch._inductor.config.conv_1x1_as_mm = True
33
+ torch._inductor.config.coordinate_descent_tuning = True
34
+ torch._inductor.config.epilogue_fusion = False
35
+ torch._inductor.config.coordinate_descent_check_all_directions = True
36
+
37
  dtype = torch.bfloat16
38
  pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype
39
  ).to("cuda")
 
59
  pipe.text_encoder.dtype = torch.bfloat16
60
  torch.cuda.empty_cache()
61
 
62
+ pipe.transformer.to(memory_format=torch.channels_last)
63
+ pipe.vae.to(memory_format=torch.channels_last)
64
+
65
+ pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
66
+ pipe.vae.decode = torch.compile(pipe.vae.decode, mode="max-autotune", fullgraph=True)
67
+
68
  # Load LoRAs from JSON file
69
  with open('loras.json', 'r') as f:
70
  loras = json.load(f)