dataautogpt3 committed
Commit c6a5360 • 1 Parent(s): d1f1400
Update README.md
README.md CHANGED
@@ -21,7 +21,7 @@ In pursuit of optimal performance, numerous LORA (Low-Rank Adaptation) models ar
 
 ## Settings for ProteusV0.3-Lighting
 
-Use these settings for the best results with ProteusV0.3:
+Use these settings for the best results with ProteusV0.3-Lighting:
 
 CFG Scale: Use a CFG scale of 1 to 2
 
@@ -43,7 +43,7 @@ if you are having trouble coming up with prompts you can use this GPT I put toge
 import torch
 from diffusers import (
     StableDiffusionXLPipeline,
-
+    EulerAncestralDiscreteScheduler,
     AutoencoderKL
 )
 
@@ -59,7 +59,7 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
     vae=vae,
     torch_dtype=torch.float16
 )
-pipe.scheduler =
+pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 pipe.to('cuda')
 
 # Define prompts and generate image
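For reference, here is a minimal sketch of how the post-change snippet fits together as a runnable script. The model id `dataautogpt3/ProteusV0.3-Lighting` and the `madebyollin/sdxl-vae-fp16-fix` VAE are assumptions (neither appears in these hunks), and the prompt, resolution, and step count are purely illustrative; only the scheduler swap and the CFG range come from this commit and the README.

```python
import torch
from diffusers import (
    StableDiffusionXLPipeline,
    EulerAncestralDiscreteScheduler,
    AutoencoderKL,
)

# Fp16-friendly SDXL VAE; the exact VAE repo is not shown in these hunks (assumed).
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=torch.float16,
)

# Model id assumed from the repo this README belongs to.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "dataautogpt3/ProteusV0.3-Lighting",
    vae=vae,
    torch_dtype=torch.float16,
)

# The change in this commit: swap in the Euler Ancestral scheduler.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

# Define prompts and generate image (prompt, size, and steps are illustrative).
prompt = "a photograph of a red fox in a snowy forest, sharp focus"
negative_prompt = "blurry, low quality"

image = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    width=1024,
    height=1024,
    num_inference_steps=8,   # Lightning-style checkpoints target few steps (assumed value)
    guidance_scale=1.5,      # README recommends a CFG scale of 1 to 2 for this model
).images[0]
image.save("proteus_lighting_sample.png")
```

The `guidance_scale` of 1.5 sits in the middle of the CFG range of 1 to 2 recommended in the Settings section above.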