Upload README.md
README.md
CHANGED
@@ -11,7 +11,7 @@ tags:
-#
+# Chinese-Style-Stable-Diffusion-2-v0.1
@@ -45,12 +45,23 @@ Training on 5M chinese style filtered data for 150k steps. Exponential moving av
 
 ## Usage
 
-因为使用了customed tokenizer, 所以需要优先加载一下tokenizer
+因为使用了customed tokenizer, 所以需要优先加载一下tokenizer, 并传入trust_remote_code=True
+
+Custom Tokenizer should be loaded first with 'trust_remote_code=True'.
 
 ```py
-# !pip install git+https://github.com/huggingface/accelerate
 import torch
 from diffusers import StableDiffusionPipeline
+from transformers import AutoTokenizer
+
+tokenizer_id = "lyua1225/clip-huge-zh-75k-steps-bs4096"
+sd2_id = "Midu/chinese-style-stable-diffusion-2-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, trust_remote_code=True)
+pipe = StableDiffusionPipeline.from_pretrained(sd2_id, torch_dtype=torch.float16, tokenizer=tokenizer)
+pipe.to("cuda")
+
+image = pipe("赛博朋克风格的城市街道,8K分辨率,CG渲染", guidance_scale=10, num_inference_steps=20).images[0]
+image.save("cyberpunk.jpeg")
 
 ```
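For reference, the sketch below mirrors the usage code added in this commit and fixes the random seed so runs are repeatable. The `negative_prompt` and `generator` arguments (and the example seed of 42) are standard diffusers options added here for illustration only; they are not part of the committed README.

```py
# Minimal sketch of the added usage, with a fixed seed for reproducibility.
# negative_prompt, generator, and the seed value are illustrative additions
# (standard diffusers arguments), not part of this commit.
import torch
from diffusers import StableDiffusionPipeline
from transformers import AutoTokenizer

tokenizer_id = "lyua1225/clip-huge-zh-75k-steps-bs4096"
sd2_id = "Midu/chinese-style-stable-diffusion-2-v0.1"

# The tokenizer repo ships custom tokenizer code, so trust_remote_code=True is required.
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id, trust_remote_code=True)

# Passing tokenizer= overrides the tokenizer bundled with the pipeline checkpoint.
pipe = StableDiffusionPipeline.from_pretrained(
    sd2_id, torch_dtype=torch.float16, tokenizer=tokenizer
).to("cuda")

generator = torch.Generator("cuda").manual_seed(42)  # fixed seed so results are repeatable
image = pipe(
    "赛博朋克风格的城市街道,8K分辨率,CG渲染",  # "cyberpunk-style city street, 8K, CG render"
    negative_prompt="低分辨率, 模糊",          # "low resolution, blurry" (illustrative)
    guidance_scale=10,
    num_inference_steps=20,
    generator=generator,
).images[0]
image.save("cyberpunk_seed42.jpeg")
```

Passing `tokenizer=` into `from_pretrained` is what lets the pipeline tokenize Chinese prompts with the custom CLIP tokenizer instead of the one bundled with the checkpoint.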