vilarin committed on
Commit 24478b9
1 Parent(s): d8dd51b

Update app.py

Files changed (1): app.py +103 -174

app.py CHANGED
@@ -1,28 +1,27 @@
- import subprocess
- command = 'pip install git+https://github.com/snekkenull/diffusers.git'
- subprocess.run(command, shell=True)
-
  import os
  import gradio as gr
  import torch
  import numpy as np
- import random
- from diffusers import StableDiffusion3Pipeline, AutoencoderKL, SD3Transformer2DModel, StableDiffusion3Img2ImgPipeline, FlowMatchEulerDiscreteScheduler
  import spaces
- from diffusers.utils import load_image
  from PIL import Image
- import requests
- import transformers
- from transformers import AutoTokenizer, T5EncoderModel
- from translatepy import Translator
-
- os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
- translator = Translator()
- HF_TOKEN = os.environ.get("HF_TOKEN", None)
-
  # Constants
- model = "stabilityai/stable-diffusion-3-medium"
- repo = "stabilityai/stable-diffusion-3-medium-diffusers"
  MAX_SEED = np.iinfo(np.int32).max

  CSS = """
@@ -39,200 +38,130 @@ JS = """function () {
  }"""

- vae = AutoencoderKL.from_pretrained(
-     repo,
-     subfolder="vae",
-     torch_dtype=torch.float16,
- )
-
- transformer = SD3Transformer2DModel.from_pretrained(
-     repo,
-     subfolder="transformer",
-     torch_dtype=torch.float16,
- )
-
- # text_encoder_3 = T5EncoderModel.from_pretrained(
- #     repo,
- #     subfolder="text_encoder_3",
- # )
-
- # tokenizer_3 = AutoTokenizer.from_pretrained(
- #     repo,
- #     subfolder="tokenizer_3",
- #     torch_dtype=torch.float16,
- # )
-
  # Ensure model and scheduler are initialized in GPU-enabled function
  if torch.cuda.is_available():
-     pipe = StableDiffusion3Pipeline.from_pretrained(
-         repo,
-         vae=vae,
-         transformer=transformer,
-         torch_dtype=torch.float16).to("cuda")
-     pipe2 = StableDiffusion3Img2ImgPipeline.from_pretrained(
-         repo,
-         vae=vae,
-         transformer=transformer,
-         torch_dtype=torch.float16).to("cuda")
-
- pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
- pipe2.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe2.scheduler.config)
-
- print(pipe.tokenizer_max_length)
-
- # Function
- @spaces.GPU()
- def generate_image(
-     prompt,
-     negative="low quality",
-     width=1024,
-     height=1024,
-     scales=5,
-     steps=30,
-     strength=0.7,
-     seed: int = -1,
-     nums=1,
      progress=gr.Progress(track_tqdm=True)):

      if seed == -1:
          seed = random.randint(0, MAX_SEED)
-     seed = int(seed)
-     print(f'prompt:{prompt}')
-
-     text = str(translator.translate(prompt['text'], 'English'))
-
-     if prompt['files']:
-         # images = Image.open(prompt['files'][-1]).convert('RGB')
-         init_image = load_image(prompt['files'][-1]).resize((height, width))
      else:
-         init_image = None
-     generator = torch.Generator().manual_seed(seed)
-
-     if init_image:
-         image = pipe2(
-             prompt=text,
-             image=init_image,
-             negative_prompt=negative,
-             guidance_scale=scales,
-             num_inference_steps=steps,
-             strength=strength,
-             generator=generator,
-             num_images_per_prompt=nums,
-         ).images
-     else:
-         image = pipe(
-             prompt=text,
-             negative_prompt=negative,
-             width=width,
-             height=height,
-             guidance_scale=scales,
-             num_inference_steps=steps,
-             generator=generator,
-             num_images_per_prompt=nums,
-         ).images
-
-     print(image)
-     print(seed)
-     return image, seed
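
For reference, the removed app translated arbitrary-language prompts to English with translatepy before handing them to the pipeline. A minimal, model-free sketch of just that step (the sample sentence and helper name are illustrative, not from the commit):

    from translatepy import Translator

    translator = Translator()

    def to_english(prompt_text: str) -> str:
        # translate() auto-detects the source language; str() yields the translated text
        return str(translator.translate(prompt_text, "English"))

    print(to_english("una maga con túnica púrpura"))  # roughly: "a sorceress in a purple robe"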
  examples = [
-     [{"text": "a female character with long, flowing hair that appears to be made of ethereal, swirling patterns resembling the Northern Lights or Aurora Borealis. The background is dominated by deep blues and purples, creating a mysterious and dramatic atmosphere. The character's face is serene, with pale skin and striking features. She wears a dark-colored outfit with subtle patterns. The overall style of the artwork is reminiscent of fantasy or supernatural genres", "files": []}],
-     [{"text": "Digital art, portrait of an anthropomorphic roaring Tiger warrior with full armor, close up in the middle of a battle, behind him there is a banner with the text \"Open Source\".", "files": []}],
-     [{"text": "photo of a dog and a cat both standing on a red box, with a blue ball in the middle with a parrot standing on top of the ball. The box has the text \"SD3\"", "files": []}],
-     [{"text": "selfie photo of a wizard with long beard and purple robes, he is apparently in the middle of Tokyo. Probably taken from a phone.", "files": []}],
-     [{"text": "A vibrant street wall covered in colorful graffiti, the centerpiece spells \"SD3 MEDIUM\", in a storm of colors", "files": []}],
-     [{"text": "photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.", "files": []}],
-     [{"text": "anime art of a steampunk inventor in their workshop, surrounded by gears, gadgets, and steam. He is holding a blue potion and a red potion, one in each hand", "files": []}],
-     [{"text": "photo of picturesque scene of a road surrounded by lush green trees and shrubs. The road is wide and smooth, leading into the distance. On the right side of the road, there's a blue sports car parked with the license plate spelling \"SD32B\". The sky above is partly cloudy, suggesting a pleasant day. The trees have a mix of green and brown foliage. There are no people visible in the image. The overall composition is balanced, with the car serving as a focal point.", "files": []}],
-     [{"text": "photo of young man in a black suit, white shirt, and black tie. He has a neatly styled haircut and is looking directly at the camera with a neutral expression. The background consists of a textured wall with horizontal lines. The photograph is in black and white, emphasizing contrasts and shadows. The man appears to be in his late twenties or early thirties, with fair skin and short, dark hair.", "files": []}],
-     [{"text": "photo of a woman on the beach, shot from above. She is facing the sea, while wearing a white dress. She has long blonde hair", "files": []}],
- ]
  # Gradio Interface

  with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
-     gr.HTML("<h1><center>SD3M🐉T5</center></h1>")
-     gr.HTML("<p><center><a href='https://huggingface.co/stabilityai/stable-diffusion-3-medium'>sd3m</a> text/image-to-image generation<br><b>Update</b>: fix diffuser to support 512 token</center></p>")
      with gr.Row():
-         with gr.Column(scale=4):
-             img = gr.Gallery(label='SD3M Generated Image', columns=1, preview=True, height=600)
-             prompt = gr.MultimodalTextbox(label='Enter Your Prompt (Multi-Languages)', interactive=True, placeholder="Enter prompt, add one image.", file_types=['image'])
      with gr.Accordion("Advanced Options", open=True):
          with gr.Column(scale=1):
-             negative = gr.Textbox(label="Negative prompt", value="low quality, ugly, blurry, poor face, bad anatomy")
-             width = gr.Slider(
-                 label="Width",
-                 minimum=512,
-                 maximum=1280,
-                 step=8,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=512,
-                 maximum=1280,
-                 step=8,
-                 value=1024,
-             )
-             scales = gr.Slider(
-                 label="Guidance",
-                 minimum=3.5,
-                 maximum=7,
-                 step=0.1,
-                 value=5,
-             )
-             steps = gr.Slider(
-                 label="Steps",
-                 minimum=1,
-                 maximum=50,
-                 step=1,
-                 value=30,
-             )
-             strength = gr.Slider(
-                 label="Strength",
-                 minimum=0.0,
-                 maximum=1.0,
-                 step=0.1,
-                 value=0.7,
-             )
              seed = gr.Slider(
                  label="Seed (-1 Random)",
                  minimum=-1,
                  maximum=MAX_SEED,
                  step=1,
                  value=-1,
-                 scale=2,
              )
-             nums = gr.Slider(
-                 label="Image Numbers",
-                 minimum=1,
-                 maximum=4,
-                 step=1,
-                 value=1,
-                 scale=1,
-             )
      gr.Examples(
          examples=examples,
-         inputs=prompt,
-         outputs=[img, seed],
-         fn=generate_image,
          cache_examples="lazy",
          examples_per_page=4,
      )

-     prompt.submit(fn=generate_image,
-                   inputs=[prompt, negative, width, height, scales, steps, strength, seed, nums],
-                   outputs=[img, seed],
-                   )

  demo.queue().launch()
  import os
  import gradio as gr
  import torch
  import numpy as np
  import spaces
+ import random
  from PIL import Image

+ from glob import glob
+ from pathlib import Path
+ from typing import Optional
+
+ from diffusers import StableVideoDiffusionPipeline
+ from diffusers.utils import load_image, export_to_video

+ import uuid
+ # from huggingface_hub import hf_hub_download
+
+ # os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+ # HF_TOKEN = os.environ.get("HF_TOKEN", None)
  # Constants
+ model = "ECNU-CILab/ExVideo-SVD-128f-v1"
+
  MAX_SEED = np.iinfo(np.int32).max

  CSS = """
  }"""

  # Ensure model and scheduler are initialized in GPU-enabled function
  if torch.cuda.is_available():
+     pipe = StableVideoDiffusionPipeline.from_pretrained(
+         model,
+         torch_dtype=torch.float16,
+         variant="fp16").to("cuda")
+
+ # function source code adapted from multimodalart/stable-video-diffusion
+ @spaces.GPU(duration=120)
+ def generate(
+     image: Image.Image,
+     seed: Optional[int] = -1,
+     motion_bucket_id: int = 127,
+     fps_id: int = 6,
+     version: str = "svd_xt",
+     cond_aug: float = 0.02,
+     decoding_t: int = 1,
+     device: str = "cuda",
+     output_folder: str = "outputs",
      progress=gr.Progress(track_tqdm=True)):

      if seed == -1:
          seed = random.randint(0, MAX_SEED)
+
+     if image.mode == "RGBA":
+         image = image.convert("RGB")
+
+     generator = torch.manual_seed(seed)

+     os.makedirs(output_folder, exist_ok=True)
+     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
+     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

+     frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
+     export_to_video(frames, video_path, fps=fps_id)
+     torch.manual_seed(seed)

+     return video_path, seed
+
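
Outside the Gradio handler, the same image-to-video call can be exercised directly with diffusers. A minimal standalone sketch mirroring the parameters above (the input and output paths are placeholders):

    import torch
    from diffusers import StableVideoDiffusionPipeline
    from diffusers.utils import load_image, export_to_video

    pipe = StableVideoDiffusionPipeline.from_pretrained(
        "ECNU-CILab/ExVideo-SVD-128f-v1", torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")

    image = load_image("input.jpg").resize((1024, 576))  # SVD's native resolution
    generator = torch.manual_seed(42)                    # fixed seed for reproducibility
    frames = pipe(
        image,
        decode_chunk_size=1,       # smaller chunks trade speed for lower VRAM use
        generator=generator,
        motion_bucket_id=127,      # higher values add more motion
        noise_aug_strength=0.1,
        num_frames=25,
    ).frames[0]
    export_to_video(frames, "sample.mp4", fps=6)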
+ def resize_image(image, output_size=(1024, 576)):
+     # Calculate aspect ratios
+     target_aspect = output_size[0] / output_size[1]  # aspect ratio of the desired size
+     image_aspect = image.width / image.height        # aspect ratio of the original image
+
+     # Resize then crop if the original image is larger
+     if image_aspect > target_aspect:
+         # Resize the image to match the target height, maintaining aspect ratio
+         new_height = output_size[1]
+         new_width = int(new_height * image_aspect)
+         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+         # Calculate coordinates for cropping
+         left = (new_width - output_size[0]) / 2
+         top = 0
+         right = (new_width + output_size[0]) / 2
+         bottom = output_size[1]
      else:
+         # Resize the image to match the target width, maintaining aspect ratio
+         new_width = output_size[0]
+         new_height = int(new_width / image_aspect)
+         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+         # Calculate coordinates for cropping
+         left = 0
+         top = (new_height - output_size[1]) / 2
+         right = output_size[0]
+         bottom = (new_height + output_size[1]) / 2

+     # Crop the image
+     cropped_image = resized_image.crop((left, top, right, bottom))
+     return cropped_image

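A quick sanity check for resize_image: both wide and tall inputs should come out center-cropped to 1024x576. A sketch assuming only Pillow and the function above:

    from PIL import Image

    wide = Image.new("RGB", (2000, 800))   # wider than 16:9 -> fit height, crop the sides
    tall = Image.new("RGB", (800, 2000))   # taller than 16:9 -> fit width, crop top/bottom

    assert resize_image(wide).size == (1024, 576)
    assert resize_image(tall).size == (1024, 576)
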
  examples = [
+     "./train.jpg",
+     "./girl.webp",
+     "./robo.jpg",
+ ]

  # Gradio Interface

  with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
+     gr.HTML("<h1><center>ExVideo📽️</center></h1>")
+     gr.HTML("<p><center><a href='https://huggingface.co/ECNU-CILab/ExVideo-SVD-128f-v1'>ExVideo</a> image-to-video generation<br><b>Update</b>: first version</center></p>")
      with gr.Row():
+         image = gr.Image(label='Upload Image', type='pil', height=600, scale=2)  # type='pil' so resize_image/generate receive PIL images
+         video = gr.Video(label="Generated Video", height=600, scale=2)
      with gr.Accordion("Advanced Options", open=True):
          with gr.Column(scale=1):
              seed = gr.Slider(
                  label="Seed (-1 Random)",
                  minimum=-1,
                  maximum=MAX_SEED,
                  step=1,
                  value=-1,
              )
+             motion_bucket_id = gr.Slider(
+                 label="Motion bucket id",
+                 info="Controls how much motion to add/remove from the image",
+                 value=127,
+                 minimum=1,
+                 maximum=255,
+             )
+             fps_id = gr.Slider(
+                 label="Frames per second",
+                 info="The length of your video in seconds will be 25/fps",
+                 value=6,
+                 minimum=5,
+                 maximum=30,
+             )
+
+             submit_btn = gr.Button("Generate")
+             clear_btn = gr.ClearButton([image, video])  # pass the components to clear; the button's default label is "Clear"
      gr.Examples(
          examples=examples,
+         inputs=image,
+         outputs=[video, seed],
+         fn=generate,
          cache_examples="lazy",
          examples_per_page=4,
      )

+     image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)

+     submit_btn.click(fn=generate, inputs=[image, seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")

  demo.queue().launch()
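
Because the click handler is exposed with api_name="video", the Space can also be driven programmatically. A sketch assuming a recent gradio_client; the Space id and input path are placeholders, not from the commit:

    from gradio_client import Client, handle_file

    client = Client("vilarin/exvideo-svd")  # hypothetical Space id
    video_path, used_seed = client.predict(
        handle_file("input.jpg"),  # image
        -1,                        # seed (-1 -> random)
        127,                       # motion_bucket_id
        6,                         # fps_id
        api_name="/video",
    )
    print(video_path, used_seed)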