svjack committed
Commit 8fd1034
1 Parent(s): 7b1b3c4

Upload 3 files

Files changed (3)
  1. i2v_app_t4.py +12 -1
  2. t2v_app_t4.py +2 -2
  3. v2v_app_t4.py +12 -1
i2v_app_t4.py CHANGED
@@ -123,6 +123,7 @@ def infer(
     if seed == -1:
         seed = random.randint(0, 2**8 - 1)
 
+    '''
     pipe_image = CogVideoXImageToVideoPipeline.from_pretrained(
         "THUDM/CogVideoX-5b-I2V",
         transformer=transformer,
@@ -132,6 +133,16 @@ def infer(
         text_encoder=text_encoder,
         torch_dtype=torch.float16
     ).to(device)
+    '''
+    pipe_image = CogVideoXImageToVideoPipeline.from_pretrained(
+        "THUDM/CogVideoX-5b-I2V",
+        transformer=transformer,
+        vae=vae,
+        scheduler=pipe.scheduler,
+        tokenizer=pipe.tokenizer,
+        text_encoder=text_encoder,
+        torch_dtype=torch.float16
+    )
     image_input = Image.fromarray(image_input).resize(size=(720, 480))  # Convert to PIL
     image = load_image(image_input)
     video_pt = pipe_image(
@@ -144,7 +155,7 @@ def infer(
         guidance_scale=7.0,
         generator=torch.Generator(device="cpu").manual_seed(seed),
     ).frames
-    pipe_image.to("cpu")
+    #pipe_image.to("cpu")
     del pipe_image
     gc.collect()
     torch.cuda.empty_cache()
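A minimal sketch of the component-sharing pattern used in the new pipe_image construction, assuming the shared components are loaded as shown below; the loading code, the enable_sequential_cpu_offload() call, and the example image and prompt are assumptions, not part of this commit:

import gc
import torch
from diffusers import (
    AutoencoderKLCogVideoX,
    CogVideoXImageToVideoPipeline,
    CogVideoXPipeline,
    CogVideoXTransformer3DModel,
)
from diffusers.utils import load_image

# Assumption: the shared components are loaded once at startup; how the app
# actually loads them is not shown in this commit.
transformer = CogVideoXTransformer3DModel.from_pretrained(
    "THUDM/CogVideoX-5b-I2V", subfolder="transformer", torch_dtype=torch.float16
)
vae = AutoencoderKLCogVideoX.from_pretrained(
    "THUDM/CogVideoX-5b-I2V", subfolder="vae", torch_dtype=torch.float16
)
pipe = CogVideoXPipeline.from_pretrained(  # supplies the scheduler, tokenizer, text encoder
    "THUDM/CogVideoX-5b", torch_dtype=torch.float16
)
text_encoder = pipe.text_encoder

# As in the new hunk: build the I2V pipeline from already-loaded components and
# skip .to(device), so nothing is copied to the GPU at construction time.
pipe_image = CogVideoXImageToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX-5b-I2V",
    transformer=transformer,
    vae=vae,
    scheduler=pipe.scheduler,
    tokenizer=pipe.tokenizer,
    text_encoder=text_encoder,
    torch_dtype=torch.float16,
)
# Assumption: on a 16 GB T4, sequential CPU offload streams weights to the GPU
# one submodule at a time instead of holding the whole 5B model there.
pipe_image.enable_sequential_cpu_offload()

image = load_image("example.png").resize((720, 480))  # hypothetical input image
frames = pipe_image(
    image=image,
    prompt="a boat drifting on a misty lake",  # hypothetical prompt
    guidance_scale=7.0,
    generator=torch.Generator(device="cpu").manual_seed(42),
).frames

# Same cleanup as the end of the hunk: release the pipeline and reclaim VRAM.
del pipe_image
gc.collect()
torch.cuda.empty_cache()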
t2v_app_t4.py CHANGED
@@ -121,7 +121,7 @@ def infer(
     if seed == -1:
         seed = random.randint(0, 2**8 - 1)
 
-    pipe.to(device)
+    #pipe.to(device)
     video_pt = pipe(
         prompt=prompt,
         num_videos_per_prompt=1,
@@ -132,7 +132,7 @@ def infer(
         guidance_scale=7.0,
         generator=torch.Generator(device="cpu").manual_seed(seed),
     ).frames
-    pipe.to("cpu")
+    #pipe.to("cpu")
    gc.collect()
    torch.cuda.empty_cache()
    return (video_pt, seed)
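For the text-to-video path, only the per-call pipe.to(device) / pipe.to("cpu") moves are commented out. A minimal sketch of generating on a T4 without those moves, assuming pipe is built once at startup; the offload and tiling calls and the example prompt are assumptions, not shown in this commit:

import gc
import torch
from diffusers import CogVideoXPipeline

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.float16)
# Assumption: device placement is delegated to offloading rather than explicit .to() moves.
pipe.enable_sequential_cpu_offload()
pipe.vae.enable_tiling()  # reduces peak VRAM while decoding latents to frames

video_pt = pipe(
    prompt="a panda playing guitar in a bamboo forest",  # hypothetical prompt
    num_videos_per_prompt=1,
    num_inference_steps=50,
    num_frames=49,
    guidance_scale=7.0,
    generator=torch.Generator(device="cpu").manual_seed(42),
).frames

gc.collect()
torch.cuda.empty_cache()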
v2v_app_t4.py CHANGED
@@ -127,6 +127,7 @@ def infer(
         seed = random.randint(0, 2**8 - 1)
 
     video = load_video(video_input)[:49]  # Limit to 49 frames
+    '''
     pipe_video = CogVideoXVideoToVideoPipeline.from_pretrained(
         "THUDM/CogVideoX-5b",
         transformer=transformer,
@@ -136,6 +137,16 @@ def infer(
         text_encoder=text_encoder,
         torch_dtype=torch.float16
     ).to(device)
+    '''
+    pipe_video = CogVideoXVideoToVideoPipeline.from_pretrained(
+        "THUDM/CogVideoX-5b",
+        transformer=transformer,
+        vae=vae,
+        scheduler=pipe.scheduler,
+        tokenizer=pipe.tokenizer,
+        text_encoder=text_encoder,
+        torch_dtype=torch.float16
+    )
     video_pt = pipe_video(
         video=video,
         prompt=prompt,
@@ -147,7 +158,7 @@ def infer(
         guidance_scale=7.0,
         generator=torch.Generator(device="cpu").manual_seed(seed),
     ).frames
-    pipe_video.to("cpu")
+    #pipe_video.to("cpu")
     del pipe_video
     gc.collect()
     torch.cuda.empty_cache()
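The video-to-video hunk mirrors the image-to-video file, with the input clip trimmed to the 49 frames CogVideoX-5b works with. A minimal, self-contained sketch of that call, assuming a standalone pipeline load rather than the shared-component construction above; the input path, prompt, strength value, and offload call are assumptions:

import torch
from diffusers import CogVideoXVideoToVideoPipeline
from diffusers.utils import load_video

# Assumption: a standalone load; in the app the pipeline is built from shared
# components exactly as in the new hunk above.
pipe_video = CogVideoXVideoToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX-5b", torch_dtype=torch.float16
)
pipe_video.enable_sequential_cpu_offload()  # assumption, matching the other sketches

# Trim the source clip to at most 49 frames, as in the context line above.
video = load_video("input.mp4")[:49]  # hypothetical input path

video_pt = pipe_video(
    video=video,
    prompt="the same scene rendered as a watercolor painting",  # hypothetical prompt
    strength=0.8,  # assumed value: how far the result may depart from the source video
    guidance_scale=7.0,
    generator=torch.Generator(device="cpu").manual_seed(42),
).frames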