aakashch0179 committed on
Commit
a32d3b4
1 Parent(s): 61eeb7c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -6
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import torch
2
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
3
  from diffusers.utils import export_to_video
@@ -16,17 +17,20 @@ if st.button("Generate Video"):
16
  st.warning("Please upgrade 'accelerate' to version 0.17.0 or higher for CPU offloading.")
17
  else:
18
  with st.spinner("Generating video..."):
19
- # ... (Your model loading code)
 
 
 
 
20
 
 
21
  video_frames = pipe(prompt, num_inference_steps=25).frames
22
 
23
- # ... (Your potential reshaping code)
24
-
25
- # Create dummy frames for testing
26
  dummy_frames = [np.ones((256, 256, 3), dtype=np.uint8) for _ in range(20)]
27
 
28
- # Replace with your actual export logic
29
  video_path = export_to_video(dummy_frames)
30
 
31
  # Display the video in the Streamlit app
32
- st.video(video_path)
 
1
+
2
  import torch
3
  from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
4
  from diffusers.utils import export_to_video
 
17
  st.warning("Please upgrade 'accelerate' to version 0.17.0 or higher for CPU offloading.")
18
  else:
19
  with st.spinner("Generating video..."):
20
+ # Define the pipeline for image generation
21
+ pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b",
22
+ torch_dtype=torch.float16, variant="fp16", device="cpu")
23
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
24
+ pipe.enable_model_cpu_offload()
25
 
26
+ # Generate video frames
27
  video_frames = pipe(prompt, num_inference_steps=25).frames
28
 
29
+ # Create dummy frames for testing (replace with actual manipulation later)
 
 
30
  dummy_frames = [np.ones((256, 256, 3), dtype=np.uint8) for _ in range(20)]
31
 
32
+ # Export to video
33
  video_path = export_to_video(dummy_frames)
34
 
35
  # Display the video in the Streamlit app
36
+ st.video(video_path)