aakashch0179 committed
Commit: 61eeb7c
Parent(s): c6b92f1

Update app.py

Files changed (1):
  app.py: +11 -26
app.py CHANGED
@@ -3,45 +3,30 @@ from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
 import streamlit as st
 import numpy as np
+
 # Title and User Input
 st.title("Text-to-Video with Streamlit")
 prompt = st.text_input("Enter your text prompt:", "Spiderman is surfing")
 
 # Button to trigger generation
-
-if st.button("Generate Video"):
-    # Ensure you have 'accelerate' version 0.17.0 or higher
+if st.button("Generate Video"):
+    # Ensure you have 'accelerate' version 0.17.0 or higher
     import accelerate
     if accelerate.__version__ < "0.17.0":
         st.warning("Please upgrade 'accelerate' to version 0.17.0 or higher for CPU offloading.")
     else:
         with st.spinner("Generating video..."):
-            pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b",
-                                                     torch_dtype=torch.float16,
-                                                     variant="fp16",
-                                                     device="cpu")  # Force CPU usage
-            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
-            pipe.enable_model_cpu_offload()  # Assuming 'accelerate' is updated
+            # ... (Your model loading code)
 
-
-
             video_frames = pipe(prompt, num_inference_steps=25).frames
 
-            # # Reshape for export (assuming your frames have a time dimension)
-            # video_frames = [np.transpose(frame, (1, 2, 0)) for frame in video_frames]  # Swap time and channels axes
-
-            # # Check and adjust axes if needed
-            # for i, frame in enumerate(video_frames):
-            #     if len(frame.shape) == 3:  # Assuming (height, width, channels) format
-            #         video_frames[i] = np.transpose(frame, (1, 0, 2))  # Swap height and width
-
+            # ... (Your potential reshaping code)
 
-            # video_path = export_to_video(video_frames)
-            dummy_frames = [np.ones((256, 256, 3), dtype=np.uint8) for _ in range(20)]  # Example (256x256 RGB images)
+            # Create dummy frames for testing
+            dummy_frames = [np.ones((256, 256, 3), dtype=np.uint8) for _ in range(20)]
 
-            # Replace with your actual export logic
-            video_path = export_to_video(dummy_frames)
+            # Replace with your actual export logic
+            video_path = export_to_video(dummy_frames)
 
-            # Display the video in the Streamlit app
-            st.video(video_path)
-
+            # Display the video in the Streamlit app
+            st.video(video_path)
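
Note: as committed, the new app.py is not runnable by itself. The placeholder "# ... (Your model loading code)" elides the pipeline setup, so the later pipe(prompt, num_inference_steps=25) call raises a NameError, and the generated video_frames are still discarded in favor of dummy_frames. Below is a minimal sketch of the elided block, reconstructed from the lines this commit removed: the model id, scheduler swap, and offload call come from the old code; the device="cpu" keyword is dropped since DiffusionPipeline.from_pretrained does not appear to accept it in current diffusers releases; import torch is assumed not to be present above the hunk.

    import torch
    from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

    # Load the text-to-video pipeline in half precision.
    # fp16 weights assume a CUDA device; drop torch_dtype/variant to run on CPU only.
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b",
        torch_dtype=torch.float16,
        variant="fp16",
    )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.enable_model_cpu_offload()  # requires accelerate >= 0.17.0, as checked above

With that block restored in place of the placeholder, the dummy-frame fallback can be swapped for the real export, i.e. video_path = export_to_video(video_frames), which the old code carried as a commented-out line.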