Spaces:
Runtime error
Reduce chunk size to be able to run on A10G
Browse files
- app_canny.py +1 -1
- app_canny_db.py +1 -1
- app_depth.py +1 -1
- app_pix2pix_video.py +1 -1
- app_pose.py +1 -1
- app_text_to_video.py +1 -1
app_canny.py
CHANGED
@@ -47,7 +47,7 @@ def create_demo(model: Model):
|
|
47 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
48 |
"None"], label="Watermark", value='Picsart AI Research')
|
49 |
chunk_size = gr.Slider(
|
50 |
-
label="Chunk size", minimum=2, maximum=16, value=8, step=1,
|
51 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
52 |
merging_ratio = gr.Slider(
|
53 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
|
|
47 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
48 |
"None"], label="Watermark", value='Picsart AI Research')
|
49 |
chunk_size = gr.Slider(
|
50 |
+
label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
|
51 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
52 |
merging_ratio = gr.Slider(
|
53 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
app_canny_db.py
CHANGED
@@ -51,7 +51,7 @@ def create_demo(model: Model):
|
|
51 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
52 |
"None"], label="Watermark", value='Picsart AI Research')
|
53 |
chunk_size = gr.Slider(
|
54 |
-
label="Chunk size", minimum=2, maximum=16, value=8, step=1,
|
55 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
56 |
merging_ratio = gr.Slider(
|
57 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
|
|
51 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
52 |
"None"], label="Watermark", value='Picsart AI Research')
|
53 |
chunk_size = gr.Slider(
|
54 |
+
label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
|
55 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
56 |
merging_ratio = gr.Slider(
|
57 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
app_depth.py
CHANGED
@@ -60,7 +60,7 @@ def create_demo(model: Model):
|
|
60 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
61 |
"None"], label="Watermark", value='Picsart AI Research')
|
62 |
chunk_size = gr.Slider(
|
63 |
-
label="Chunk size", minimum=2, maximum=16, value=8, step=1,
|
64 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
65 |
merging_ratio = gr.Slider(
|
66 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
|
|
60 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
61 |
"None"], label="Watermark", value='Picsart AI Research')
|
62 |
chunk_size = gr.Slider(
|
63 |
+
label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
|
64 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
65 |
merging_ratio = gr.Slider(
|
66 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
app_pix2pix_video.py
CHANGED
@@ -74,7 +74,7 @@ def create_demo(model: Model):
|
|
74 |
value=-1,
|
75 |
step=1)
|
76 |
chunk_size = gr.Slider(
|
77 |
-
label="Chunk size", minimum=2, maximum=16, value=8, step=1,
|
78 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
79 |
merging_ratio = gr.Slider(
|
80 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
|
|
74 |
value=-1,
|
75 |
step=1)
|
76 |
chunk_size = gr.Slider(
|
77 |
+
label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
|
78 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
79 |
merging_ratio = gr.Slider(
|
80 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
app_pose.py
CHANGED
@@ -35,7 +35,7 @@ def create_demo(model: Model):
|
|
35 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
36 |
"None"], label="Watermark", value='Picsart AI Research')
|
37 |
chunk_size = gr.Slider(
|
38 |
-
label="Chunk size", minimum=2, maximum=16, value=8, step=1,
|
39 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
40 |
merging_ratio = gr.Slider(
|
41 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
|
|
35 |
watermark = gr.Radio(["Picsart AI Research", "Text2Video-Zero",
|
36 |
"None"], label="Watermark", value='Picsart AI Research')
|
37 |
chunk_size = gr.Slider(
|
38 |
+
label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
|
39 |
info="Number of frames processed at once. Reduce for lower memory usage.")
|
40 |
merging_ratio = gr.Slider(
|
41 |
label="Merging ratio", minimum=0.0, maximum=0.9, step=0.1, value=0.0, visible=not on_huggingspace,
|
app_text_to_video.py
CHANGED
@@ -80,7 +80,7 @@ def create_demo(model: Model):
|
|
80 |
info="Perform DDPM steps from t0 to t1. The larger the gap between t0 and t1, the more variance between the frames. Ensure t0 < t1",
|
81 |
maximum=48, value=47, step=1)
|
82 |
chunk_size = gr.Slider(
|
83 |
-
label="Chunk size", minimum=2, maximum=16, value=8, step=1,
|
84 |
info="Number of frames processed at once. Reduce for lower memory usage."
|
85 |
)
|
86 |
merging_ratio = gr.Slider(
|
|
|
80 |
info="Perform DDPM steps from t0 to t1. The larger the gap between t0 and t1, the more variance between the frames. Ensure t0 < t1",
|
81 |
maximum=48, value=47, step=1)
|
82 |
chunk_size = gr.Slider(
|
83 |
+
label="Chunk size", minimum=2, maximum=16, value=2, step=1, visible=not on_huggingspace,
|
84 |
info="Number of frames processed at once. Reduce for lower memory usage."
|
85 |
)
|
86 |
merging_ratio = gr.Slider(
|