app.py CHANGED
@@ -3,22 +3,15 @@ import os
 os.environ["GRADIO_TEMP_DIR"] = os.path.join(os.getcwd(), ".tmp_outputs")
 os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
 
-import logging
 import uuid
 
 import GPUtil
 import gradio as gr
 import psutil
 import spaces
-import torch
 
 from videosys import CogVideoXConfig, CogVideoXPABConfig, VideoSysEngine
 
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-dtype = torch.float16
-
 
 def load_model(enable_video_sys=False, pab_threshold=[100, 850], pab_range=2):
     pab_config = CogVideoXPABConfig(spatial_threshold=pab_threshold, spatial_range=pab_range)
@@ -57,14 +50,14 @@ def get_server_status():
     return {"cpu": f"{cpu_percent}%", "memory": f"{memory.percent}%", "disk": f"{disk.percent}%", "gpu": gpu_info}
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=240)
 def generate_vanilla(prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
     engine = load_model()
     video_path = generate(engine, prompt, num_inference_steps, guidance_scale)
     return video_path
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=240)
 def generate_vs(
     prompt,
     num_inference_steps,