updated ui
- .gitignore +7 -0
- backend/__pycache__/__init__.cpython-311.pyc +0 -0
- backend/__pycache__/device.cpython-311.pyc +0 -0
- backend/__pycache__/image_saver.cpython-311.pyc +0 -0
- backend/__pycache__/lcm_text_to_image.cpython-311.pyc +0 -0
- backend/models/__pycache__/lcmdiffusion_setting.cpython-311.pyc +0 -0
- frontend/webui/hf_demo.py +12 -5
- models/__pycache__/interface_types.cpython-311.pyc +0 -0
- models/__pycache__/settings.cpython-311.pyc +0 -0
.gitignore
ADDED
@@ -0,0 +1,7 @@
+env
+*.bak
+*.pyc
+__pycache__
+results
+# excluding user settings for the GUI frontend
+configs/settings.yaml
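The last two entries keep per-user GUI settings (configs/settings.yaml) out of version control. As a rough illustration of what that implies at runtime, the sketch below shows one common way such a YAML settings file is read with a fallback to defaults; the load_user_settings helper and the use of PyYAML are assumptions for illustration, not part of this commit.

# Hypothetical illustration only: reading the git-ignored per-user settings file.
# Assumes PyYAML; load_user_settings is not a function from this repository.
from pathlib import Path
import yaml

def load_user_settings(path: str = "configs/settings.yaml") -> dict:
    settings_file = Path(path)
    if not settings_file.exists():
        return {}  # no local settings yet; callers fall back to defaults
    with settings_file.open() as fh:
        return yaml.safe_load(fh) or {}
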
backend/__pycache__/__init__.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/__init__.cpython-311.pyc and b/backend/__pycache__/__init__.cpython-311.pyc differ

backend/__pycache__/device.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/device.cpython-311.pyc and b/backend/__pycache__/device.cpython-311.pyc differ

backend/__pycache__/image_saver.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/image_saver.cpython-311.pyc and b/backend/__pycache__/image_saver.cpython-311.pyc differ

backend/__pycache__/lcm_text_to_image.cpython-311.pyc
CHANGED
Binary files a/backend/__pycache__/lcm_text_to_image.cpython-311.pyc and b/backend/__pycache__/lcm_text_to_image.cpython-311.pyc differ

backend/models/__pycache__/lcmdiffusion_setting.cpython-311.pyc
CHANGED
Binary files a/backend/models/__pycache__/lcmdiffusion_setting.cpython-311.pyc and b/backend/models/__pycache__/lcmdiffusion_setting.cpython-311.pyc differ
frontend/webui/hf_demo.py
CHANGED
@@ -10,6 +10,8 @@ from backend.device import get_device_name
 from constants import APP_VERSION
 from backend.device import is_openvino_device
 import PIL
+from backend.models.lcmdiffusion_setting import DiffusionTask
+from pprint import pprint
 
 lcm_text_to_image = LCMTextToImage()
 lcm_lora = LCMLora(
@@ -39,6 +41,7 @@ def predict(
 ):
     print(f"prompt - {prompt}")
     lcm_diffusion_setting = LCMDiffusionSetting()
+    lcm_diffusion_setting.diffusion_task = DiffusionTask.text_to_image.value
     lcm_diffusion_setting.prompt = prompt
     lcm_diffusion_setting.guidance_scale = 1.0
     lcm_diffusion_setting.inference_steps = steps
@@ -46,15 +49,19 @@
     lcm_diffusion_setting.use_seed = use_seed
     lcm_diffusion_setting.use_safety_checker = True
     lcm_diffusion_setting.use_tiny_auto_encoder = True
-    lcm_diffusion_setting.image_width = 320 if is_openvino_device() else 512
-    lcm_diffusion_setting.image_height = 320 if is_openvino_device() else 512
-    lcm_diffusion_setting.
-
+    # lcm_diffusion_setting.image_width = 320 if is_openvino_device() else 512
+    # lcm_diffusion_setting.image_height = 320 if is_openvino_device() else 512
+    lcm_diffusion_setting.image_width = 512
+    lcm_diffusion_setting.image_height = 512
+    lcm_diffusion_setting.use_openvino = True
+    lcm_diffusion_setting.use_tiny_auto_encoder = False
+    pprint(lcm_diffusion_setting.model_dump())
+    lcm_text_to_image.init(lcm_diffusion_setting=lcm_diffusion_setting)
     start = perf_counter()
     images = lcm_text_to_image.generate(lcm_diffusion_setting)
     latency = perf_counter() - start
     print(f"Latency: {latency:.2f} seconds")
-    return images[0]
+    return images[0]
 
 
 css = """
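Taken together, the hunks above change predict() to tag the request as a text-to-image task, force the OpenVINO pipeline at a fixed 512x512 resolution, disable the tiny auto-encoder, and (re)initialize the pipeline before each generation. Below is a minimal sketch of the resulting generation path, assembled only from the lines visible in this diff; the import paths for LCMTextToImage and LCMDiffusionSetting are inferred from the changed __pycache__ entries, and the generate_single_image wrapper name is illustrative (in the demo this logic lives in predict()).

# Minimal sketch of the generation path after this commit, assembled from the
# hunks above. Import paths marked "inferred" are assumptions, not confirmed here.
from time import perf_counter
from pprint import pprint

from backend.lcm_text_to_image import LCMTextToImage          # inferred path
from backend.models.lcmdiffusion_setting import (             # inferred path
    LCMDiffusionSetting,
    DiffusionTask,
)

lcm_text_to_image = LCMTextToImage()


def generate_single_image(prompt: str, steps: int, use_seed: bool):
    setting = LCMDiffusionSetting()
    setting.diffusion_task = DiffusionTask.text_to_image.value  # added in this commit
    setting.prompt = prompt
    setting.guidance_scale = 1.0
    setting.inference_steps = steps
    setting.use_seed = use_seed
    setting.use_safety_checker = True
    # The commit replaces the 320/512 is_openvino_device() switch with a fixed
    # 512x512 output, forces the OpenVINO pipeline, and disables the tiny
    # auto-encoder that an earlier line enabled.
    setting.image_width = 512
    setting.image_height = 512
    setting.use_openvino = True
    setting.use_tiny_auto_encoder = False
    pprint(setting.model_dump())  # model_dump() implies a pydantic v2 model
    lcm_text_to_image.init(lcm_diffusion_setting=setting)  # re-init before each run
    start = perf_counter()
    images = lcm_text_to_image.generate(setting)
    print(f"Latency: {perf_counter() - start:.2f} seconds")
    return images[0]
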
models/__pycache__/interface_types.cpython-311.pyc
CHANGED
Binary files a/models/__pycache__/interface_types.cpython-311.pyc and b/models/__pycache__/interface_types.cpython-311.pyc differ

models/__pycache__/settings.cpython-311.pyc
CHANGED
Binary files a/models/__pycache__/settings.cpython-311.pyc and b/models/__pycache__/settings.cpython-311.pyc differ