Spaces: Runtime error
aakashch0179 committed db4a0b6
Parent(s): 9f983ee
Update app.py
app.py
CHANGED
@@ -39,90 +39,48 @@



-# 
-
-# import streamlit as st
-# import torch
-# from diffusers import ShapEPipeline
-# from diffusers.utils import export_to_gif
-# from PIL import Image
-# import numpy as np
-# # import PyTorch
-
-# # Model loading (Ideally done once at the start for efficiency)
-# ckpt_id = "openai/shap-e"
-
-# @st.cache_resource # Caches the model for faster subsequent runs
-
-# def process_image_for_pil(image):
-#     if isinstance(image, torch.Tensor):
-#         # Your PyTorch conversion logic here (with correct indentation)
-#     # elif isinstance(image, np.ndarray):
-#         # Your Numpy conversion logic here (with correct indentation)
-#         image_array = image.astype('uint8') # Assuming 8-bit conversion is needed
-#         return Image.fromarray(image_array)
-#     else:
-#         raise TypeError("Unsupported image format. Please provide conversion logic.")
-
-# test_image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8) # Placeholder image
-# result = process_image_for_pil(test_image)
-
-
-# 
-#     """Determines whether to resize images (replace with your own logic)"""
-#     if image.width > 512 or image.height > 512:
-#         return True
-#     else:
-#         return False
-# def load_model():
-#     return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")
-
-
-# # User Inputs
-# prompt = st.text_input("Enter your prompt:", "a shark")
-# guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)
-
-# # Generate and Display Images
-# if st.button("Generate"):
-#     with st.spinner("Generating images..."):
-#         images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64).images
-
-#         # ... (Process images for PIL conversion)
-
-#         # Resize Images (Optional)
-#         pil_images = [] # Modified to store resized images if needed
-#         for image in images:
-#             processed_image = process_image_for_pil(image)
-#             if should_resize(processed_image): # Pass image to should_resize
-#                 resized_image = processed_image.resize((256, 256))
-#                 pil_images.append(resized_image)
-#             else:
-#                 pil_images.append(processed_image) # Append without resizing
-
-# 
-
-from PIL import Image
-import numpy as np
-
-ckpt_id = "openai/shap-e"
-@st.cache_resource
-def load_model():
-    return StableDiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=torch.float16, use_auth_token=True).to("cuda")
-pipe = load_model() # Load the model
+# Text to 3D
+
+import streamlit as st
+import torch
+from diffusers import ShapEPipeline
+from diffusers.utils import export_to_gif
+from PIL import Image
+import numpy as np
+# import PyTorch
+
+# Model loading (Ideally done once at the start for efficiency)
+ckpt_id = "openai/shap-e"
+
+@st.cache_resource # Caches the model for faster subsequent runs
+
+def process_image_for_pil(image):
+    if isinstance(image, torch.Tensor):
+        # Your PyTorch conversion logic here (with correct indentation)
+    # elif isinstance(image, np.ndarray):
+        # Your Numpy conversion logic here (with correct indentation)
+        image_array = image.astype('uint8') # Assuming 8-bit conversion is needed
+        return Image.fromarray(image_array)
+    else:
+        raise TypeError("Unsupported image format. Please provide conversion logic.")
+
+test_image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8) # Placeholder image
+result = process_image_for_pil(test_image)
+
+def should_resize(image): # Add 'image' as an argument
+    """Determines whether to resize images (replace with your own logic)"""
+    if image.width > 512 or image.height > 512:
+        return True
+    else:
+        return False
+def load_model():
+    return ShapEPipeline.from_pretrained(ckpt_id).to("cuda")
+
+pipe = load_model()
 
 # App Title
 st.title("Shark 3D Image Generator")
@@ -131,26 +89,26 @@ st.title("Shark 3D Image Generator")
 
 prompt = st.text_input("Enter your prompt:", "a shark")
 guidance_scale = st.slider("Guidance Scale", 0.0, 20.0, 15.0, step=0.5)
 
-
-
-    return False
-
-# 
-if st.button("Generate"):
-    with st.spinner("Generating images..."):
-        images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64).images
+# Generate and Display Images
+if st.button("Generate"):
+    with st.spinner("Generating images..."):
+        images = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=64).images
 
+        # ... (Process images for PIL conversion)
+
+        # Resize Images (Optional)
+        pil_images = [] # Modified to store resized images if needed
         for image in images:
-            if should_resize(image):
-                image = image.resize((256, 256))
-            pil_images.append(image)
+            processed_image = process_image_for_pil(image)
+            if should_resize(processed_image): # Pass image to should_resize
+                resized_image = processed_image.resize((256, 256))
+                pil_images.append(resized_image)
+            else:
+                pil_images.append(processed_image) # Append without resizing
 
-        gif_path = export_to_gif(pil_images, "shark_3d.gif")
-        st.image(pil_images[0])
-        st.success("GIF saved as shark_3d.gif")
+        gif_path = export_to_gif(pil_images, "shark_3d.gif")
+        st.image(pil_images[0])
+        st.success("GIF saved as shark_3d.gif")
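For reference, the same text-to-3D-GIF flow can be written without Streamlit by following the upstream diffusers Shap-E example. The sketch below is not this Space's app.py; the frame_size argument and the images[0] indexing come from that upstream example rather than from this commit.

# Minimal sketch of the plain Shap-E text-to-GIF flow (not this Space's app.py).
# Assumes a CUDA device; frame_size and the images[0] indexing follow the
# upstream diffusers example and are not part of this commit.
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

ckpt_id = "openai/shap-e"
pipe = ShapEPipeline.from_pretrained(ckpt_id).to("cuda")

prompt = "a shark"
# With the default output type, .images holds one list of rendered PIL frames per prompt.
frames = pipe(
    prompt,
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=256,
).images[0]

export_to_gif(frames, "shark_3d.gif")  # write the frames out as an animated GIF

In that example each entry of .images is the list of frames for one prompt, so a loop that iterates over .images and treats every element as a single image may need adjusting when adapting the app's per-image processing.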
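The committed process_image_for_pil leaves its tensor branch as a placeholder. Purely as an illustration (not the committed implementation), one way to fill in both branches, assuming float frames arrive in the [0, 1] range with a height-width-channel layout, is:

# Hypothetical completion of the placeholder branches, not the committed code.
# Assumes float inputs in [0, 1] with HWC layout; uint8 arrays and PIL images pass through.
import numpy as np
import torch
from PIL import Image

def process_image_for_pil(image):
    if isinstance(image, torch.Tensor):
        # Detach from the graph, move to CPU, and scale floats to 8-bit.
        array = (image.detach().cpu().numpy() * 255).clip(0, 255).astype("uint8")
        return Image.fromarray(array)
    elif isinstance(image, np.ndarray):
        if image.dtype != np.uint8:
            image = (image * 255).clip(0, 255).astype("uint8")
        return Image.fromarray(image)
    elif isinstance(image, Image.Image):
        return image  # already a PIL image
    raise TypeError(f"Unsupported image format: {type(image)}")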