Spaces:
Runtime error
Runtime error
aakashch0179
committed on
Commit
•
f3b84cf
1
Parent(s):
a6fe570
Update app.py
Browse files
app.py
CHANGED
@@ -51,6 +51,33 @@ from PIL import Image
|
|
51 |
ckpt_id = "openai/shap-e"
|
52 |
|
53 |
@st.cache_resource # Caches the model for faster subsequent runs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
def load_model():
    """Load the Shap-E text-to-3D pipeline once.

    Returns:
        ShapEPipeline: the pretrained pipeline identified by the
        module-level ``ckpt_id``, moved to CUDA when a GPU is
        available and to CPU otherwise.

    Note: the original unconditionally called ``.to("cuda")``, which
    raises on CPU-only hosts (the Space shows "Runtime error").
    """
    # Prefer GPU but degrade gracefully instead of crashing.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return ShapEPipeline.from_pretrained(ckpt_id).to(device)
|
56 |
|
|
|
51 |
ckpt_id = "openai/shap-e"
|
52 |
|
53 |
@st.cache_resource # Caches the model for faster subsequent runs
|
54 |
+
def process_image_for_pil(image):
    """Convert model output into a ``PIL.Image.Image``.

    Accepts a PIL image (returned unchanged), a NumPy array, or a
    PyTorch tensor. ``Image.fromarray()`` rejects float dtypes, so
    float data — assumed to be in [0, 1], as diffusion pipelines
    typically emit (TODO: confirm against the Shap-E output) — is
    clipped and rescaled to uint8 first.

    Args:
        image: PIL Image, ``np.ndarray``, or ``torch.Tensor``.

    Returns:
        PIL.Image.Image: the converted image.

    Raises:
        TypeError: if ``image`` is none of the supported types.
    """
    # Already a PIL image — nothing to do.
    if isinstance(image, Image.Image):
        return image

    # Tensors: move to host memory, then reuse the NumPy path below.
    if isinstance(image, torch.Tensor):
        image = image.detach().cpu().numpy()

    if isinstance(image, np.ndarray):
        # fromarray() needs an integer dtype; rescale floats to uint8.
        if np.issubdtype(image.dtype, np.floating):
            image = (np.clip(image, 0.0, 1.0) * 255).astype(np.uint8)
        return Image.fromarray(image)

    # Add more cases based on the output of print(type(images[0]))
    raise TypeError("Unsupported image format. Please provide conversion logic.")
|
72 |
+
|
73 |
+
|
74 |
+
def should_resize(image):
    """Decide whether *image* should be downscaled.

    Bug fix: the original took no parameters and read a global
    ``image`` that is never defined, so every call raised
    ``NameError``. The image is now an explicit argument.

    Args:
        image: any object exposing ``width`` and ``height`` attributes
            (e.g. a ``PIL.Image.Image``).

    Returns:
        bool: True when either dimension exceeds 512 pixels.
    """
    return image.width > 512 or image.height > 512
|
81 |
def load_model():
    """Load the Shap-E text-to-3D pipeline once.

    Returns:
        ShapEPipeline: the pretrained pipeline identified by the
        module-level ``ckpt_id``, moved to CUDA when a GPU is
        available and to CPU otherwise.

    Note: the original unconditionally called ``.to("cuda")``, which
    raises on CPU-only hosts (the Space shows "Runtime error").
    """
    # Prefer GPU but degrade gracefully instead of crashing.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return ShapEPipeline.from_pretrained(ckpt_id).to(device)
|
83 |
|