Spaces: Running on Zero
Update app.py
Commit 8c4b099 • committed by Walid-Ahmed
1 Parent(s): 6187e8f
app.py CHANGED
@@ -2,16 +2,20 @@ import spaces
 import gradio as gr
 import torch
 from diffusers import StableDiffusionPipeline
-from huggingface_hub import snapshot_download
+from huggingface_hub import snapshot_download,login
 from transformers import pipeline
 from PIL import Image
 import os
 
+
 # Retrieve the API token from the environment variable
 huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
 if huggingface_token is None:
     raise ValueError("HUGGINGFACE_TOKEN environment variable is not set.")
 
+# Log in to Hugging Face
+login(token=huggingface_token)
+
 # Check if CUDA is available
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -30,7 +34,6 @@ model_path = snapshot_download(
     local_dir="stable-diffusion-3-medium",
     token=huggingface_token
 )
-#image_gen = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
 image_gen = StableDiffusion3Pipeline.from_pretrained(model_path, text_encoder_3=None, tokenizer_3=None,torch_dtype=torch.float16)
 
 image_gen = image_gen.to(device)
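For context, a minimal sketch of how app.py fits together after this commit: the token is read from the Space's secrets, login() authenticates the session, and the gated model is downloaded before the pipeline is built. The repo_id, prompt, and output filename below are assumptions for illustration only; the hunks above show just the local_dir and token arguments of snapshot_download.

import os
import torch
from diffusers import StableDiffusion3Pipeline
from huggingface_hub import snapshot_download, login

# Read the token from the environment, as in app.py
huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
if huggingface_token is None:
    raise ValueError("HUGGINGFACE_TOKEN environment variable is not set.")

# Authenticate the whole session (the call added by this commit)
login(token=huggingface_token)

# repo_id is assumed here; the diff above elides it
model_path = snapshot_download(
    repo_id="stabilityai/stable-diffusion-3-medium",
    local_dir="stable-diffusion-3-medium",
    token=huggingface_token,
)

device = "cuda" if torch.cuda.is_available() else "cpu"
image_gen = StableDiffusion3Pipeline.from_pretrained(
    model_path, text_encoder_3=None, tokenizer_3=None, torch_dtype=torch.float16
)
image_gen = image_gen.to(device)

# Illustrative usage, not part of the commit
image = image_gen("a watercolor fox in a forest").images[0]
image.save("sample.png")

Calling login() registers the token with huggingface_hub for the whole process, so later hub requests (including ones made internally by diffusers) are authenticated without passing token= each time; the explicit token= in snapshot_download then becomes redundant but harmless.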