import gradio as gr
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

# Load the SDXL image-to-image pipeline in half precision
pipeline = AutoPipelineForImage2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)

# Apply the fine-tuned LoRA weights
pipeline.load_lora_weights('pytorch_lora_weights_00.safetensors')

# enable_model_cpu_offload() manages device placement itself (submodules are moved
# to the GPU only while they run), so no explicit pipeline.to("cuda") is needed here.
pipeline.enable_model_cpu_offload()

# Define the image generation function used by the Gradio interface
def generate_image(prompt, image_url):
    # Fetch the source image and resize it to a 1024x576 working resolution
    init_image = load_image(image_url)
    image = init_image.resize((1024, 576))
    # strength=0.5 adds a moderate amount of noise, balancing the prompt against the input image
    image_out = pipeline(prompt, image=image, strength=0.5).images[0]
    return image_out
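
# Example inputs for a quick manual test without the web UI (the prompt and URL come
# from an earlier revision of this script; the output filename is only illustrative):
# example_url = "https://img.onmanorama.com/content/dam/mm/en/lifestyle/decor/images/2020/12/1/25-lakh-living-hall.jpg.transform/576x300/image.jpg"
# example_prompt = "A cozy Indian living room glows with morning sunshine on Republic Day, its walls decked in saffron, white, and green tapestries and art, while colorful cushions and festive garlands add a vibrant, celebratory air."
# generate_image(example_prompt, example_url).save("republic_day_living_room.png")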

# Set up Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs=[gr.Textbox(label="Prompt"), gr.Textbox(label="Image URL")],
    outputs="image",
)

# Launch the Gradio app
iface.launch()