import spaces
import os

import gradio as gr
import numpy as np
import torch
from pytorch_lightning import seed_everything

from model_lib.utils import parse_args
from model_lib.modules import MoMA_main_modal

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

title = "MoMA"
description = "This demo is running on ZeroGPU with 4-bit quantization. Please find our project page at https://moma-adapter.github.io"

device = torch.device("cuda")
seed_everything(0)

# Build the MoMA model once at startup and keep it in half precision on the GPU.
args = parse_args()
model = MoMA_main_modal(args).to(device, dtype=torch.float16)


@spaces.GPU(duration=200)
def inference(rgb, subject, prompt, strength, seed):
    global model
    # Treat an empty or zero seed box as "random" and draw a seed in [0, 1000).
    seed = int(seed) if seed else 0
    if seed == 0:
        seed = np.random.randint(0, 1000)
    generated_image = model.generate_images(rgb, subject, prompt, strength=strength, seed=seed)
    return generated_image


gr.Interface(
    inference,
    [
        gr.Image(type="pil", label="Input RGB"),
        gr.Textbox(lines=1, label="Subject"),
        gr.Textbox(lines=1, label="Prompt"),
        gr.Slider(
            minimum=0.2,
            maximum=1.2,
            step=0.1,
            value=1.0,
            label="Strength. Recommended: 1.0 for context editing; 0.4 for texture editing",
        ),
        gr.Textbox(lines=1, label="Seed. Use 0 for a random seed"),
    ],
    gr.Image(type="pil", label="Output"),
    title=title,
    description=description,
    examples=[
        ["example_images/newImages/3.jpg", "car", "A car in autumn with falling leaves.", 1.0, "6"],
        ["example_images/newImages/3.jpg", "car", "A wooden sculpture of a car on a table.", 0.4, "4"],
        ["example_images/newImages/2.jpg", "car", "A car on a city road with green trees and buildings.", 1.0, "4"],
        ["example_images/newImages/03.jpg", "cat", "A cat at the Grand Canyon.", 1.0, "2"],
        ["example_images/newImages/02.jpg", "dog", "A dog in a spring garden with flowers.", 1.0, "6"],
        ["example_images/newImages/1.jpeg", "bird", "A bird in spring with flowers.", 1.0, "1"],
        ["example_images/newImages/17.jpg", "robot", "A robot in autumn mountain and lake.", 1, "5"],
    ],
    allow_flagging="never",
).launch(debug=False)