Violette's picture
Violette HF staff
Update app.py
b1426aa
raw
history blame
9.71 kB
import os
import subprocess
from huggingface_hub import HfApi, upload_folder
import gradio as gr
import requests
from huggingface_hub import whoami, list_models
def error_str(error, title="Error"):
return f"""#### {title}
{error}""" if error else ""
def url_to_model_id(model_id_str):
return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1] if model_id_str.startswith("https://huggingface.co/") else model_id_str
def has_diffusion_model(model_id, token):
api = HfApi(token=token)
return any([f.endswith("diffusion_pytorch_model.bin") for f in api.list_repo_files(repo_id=model_id)])
def get_my_model_names(token):
try:
author = whoami(token=token)
model_infos = list_models(author=author["name"], use_auth_token=token)
model_names = []
for model_info in model_infos:
model_id = model_info.modelId
if has_diffusion_model(model_id, token):
model_names.append(model_id)
# if not model_names:
# return [], Exception("No diffusion models found in your account.")
return model_names, None
except Exception as e:
return [], e
def on_token_change(token):
if token:
model_names, error = get_my_model_names(token)
return gr.update(visible=not error), gr.update(choices=model_names, label="Select a model:"), error_str(error)
else:
return gr.update(visible=False), gr.update(choices=[], label="Select a model:"), None
def on_load_model(user_model_id, other_model_id, token):
if not user_model_id and not other_model_id:
return None, None, None, None, gr.update(value=error_str("Please enter a model ID."))
try:
model_id = url_to_model_id(other_model_id) if other_model_id else user_model_id
original_model_id = model_id
if not has_diffusion_model(model_id, token):
return None, None, None, None, gr.update(value=error_str("There are no diffusion weights in the model you selected."))
user = whoami(token=token)
model_id = user["name"] + "/" + model_id.split("/")[-1]
title = " ".join([w.capitalize() for w in model_id.split("/")[-1].replace("-", " ").replace("_", " ").split(" ")])
description = f"""Demo for <a href="https://huggingface.co/{original_model_id}">{title}</a> Stable Diffusion model."""
return gr.update(visible=True), gr.update(value=model_id), gr.update(value=title), gr.update(value=description), None
except Exception as e:
return None, None, None, None, gr.update(value=error_str(e))
def create_and_push(space_type, hardware, private_space, other_model_name, radio_model_names, model_id, title, description, prefix, update, token):
try:
# 1. Create the new space
api = HfApi(token=token)
repo_url = api.create_repo(
repo_id=model_id,
exist_ok=update,
repo_type="space",
space_sdk="gradio",
private=private_space
)
api_url = f'https://huggingface.co/api/spaces/{model_id}'
headers = { "Authorization" : f"Bearer {token}"}
# add HUGGING_FACE_HUB_TOKEN secret to new space
requests.post(f'{api_url}/secrets', json={"key":"HUGGING_FACE_HUB_TOKEN","value":token}, headers=headers)
# set new Space Hardware flavor
requests.post(f'{api_url}/hardware', json={'flavor': hardware}, headers=headers)
# 2. Replace the name, title, and description in the template
with open("template/app_simple.py" if space_type == "Simple" else "template/app_advanced.py", "r") as f:
app = f.read()
app = app.replace("$model_id", url_to_model_id(other_model_name) if other_model_name else radio_model_names)
app = app.replace("$title", title)
app = app.replace("$description", description)
app = app.replace("$prefix", prefix)
app = app.replace("$space_id", whoami(token=token)["name"] + "/" + model_id.split("/")[-1])
# 3. save the new app.py file
with open("app.py", "w") as f:
f.write(app)
# 4. Upload the new app.py to the space
api.upload_file(
path_or_fileobj="app.py",
path_in_repo="app.py",
repo_id=model_id,
token=token,
repo_type="space",
)
# 5. Upload template/requirements.txt to the space
if space_type == "Advanced":
api.upload_file(
path_or_fileobj="template/requirements.txt",
path_in_repo="requirements.txt",
repo_id=model_id,
token=token,
repo_type="space",
)
# 5. Delete the app.py file
os.remove("app.py")
return f"""Successfully created space at: <a href="{repo_url}" target="_blank">{repo_url}</a>"""
except Exception as e:
return error_str(e)
DESCRIPTION = """### Create a gradio space for your Diffusers🧨 model
With this space, you can easily create a gradio demo for your Diffusers model and share it with the community.
1️⃣ Make sure you have created your hugging face account
2️⃣ Generate a token here with write access
3️⃣ Choose a stable diffusion base model, there are thousands of them here
4️⃣ Choose Space type
5️⃣ Choose the new Space Hardware
It is done.
"""
with gr.Blocks() as demo:
gr.Markdown(DESCRIPTION)
with gr.Row():
with gr.Column(scale=11):
with gr.Column():
gr.Markdown("#### 1. Choose a model")
input_token = gr.Textbox(
max_lines=1,
type="password",
label="Enter your Hugging Face token",
placeholder="WRITE permission is required!",
)
gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)")
with gr.Group(visible=False) as group_model:
radio_model_names = gr.Radio(label="Your models:")
other_model_name = gr.Textbox(label="Other model:", placeholder="URL or model id, e.g. username/model_name")
btn_load = gr.Button(value="Load model")
with gr.Column(scale=10):
with gr.Column(visible=False) as group_create:
gr.Markdown("#### 2. Enter details and create the space")
name = gr.Textbox(label="Name", placeholder="e.g. diffusers-demo")
title = gr.Textbox(label="Title", placeholder="e.g. Diffusers Demo")
description = gr.Textbox(label="Description", placeholder="e.g. Demo for my awesome Diffusers model", lines=5)
prefix = gr.Textbox(label="Prefix tokens", placeholder="Tokens that are required to be present in the prompt, e.g. `rick and morty style`")
gr.Markdown("""#### Choose space type
- **Simple** - Runs on GPU using Hugging Face inference API, but you cannot control image generation parameters.
- **Advanced** - Runs on CPU by default, with the option to upgrade to GPU. You can control image generation parameters: guidance, number of steps, image size, etc. Also supports **image-to-image** generation.""")
space_type =gr.Radio(label="Space type", choices=["Simple", "Advanced"], value="Simple")
update = gr.Checkbox(label="Update the space if it already exists?")
private_space = gr.Checkbox(label="Private Space")
gr.Markdown("Choose the new Space Hardware <small>[check pricing page](https://huggingface.co/pricing#spaces), you need payment method to upgrade your Space hardware</small>")
hardware = gr.Dropdown(["cpu-basic","cpu-upgrade","t4-small","t4-medium","a10g-small","a10g-large"],value = "cpu-basic", label="Space Hardware")
brn_create = gr.Button("Create the space")
error_output = gr.Markdown(label="Output")
input_token.change(
fn=on_token_change,
inputs=input_token,
outputs=[group_model, radio_model_names, error_output],
queue=False,
scroll_to_output=True)
btn_load.click(
fn=on_load_model,
inputs=[radio_model_names, other_model_name, input_token],
outputs=[group_create, name, title, description, error_output],
queue=False,
scroll_to_output=True)
brn_create.click(
fn=create_and_push,
inputs=[space_type, hardware, private_space, other_model_name, radio_model_names, name, title, description, prefix, update, input_token],
outputs=[error_output],
scroll_to_output=True
)
# gr.Markdown("""<img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/imgs/diffusers_library.jpg" width="150"/>""")
gr.HTML("""
<div style="border-top: 1px solid #303030;">
<br>
<p>Space by: <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a></p><br>
<a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
<p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.sd-space-creator" alt="visitors"></p>
</div>
""")
demo.queue()
demo.launch(debug=True)