Nymbo committed on
Commit
0824d60
1 Parent(s): 509cb06

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +111 -110
app.py CHANGED
@@ -1,123 +1,99 @@
1
  import gradio as gr
2
- import requests
3
- import io
4
- import random
 
 
 
5
  import os
6
- import time
7
- from PIL import Image
8
- import json
9
  from threading import RLock
 
 
10
 
11
- # Project by Nymbo
12
 
13
- # Base API URL for Hugging Face inference
14
- API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
15
- # Retrieve the API token from environment variables
16
- API_TOKEN = os.getenv("HF_READ_TOKEN")
17
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
18
- # Timeout for requests
19
- timeout = 100
 
 
 
 
 
20
 
21
- lock = RLock()
22
 
23
- # Function to query the Hugging Face API for image generation
24
- def query(prompt, model, negative_prompt, steps, cfg_scale, sampler, seed, strength, width, height):
25
- # Debug log to indicate function start
26
- print("Starting query function...")
27
- # Print the parameters for debugging purposes
28
- print(f"Prompt: {prompt}")
29
- print(f"Model: {model}")
30
- print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")
31
-
32
- # Check if the prompt is empty or None
33
- if prompt == "" or prompt is None:
34
- print("Prompt is empty or None. Exiting query function.") # Debug log
35
- return None
36
 
37
- # Randomly select an API token from available options to distribute the load
38
- API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])
39
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
40
- print(f"Selected API token: {API_TOKEN}") # Debug log
41
-
42
- # Enhance the prompt with additional details for better quality
43
- prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
44
- print(f'Generation: {prompt}') # Debug log
45
-
46
- # Set the API URL based on the selected model
47
- if model == 'Stable Diffusion XL':
48
- API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
49
- # Add more model options as needed
50
- print(f"API URL set to: {API_URL}") # Debug log
51
-
52
- # Define the payload for the request
53
- payload = {
54
- "inputs": prompt,
55
- "negative_prompt": negative_prompt,
56
- "steps": steps, # Number of sampling steps
57
- "cfg_scale": cfg_scale, # Scale for controlling adherence to prompt
58
- "seed": seed if seed != -1 else random.randint(1, 1000000000), # Random seed for reproducibility
59
- "strength": strength, # How strongly the model should transform the image
60
- "parameters": {
61
- "width": width, # Width of the generated image
62
- "height": height # Height of the generated image
63
- }
64
- }
65
- print(f"Payload: {json.dumps(payload, indent=2)}") # Debug log
66
-
67
- # Make a request to the API to generate the image
68
- try:
69
- response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
70
- print(f"Response status code: {response.status_code}") # Debug log
71
- except requests.exceptions.RequestException as e:
72
- # Log any request exceptions and raise an error for the user
73
- print(f"Request failed: {e}") # Debug log
74
- raise gr.Error(f"Request failed: {e}")
75
-
76
- # Check if the response status is not successful
77
- if response.status_code != 200:
78
- print(f"Error: Failed to retrieve image. Response status: {response.status_code}") # Debug log
79
- print(f"Response content: {response.text}") # Debug log
80
- if response.status_code == 400:
81
- raise gr.Error(f"{response.status_code}: Bad Request - There might be an issue with the input parameters.")
82
- elif response.status_code == 401:
83
- raise gr.Error(f"{response.status_code}: Unauthorized - Please check your API token.")
84
- elif response.status_code == 403:
85
- raise gr.Error(f"{response.status_code}: Forbidden - You do not have permission to access this model.")
86
- elif response.status_code == 404:
87
- raise gr.Error(f"{response.status_code}: Not Found - The requested model could not be found.")
88
- elif response.status_code == 503:
89
- raise gr.Error(f"{response.status_code}: The model is being loaded. Please try again later.")
90
- else:
91
- raise gr.Error(f"{response.status_code}: An unexpected error occurred.")
92
-
93
  try:
94
- # Attempt to read the image from the response content
95
- image_bytes = response.content
96
- image = Image.open(io.BytesIO(image_bytes))
97
- print(f'Generation completed! ({prompt})') # Debug log
 
 
 
 
 
 
 
98
  return image
99
- except Exception as e:
100
- # Handle any errors that occur when opening the image
101
- print(f"Error while trying to open image: {e}") # Debug log
102
- return None
103
-
104
- # Custom CSS to hide the footer in the interface
105
- css = """
106
- * {}
107
- footer {visibility: hidden !important;}
108
- """
109
 
110
- print("Initializing Gradio interface...") # Debug log
111
 
112
- # Define the Gradio interface
113
- with gr.Blocks(theme='Nymbo/Nymbo_Theme') as demo:
114
- # Tab for basic settings
115
- with gr.Tab('Basic Settings'):
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  txt_input = gr.Textbox(label='Your prompt:', lines=4)
117
- model = gr.Radio(label="Select a model", value="Stable Diffusion XL", choices=["Stable Diffusion XL", "Stable Diffusion 3", "FLUX.1 [Schnell]", "RealVisXL v4.0", "Duchaiten Real3D NSFW XL", "Tempest v0.1"], interactive=True)
118
- gen_button = gr.Button('Generate Image')
 
 
 
 
 
 
119
 
120
- # Tab for advanced settings
121
  with gr.Tab("Advanced Settings"):
122
  with gr.Row():
123
  # Textbox for specifying elements to exclude from the image
@@ -143,9 +119,34 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme') as demo:
143
  # Radio buttons for selecting the sampling method
144
  method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
145
 
146
- # Set up button click event to call the query function
147
- gen_button.click(query, inputs=[txt_input, model, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=gr.Image(type="pil", label="Generated Image"))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
 
149
- print("Launching Gradio interface...") # Debug log
150
- # Launch the Gradio interface without showing the API or sharing externally
151
  demo.launch(show_api=False, max_threads=400)
 
1
  import gradio as gr
2
+ from random import randint
3
+ from all_models import models
4
+
5
+ from externalmod import gr_Interface_load, randomize_seed
6
+
7
+ import asyncio
8
  import os
 
 
 
9
  from threading import RLock
10
# Serializes access to the shared output file written by infer().
lock = RLock()
# HF token for private/gated models; None when the env var is unset
# (os.environ.get already returns None by default, so no conditional needed).
HF_TOKEN = os.environ.get("HF_TOKEN")
12
 
 
13
 
14
def load_fn(models):
    """Populate the global ``models_load`` dict with a Gradio interface per model.

    Models that fail to load fall back to a no-op text->image Interface so the
    UI can still render a slot for them instead of crashing at startup.
    """
    global models_load
    models_load = {}

    for model in models:
        # models_load was just reset, so this guard only skips duplicate
        # names within the `models` list itself.
        if model not in models_load:
            try:
                m = gr_Interface_load(f'models/{model}', hf_token=HF_TOKEN)
            except Exception as error:
                print(error)
                # Placeholder interface: accepts text, always yields no image.
                m = gr.Interface(lambda: None, ['text'], ['image'])
            models_load[model] = m
26
 
 
27
 
28
# Build the model-name -> interface mapping once at import time.
load_fn(models)


# Number of side-by-side image slots shown in the UI.
num_models = 6

# The first `num_models` entries are pre-selected by default.
default_models = models[:num_models]
# Per-model generation timeout, in seconds.
inference_timeout = 600
# Upper bound for the seed slider.
MAX_SEED = 3999999999
# Initial slider value: a pseudo-random year-like seed.
starting_seed = randint(1941, 2024)
37
+
38
def extend_choices(choices, limit=None):
    """Return `choices` truncated/padded with 'NA' to exactly `limit` entries.

    `limit` defaults to the module-level `num_models`, so existing callers
    (which pass only `choices`) behave exactly as before.
    """
    if limit is None:
        limit = num_models
    kept = choices[:limit]
    return kept + ['NA'] * (limit - len(kept))
40
+
41
+
42
def update_imgbox(choices):
    """Rebuild the output images: visible for chosen models, hidden for 'NA' pads."""
    padded = extend_choices(choices[:num_models])
    return [gr.Image(None, label=name, visible=(name != 'NA')) for name in padded]
45
+
46
async def infer(model_str, prompt, seed=1, timeout=inference_timeout):
    """Run one model's generation in a worker thread with a timeout.

    Saves the resulting image to "image.png" (serialized by the module lock)
    and returns its absolute path as a string, or None on failure/timeout.
    """
    from pathlib import Path
    kwargs = {"seed": seed}
    noise = ""
    task = asyncio.create_task(asyncio.to_thread(models_load[model_str].fn,
                            prompt=f'{prompt} {noise}', **kwargs, token=HF_TOKEN))
    # Yield once so the task gets a chance to start before we wait on it.
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {model_str}")
        if not task.done(): task.cancel()
        result = None
    # Guard clause: bail out unless the task finished with a usable image.
    if not (task.done() and result is not None):
        return None
    with lock:
        # Shared fixed filename — the lock serializes concurrent writers.
        out_file = "image.png"
        result.save(out_file)
        return str(Path(out_file).resolve())
 
 
 
 
 
 
 
 
 
68
 
 
69
 
70
def gen_fnseed(model_str, prompt, seed=1):
    """Synchronous wrapper around `infer`: run it to completion on a fresh loop.

    Returns the image path produced by `infer`, or None for the 'NA'
    placeholder slot or on any failure/cancellation.
    """
    if model_str == 'NA':
        return None
    # Create the loop BEFORE the try block: if creation were inside `try` and
    # failed, `finally` would hit an unbound `loop` (NameError masking the
    # real error).
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_str, prompt, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        # CancelledError is a BaseException since Python 3.8, so it must be
        # listed explicitly alongside Exception.
        print(e)
        print(f"Task aborted: {model_str}")
        result = None
    finally:
        loop.close()
    return result
83
+
84
+ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
85
+ gr.HTML("<h1>My Gradio Interface</h1>") # Add this line to include the HTML title
86
+ with gr.Tab('Compare-6'):
87
  txt_input = gr.Textbox(label='Your prompt:', lines=4)
88
+ gen_button = gr.Button('Generate up to 6 images in up to 3 minutes total')
89
+ with gr.Row():
90
+ seed = gr.Slider(label="Use a seed to replicate the same image later (maximum 3999999999)", minimum=0, maximum=MAX_SEED, step=1, value=starting_seed, scale=3)
91
+ seed_rand = gr.Button("Randomize Seed 🎲", size="sm", variant="secondary", scale=1)
92
+ seed_rand.click(randomize_seed, None, [seed], queue=False)
93
+ #stop_button = gr.Button('Stop', variant = 'secondary', interactive = False)
94
+
95
+ gen_button.click(lambda s: gr.update(interactive = True), None)
96
 
 
97
  with gr.Tab("Advanced Settings"):
98
  with gr.Row():
99
  # Textbox for specifying elements to exclude from the image
 
119
  # Radio buttons for selecting the sampling method
120
  method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
121
 
122
+ gr.HTML(
123
+ """
124
+ <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
125
+ <div>
126
+ <body>
127
+ <div class="center"><p style="margin-bottom: 10px; color: #000000;">Scroll down to see more images and select models.</p>
128
+ </div>
129
+ </body>
130
+ </div>
131
+ </div>
132
+ """
133
+ )
134
+ with gr.Row():
135
+ output = [gr.Image(label = m, min_width=480) for m in default_models]
136
+ current_models = [gr.Textbox(m, visible = False) for m in default_models]
137
+
138
+ for m, o in zip(current_models, output):
139
+ gen_event = gr.on(triggers=[gen_button.click, txt_input.submit], fn=gen_fnseed,
140
+ inputs=[m, txt_input, seed], outputs=[o], concurrency_limit=None, queue=False)
141
+ #stop_button.click(lambda s: gr.update(interactive = False), None, stop_button, cancels = [gen_event])
142
+ with gr.Accordion('Model selection'):
143
+ model_choice = gr.CheckboxGroup(models, label = f'Choose up to {int(num_models)} different models from the {len(models)} available!', value=default_models, interactive=True)
144
+ #model_choice = gr.CheckboxGroup(models, label = f'Choose up to {num_models} different models from the 2 available! Untick them to only use one!', value = default_models, multiselect = True, max_choices = num_models, interactive = True, filterable = False)
145
+ model_choice.change(update_imgbox, model_choice, output)
146
+ model_choice.change(extend_choices, model_choice, current_models)
147
+ with gr.Row():
148
+ gr.HTML(
149
+ )
150
 
151
+ demo.queue(default_concurrency_limit=200, max_size=200)
 
152
  demo.launch(show_api=False, max_threads=400)