import random
import io
import zipfile
import requests
import json
import base64
import math
import gradio as gr
from PIL import Image
# Shared authentication state and the image-generation endpoint
jwt_token = ''
url = "https://image.novelai.net/ai/generate-image"
headers = {}
def set_token(token):
    """Store the NovelAI persistent API token and build the shared request headers."""
    global jwt_token, headers
    if jwt_token == token:
        return
    jwt_token = token
    headers = {
        "Authorization": f"Bearer {jwt_token}",
        "Content-Type": "application/json",
        "Origin": "https://novelai.net",
        "Referer": "https://novelai.net/"
    }
def get_remain_anlas():
    """Query the account endpoint and return the remaining Anlas balance."""
    try:
        data = requests.get("https://api.novelai.net/user/data", headers=headers).content
        anlas = json.loads(data)['subscription']['trainingStepsLeft']
        return anlas['fixedTrainingStepsLeft'] + anlas['purchasedTrainingSteps']
    except Exception as e:
        return 'Failed to fetch Anlas, err: ' + str(e)
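# Illustrative usage sketch (not part of the original module): authenticate with a
# NovelAI persistent API token and print the remaining Anlas balance. The function
# name and token value below are hypothetical placeholders.
def _example_check_balance():
    set_token("pst-XXXX")  # placeholder token, supply your own
    print(get_remain_anlas())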
def calculate_cost(width, height, steps=28, sm=False, dyn=False, strength=1, rmbg=False):
    """Estimate the Anlas cost of a request with the coefficients used below."""
    pixels = width * height
    # Requests at or below 1024x1024 pixels and 28 steps, without background removal, are treated as free
    if pixels <= 1048576 and steps <= 28 and not rmbg:
        return 0
    dyn = sm and dyn
    L = math.ceil(2.951823174884865e-06 * pixels + 5.753298233447344e-07 * pixels * steps)
    L *= 1.4 if dyn else (1.2 if sm else 1)
    L = math.ceil(L * strength)
    return L * 3 + 5 if rmbg else L
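# Worked example (illustrative, not part of the original module): with the coefficients
# above, a 1024x1024 request at 28 steps is free, while a 1024x1536 request is charged.
def _example_cost():
    print(calculate_cost(1024, 1024, steps=28))           # 0 (within the free threshold)
    print(calculate_cost(1024, 1536, steps=28))           # ~30 Anlas with these coefficients
    print(calculate_cost(1024, 1536, steps=28, sm=True))  # SMEA multiplies the base cost by 1.2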
def generate_novelai_image(
    input_text="",
    negative_prompt="",
    seed=-1,
    scale=5.0,
    width=1024,
    height=1024,
    steps=28,
    sampler="k_euler",
    schedule='native',
    smea=False,
    dyn=False,
    dyn_threshold=False,
    cfg_rescale=0,
    ref_images=None,
    info_extracts=[],
    ref_strs=[],
    i2i_image=None,
    i2i_str=0.7,
    i2i_noise=0,
    overlay=True,
    inp_img=None,
    selection='i2i'
):
    # Assign a random seed if seed is -1
    if seed == -1:
        seed = random.randint(0, 2**32 - 1)
    # Define the payload
    payload = {
        "action": "generate",
        "input": input_text,
        "model": "nai-diffusion-3",
        "parameters": {
            "width": width,
            "height": height,
            "scale": scale,
            "sampler": sampler,
            "steps": steps,
            "n_samples": 1,
            "ucPreset": 0,
            "add_original_image": overlay,
            "cfg_rescale": cfg_rescale,
            "controlnet_strength": 1,
            "dynamic_thresholding": dyn_threshold,
            "params_version": 1,
            "legacy": False,
            "legacy_v3_extend": False,
            "negative_prompt": negative_prompt,
            "noise": i2i_noise,
            "noise_schedule": schedule,
            "qualityToggle": True,
            "reference_information_extracted_multiple": info_extracts,
            "reference_strength_multiple": ref_strs,
            "seed": seed,
            "sm": smea,
            "sm_dyn": dyn,
            "uncond_scale": 1
        }
    }
    if ref_images is not None:
        # Vibe transfer: attach each reference image as base64
        payload['parameters']['reference_image_multiple'] = [image2base64(image[0]) for image in ref_images]
    if selection == 'inp' and inp_img['background'].getextrema()[3][1] > 0:
        # Inpainting: only switch modes if the uploaded background has any non-transparent pixels
        payload['action'] = "infill"
        payload['model'] = 'nai-diffusion-3-inpainting'
        payload['parameters']['mask'] = image2base64(inp_img['layers'][0])
        payload['parameters']['image'] = image2base64(inp_img['background'])
        payload['parameters']['extra_noise_seed'] = seed
    if i2i_image is not None and selection == 'i2i':
        # img2img: send the source image plus denoise strength and added noise
        payload['action'] = "img2img"
        payload['parameters']['image'] = image2base64(i2i_image)
        payload['parameters']['strength'] = i2i_str
        payload['parameters']['extra_noise_seed'] = seed
    # Send the POST request
    try:
        response = requests.post(url, json=payload, headers=headers, timeout=180)
    except requests.exceptions.RequestException:
        raise gr.Error('NAI request timed out or failed')
    # Process the response: success returns a zip archive containing the image
    if response.headers.get('Content-Type') == 'binary/octet-stream':
        zipfile_in_memory = io.BytesIO(response.content)
        with zipfile.ZipFile(zipfile_in_memory, 'r') as zip_ref:
            file_names = zip_ref.namelist()
            if file_names:
                with zip_ref.open(file_names[0]) as file:
                    return file.read(), payload
            else:
                raise gr.Error('NAI returned an empty archive')
    else:
        messages = json.loads(response.content)
        raise gr.Error(str(messages["statusCode"]) + ": " + messages["message"])
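# Illustrative usage sketch (not part of the original module): a plain text-to-image
# call followed by saving the returned image. The prompt text, dimensions, and output
# path are arbitrary placeholders; set_token() must have been called first.
def _example_txt2img():
    image_bytes, used_payload = generate_novelai_image(
        input_text="1girl, best quality, masterpiece",
        negative_prompt="lowres, bad anatomy",
        width=832,
        height=1216,
        steps=28,
    )
    image_from_bytes(image_bytes).save("txt2img_example.png")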
def image_from_bytes(data):
    img_file = io.BytesIO(data)
    img_file.seek(0)
    return Image.open(img_file)
def image2base64(img):
    output_buffer = io.BytesIO()
    # Keep the alpha channel for RGBA images (masks/inpainting); otherwise use JPEG
    img.save(output_buffer, format='PNG' if img.mode == 'RGBA' else 'JPEG')
    byte_data = output_buffer.getvalue()
    base64_str = base64.b64encode(byte_data).decode()
    return base64_str
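# Illustrative sketch (not part of the original module): the two helpers above convert
# between PIL images, raw bytes, and the base64 strings the API expects. The file name
# is a placeholder.
def _example_roundtrip():
    img = Image.open("reference.png").convert("RGBA")
    b64 = image2base64(img)        # RGBA images are re-encoded as PNG
    raw = base64.b64decode(b64)
    return image_from_bytes(raw)   # back to a PIL image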
def augment_image(image, width, height, req_type, selection, factor=1, defry=0, prompt=''):
    if selection == "scale":
        width = int(width * factor)
        height = int(height * factor)
        image = image.resize((width, height))
    # Map the Chinese UI labels to augment-image request types:
    # 移除背景 = remove background, 素描 = sketch, 线稿 = lineart,
    # 上色 = colorize, 更改表情 = change emotion, 去聊天框 = remove chat boxes (declutter)
    req_type = {"移除背景": "bg-removal", "素描": "sketch", "线稿": "lineart", "上色": "colorize", "更改表情": "emotion", "去聊天框": "declutter"}[req_type]
    base64img = image2base64(image)
    payload = {"image": base64img, "width": width, "height": height, "req_type": req_type}
    if req_type == "colorize" or req_type == "emotion":
        # Colorize and emotion additionally take a strength ("defry") and a text prompt
        payload["defry"] = defry
        payload["prompt"] = prompt
    try:
        response = requests.post("https://image.novelai.net/ai/augment-image", json=payload, headers=headers, timeout=60)
    except requests.exceptions.RequestException:
        raise gr.Error('NAI request timed out or failed')
    # Process the response: success returns a zip archive with one or more images
    if response.headers.get('Content-Type') == 'binary/octet-stream':
        zipfile_in_memory = io.BytesIO(response.content)
        with zipfile.ZipFile(zipfile_in_memory, 'r') as zip_ref:
            if len(zip_ref.namelist()):
                images = []
                for file_name in zip_ref.namelist():
                    with zip_ref.open(file_name) as file:
                        images.append(image_from_bytes(file.read()))
                return images
            else:
                raise gr.Error('NAI returned an empty archive')
    else:
        messages = json.loads(response.content)
        raise gr.Error(str(messages["statusCode"]) + ": " + messages["message"])
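# Illustrative usage sketch (not part of the original module): run background removal
# on a local image. The file name is a placeholder, the label must be one of the Chinese
# keys mapped above, and `selection` is just any value other than "scale" so no resize
# is applied.
def _example_bg_removal():
    img = Image.open("photo.png").convert("RGB")
    results = augment_image(img, img.width, img.height, "移除背景", selection="none")
    for i, out in enumerate(results):
        out.save(f"bg_removed_{i}.png")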