# RNPD / scripts / mainrunpodA1111.py
import os
from IPython.display import clear_output, display
from subprocess import call, getoutput, Popen, run
import time
import ipywidgets as widgets
import requests
import sys
import fileinput
from torch.hub import download_url_to_file
from urllib.parse import urlparse, parse_qs, unquote
import re
import six
import gdown
from urllib.request import urlopen, Request
import tempfile
from tqdm import tqdm
from bs4 import BeautifulSoup
import zipfile
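# Deps(): install the WebUI runtime dependencies from the prebuilt rnpddeps
# tarball unless they are already present, refresh the latest notebooks and
# point the torch/transformers caches at /workspace/cache.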
def Deps(force_reinstall):
if not force_reinstall and os.path.exists(
"/usr/local/lib/python3.10/dist-packages/safetensors"
):
ntbks()
print("[1;32mModules and notebooks updated, dependencies already installed")
os.environ["TORCH_HOME"] = "/workspace/cache/torch"
os.environ["PYTHONWARNINGS"] = "ignore"
else:
call(
"pip install --root-user-action=ignore --disable-pip-version-check --no-deps -qq gdown PyWavelets numpy==1.23.5 accelerate==0.12.0 --force-reinstall",
shell=True,
stdout=open("/dev/null", "w"),
)
ntbks()
if os.path.exists("deps"):
call("rm -r deps", shell=True)
if os.path.exists("diffusers"):
call("rm -r diffusers", shell=True)
call("mkdir deps", shell=True)
if not os.path.exists("cache"):
call("mkdir cache", shell=True)
os.chdir("deps")
dwn(
"https://huggingface.co/TheLastBen/dependencies/resolve/main/rnpddeps-t2.tar.zst",
"/workspace/deps/rnpddeps-t2.tar.zst",
"Installing dependencies",
)
call(
"tar -C / --zstd -xf rnpddeps-t2.tar.zst",
shell=True,
stdout=open("/dev/null", "w"),
)
call(
"sed -i 's@~/.cache@/workspace/cache@' /usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py",
shell=True,
)
os.chdir("/workspace")
call(
"git clone --depth 1 -q --branch main https://github.com/TheLastBen/diffusers",
shell=True,
stdout=open("/dev/null", "w"),
)
call(
"pip install --root-user-action=ignore --disable-pip-version-check -qq gradio==3.41.2",
shell=True,
stdout=open("/dev/null", "w"),
)
call("rm -r deps", shell=True)
os.chdir("/workspace")
os.environ["TORCH_HOME"] = "/workspace/cache/torch"
os.environ["PYTHONWARNINGS"] = "ignore"
call(
"sed -i 's@text = _formatwarnmsg(msg)@text =\"\"@g' /usr/lib/python3.10/warnings.py",
shell=True,
)
clear_output()
done()
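# dwn(): stream a file from `url` to `dst` with a tqdm progress bar labelled `msg`,
# e.g. dwn(link, "/workspace/deps/file.tar.zst", "Installing dependencies").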
def dwn(url, dst, msg):
file_size = None
req = Request(url, headers={"User-Agent": "torch.hub"})
u = urlopen(req)
meta = u.info()
if hasattr(meta, "getheaders"):
content_length = meta.getheaders("Content-Length")
else:
content_length = meta.get_all("Content-Length")
if content_length is not None and len(content_length) > 0:
file_size = int(content_length[0])
with tqdm(
total=file_size,
disable=False,
mininterval=0.5,
bar_format=msg + " |{bar:20}| {percentage:3.0f}%",
) as pbar:
with open(dst, "wb") as f:
while True:
buffer = u.read(8192)
if len(buffer) == 0:
break
f.write(buffer)
pbar.update(len(buffer))
f.close()
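# ntbks(): re-download the latest RNPD notebooks listed in Notebooks.txt
# into /workspace/Latest_Notebooks.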
def ntbks():
os.chdir("/workspace")
if not os.path.exists("Latest_Notebooks"):
call("mkdir Latest_Notebooks", shell=True)
else:
call("rm -r Latest_Notebooks", shell=True)
call("mkdir Latest_Notebooks", shell=True)
os.chdir("/workspace/Latest_Notebooks")
call(
"wget -q -i https://huggingface.co/datasets/TheLastBen/RNPD/raw/main/Notebooks.txt",
shell=True,
)
call("rm Notebooks.txt", shell=True)
os.chdir("/workspace")
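# repo(): restore a previous SD backup from the user's Hugging Face dataset when a
# token is given and a backup exists; otherwise fetch the base repo archive and
# clone/update AUTOMATIC1111's stable-diffusion-webui under /workspace/sd.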
def repo(Huggingface_token_optional):
from slugify import slugify
from huggingface_hub import HfApi, CommitOperationAdd, create_repo
os.chdir("/workspace")
if Huggingface_token_optional != "":
username = HfApi().whoami(Huggingface_token_optional)["name"]
backup = f"https://huggingface.co/datasets/{username}/fast-stable-diffusion/resolve/main/sd_backup_rnpd.tar.zst"
headers = {"Authorization": f"Bearer {Huggingface_token_optional}"}
response = requests.head(backup, headers=headers)
if response.status_code == 302:
print("[1;33mRestoring the SD folder...")
open("/workspace/sd_backup_rnpd.tar.zst", "wb").write(
requests.get(backup, headers=headers).content
)
call("tar --zstd -xf sd_backup_rnpd.tar.zst", shell=True)
call("rm sd_backup_rnpd.tar.zst", shell=True)
else:
print("[1;33mBackup not found, using a fresh/existing repo...")
time.sleep(2)
if not os.path.exists("/workspace/sd/stablediffusiond"): # reset later
call(
"wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst",
shell=True,
)
call("tar --zstd -xf sd_mrep.tar.zst", shell=True)
call("rm sd_mrep.tar.zst", shell=True)
os.chdir("/workspace/sd")
if not os.path.exists("stable-diffusion-webui"):
call(
"git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui",
shell=True,
)
else:
print("[1;33mInstalling/Updating the repo...")
os.chdir("/workspace")
if not os.path.exists("/workspace/sd/stablediffusiond"): # reset later
call(
"wget -q -O sd_mrep.tar.zst https://huggingface.co/TheLastBen/dependencies/resolve/main/sd_mrep.tar.zst",
shell=True,
)
call("tar --zstd -xf sd_mrep.tar.zst", shell=True)
call("rm sd_mrep.tar.zst", shell=True)
os.chdir("/workspace/sd")
if not os.path.exists("stable-diffusion-webui"):
call(
"git clone -q --depth 1 --branch master https://github.com/AUTOMATIC1111/stable-diffusion-webui",
shell=True,
)
os.chdir("/workspace/sd/stable-diffusion-webui/")
call("git reset --hard", shell=True)
print("[1;32m")
call("git pull", shell=True)
os.chdir("/workspace")
clear_output()
done()
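# mdl(): resolve the checkpoint to load: a local path, a downloadable link
# (civitai / Google Drive / direct URL) or one of the bundled original models;
# also symlinks the shared auto-VAE, auto-models and auto-lora folders into the
# webui tree. Returns the model path passed to the webui.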
def mdl(Original_Model_Version, Path_to_MODEL, MODEL_LINK):
import gdown
src = getsrc(MODEL_LINK)
if not os.path.exists("/workspace/sd/stable-diffusion-webui/models/VAE"):
call("mkdir -p /workspace/sd/stable-diffusion-webui/models/VAE", shell=True)
# Get the list of files in /workspace/auto-VAE
files = os.listdir("/workspace/auto-VAE")
for file in files:
source_path = os.path.join("/workspace/auto-VAE", file)
target_path = os.path.join(
"/workspace/sd/stable-diffusion-webui/models/VAE", file
)
# Check whether the symlink already exists
if not os.path.exists(target_path):
call(f"ln -s {source_path} {target_path}", shell=True)
# if os.path.exists('/workspace/sd/stable-diffusion-webui/models/VAE'):
# call('ln -s /workspace/auto-VAE/* /workspace/sd/stable-diffusion-webui/models/VAE', shell=True)
# else:
# call('mkdir -p /workspace/sd/stable-diffusion-webui/models/VAE', shell=True)
# call('ln -s /workspace/auto-VAE/* /workspace/sd/stable-diffusion-webui/models/VAE', shell=True)
if not os.path.exists(
"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/SDv1-5.ckpt"
):
call(
"ln -s /workspace/auto-models/* /workspace/sd/stable-diffusion-webui/models/Stable-diffusion",
shell=True,
)
if not os.path.exists("/workspace/sd/stable-diffusion-webui/models/Lora"):
call("mkdir -p /workspace/sd/stable-diffusion-webui/models/Lora", shell=True)
call(
"ln -s /workspace/auto-lora/* /workspace/sd/stable-diffusion-webui/models/Lora",
shell=True,
)
if Path_to_MODEL != "":
if os.path.exists(str(Path_to_MODEL)):
print("[1;32mUsing the custom model")
model = Path_to_MODEL
else:
print("[1;31mWrong path, check that the path to the model is correct")
elif MODEL_LINK != "":
if src == "civitai":
modelname = get_name(MODEL_LINK, False)
model = f"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}"
if not os.path.exists(model):
dwn(MODEL_LINK, model, "Downloading the custom model")
clear_output()
else:
print("[1;33mModel already exists")
elif src == "gdrive":
modelname = get_name(MODEL_LINK, True)
model = f"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}"
if not os.path.exists(model):
gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
clear_output()
else:
print("[1;33mModel already exists")
else:
modelname = os.path.basename(MODEL_LINK)
model = f"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}"
if not os.path.exists(model):
gdown.download(url=MODEL_LINK, output=model, quiet=False, fuzzy=True)
clear_output()
else:
print("[1;33mModel already exists")
if os.path.exists(model) and os.path.getsize(model) > 1810671599:
print("[1;32mModel downloaded, using the custom model.")
else:
call(
"rm " + model,
shell=True,
stdout=open("/dev/null", "w"),
stderr=open("/dev/null", "w"),
)
print("[1;31mWrong link, check that the link is valid")
else:
if Original_Model_Version == "v1.5":
model = "/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/SDv1-5.ckpt"
print("[1;32mUsing the original V1.5 model")
elif Original_Model_Version == "v2-512":
model = "/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/SDv2-512.ckpt"
if not os.path.exists(
"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/SDv2-512.ckpt"
):
print("[1;33mDownloading the V2-512 model...")
call(
"gdown -O "
+ model
+ " https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-nonema-pruned.ckpt",
shell=True,
)
clear_output()
print("[1;32mUsing the original V2-512 model")
elif Original_Model_Version == "v2-768":
model = "/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/SDv2-768.ckpt"
print("[1;32mUsing the original V2-768 model")
elif Original_Model_Version == "SDXL":
model = "/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/sd_xl_base_1.0.safetensors"
print("[1;32mUsing the original SDXL model")
else:
model = "/workspace/sd/stable-diffusion-webui/models/Stable-diffusion"
print("[1;31mWrong model version, try again")
try:
model
except:
model = "/workspace/sd/stable-diffusion-webui/models/Stable-diffusion"
return model
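# modeldwn(): download an extra checkpoint from a link into models/Stable-diffusion
# (civitai, Google Drive or direct URL).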
def modeldwn(model_LINK):
if model_LINK == "":
print("[1;33mNothing to do")
else:
os.makedirs(
"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion",
exist_ok=True,
)
src = getsrc(model_LINK)
if src == "civitai":
modelname = get_name(model_LINK, False)
loramodel = f"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}"
if not os.path.exists(loramodel):
dwn(model_LINK, loramodel, "Downloading the LoRA model")
clear_output()
else:
print("[1;33mModel already exists")
elif src == "gdrive":
modelname = get_true_name(model_LINK)
loramodel = f"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}"
if not os.path.exists(loramodel):
gdown.download(
url=model_LINK.replace("/file/d/", "/uc?id=").replace("/view", ""),
output=loramodel,
quiet=False,
)
clear_output()
else:
print("[1;33mModel already exists")
else:
modelname = os.path.basename(model_LINK)
loramodel = f"/workspace/sd/stable-diffusion-webui/models/Stable-diffusion/{modelname}"
if not os.path.exists(loramodel):
gdown.download(
url=model_LINK, output=loramodel, quiet=False, fuzzy=True
)
clear_output()
else:
print("[1;33mModel already exists")
if os.path.exists(loramodel):
print("[1;32mCheckpoints downloaded")
else:
print("[1;31mWrong link, check that the link is valid")
def loradwn(LoRA_LINK):
if LoRA_LINK == "":
print("[1;33mNothing to do")
else:
os.makedirs("/workspace/sd/stable-diffusion-webui/models/Lora", exist_ok=True)
src = getsrc(LoRA_LINK)
if src == "civitai":
modelname = get_name(LoRA_LINK, False)
loramodel = f"/workspace/sd/stable-diffusion-webui/models/Lora/{modelname}"
if not os.path.exists(loramodel):
dwn(LoRA_LINK, loramodel, "Downloading the LoRA model")
clear_output()
else:
print("[1;33mModel already exists")
elif src == "gdrive":
modelname = get_true_name(LoRA_LINK)
loramodel = f"/workspace/sd/stable-diffusion-webui/models/Lora/{modelname}"
if not os.path.exists(loramodel):
gdown.download(
url=LoRA_LINK.replace("/file/d/", "/uc?id=").replace("/view", ""),
output=loramodel,
quiet=False,
)
clear_output()
else:
print("[1;33mModel already exists")
else:
modelname = os.path.basename(LoRA_LINK)
loramodel = f"/workspace/sd/stable-diffusion-webui/models/Lora/{modelname}"
if not os.path.exists(loramodel):
gdown.download(url=LoRA_LINK, output=loramodel, quiet=False, fuzzy=True)
clear_output()
else:
print("[1;33mModel already exists")
if os.path.exists(loramodel):
print("[1;32mLoRA downloaded")
else:
print("[1;31mWrong link, check that the link is valid")
def download_and_install_config():
target_directory = "/workspace/sd/stable-diffusion-webui/"
config_url = (
"https://huggingface.co/spaces/stlaurentjr/RNPD/raw/main/config/config.json"
)
config_file_path = os.path.join(target_directory, "config.json")
# Check if the config file already exists
if not os.path.exists(config_file_path):
# Change the directory
os.chdir(target_directory)
# Download the file using curl command
call(f"curl -o config.json {config_url}", shell=True)
print(f"Config file downloaded successfully.")
else:
print("Config file already exists, download skipped.")
def InstallDeforum():
# Change to the extensions directory
os.chdir("/workspace/sd/stable-diffusion-webui/extensions")
# Check whether the extension directory already exists
if not os.path.exists("deforum-for-automatic1111-webui"):
# Clone the repository if it does not exist yet
call(
"git clone https://github.com/deforum-art/sd-webui-deforum.git", shell=True
)
# Return to the root directory
os.chdir("/workspace")
else:
# If the directory exists, switch into it
# os.chdir('deforum-for-automatic1111-webui')
# Reset local changes and update the repository
# call('git reset --hard', shell=True, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
# call('git pull', shell=True, stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
# Return to the root directory
os.chdir("/workspace")
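# InstallReActor(): install the ReActor face-swap extension, its onnxruntime-gpu
# dependency and the inswapper_128.onnx model it needs.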
def InstallReActor():
# Change to the extensions directory
os.chdir("/workspace/sd/stable-diffusion-webui/extensions")
# Check whether the extension directory already exists
if not os.path.exists("sd-webui-reactor"):
# Install the required libraries
from subprocess import call, DEVNULL
call(
'pip install "onnxruntime-gpu>=1.16.1"',
shell=True
)
# Clone the repository if it does not exist yet
call(
"git clone https://github.com/Gourieff/sd-webui-reactor.git",
shell=True,
)
# Create the folder for the models
call(
"mkdir /workspace/sd/stable-diffusion-webui/models/insightface",
shell=True
)
os.chdir("/workspace/sd/stable-diffusion-webui/models/insightface")
# Download the model for ReActor
call(
"wget https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx",
shell=True,
stdout=DEVNULL,
stderr=DEVNULL
)
# Return to the root directory
clear_output()
os.chdir("/workspace")
else:
# Return to the root directory
os.chdir("/workspace")
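# InstallAnimateDiff(): install the AnimateDiff extension and download the
# mm_sd_v15_v2 motion module.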
def InstallAnimateDiff():
# Change to the extensions directory
os.chdir("/workspace/sd/stable-diffusion-webui/extensions")
# Check whether the extension directory already exists
if not os.path.exists("sd-webui-animatediff"):
from subprocess import call, DEVNULL
# Clone the repository if it does not exist yet
call(
"git clone https://github.com/continue-revolution/sd-webui-animatediff.git",
shell=True,
)
# Change to the models folder
os.chdir(
"/workspace/sd/stable-diffusion-webui/extensions/sd-webui-animatediff/model"
)
# Download the motion models for AnimateDiff
call(
"wget https://huggingface.co/guoyww/animatediff/resolve/refs%2Fpr%2F3/mm_sd_v15_v2.ckpt",
shell=True,
stdout=DEVNULL,
stderr=DEVNULL
)
# call(
# "wget https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_high.pth",
# shell=True,
# stdout=DEVNULL,
# stderr=DEVNULL
# )
# Return to the root directory
clear_output()
os.chdir("/workspace")
else:
# Return to the root directory
os.chdir("/workspace")
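# InstallWildCards(): install or update the wildcards extension and unpack a
# wildcard collection downloaded from Google Drive into its wildcards/ folder.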
def InstallWildCards():
# Path to the target directory
target_dir = "/workspace/sd/stable-diffusion-webui/extensions/stable-diffusion-webui-wildcards/wildcards/"
# Change to the extensions directory
os.chdir("/workspace/sd/stable-diffusion-webui/extensions")
if not os.path.exists("stable-diffusion-webui-wildcards"):
# Clone the repository if it does not exist yet
call(
"git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui-wildcards.git",
shell=True,
)
# Change into the cloned repository
os.chdir("stable-diffusion-webui-wildcards")
# Download the file by its Google Drive ID
gdown.download(
id="1sY9Yv29cCYZuxBvszkmLVgw--aRdwT1P", output="wildcards.zip", quiet=False
)
# Create the target directory if it does not exist yet
os.makedirs(target_dir, exist_ok=True)
# Unpack the archive
with zipfile.ZipFile("wildcards.zip", "r") as zip_ref:
zip_ref.extractall(target_dir)
else:
# If the repository already exists, update it
os.chdir("stable-diffusion-webui-wildcards")
call(
"git reset --hard",
shell=True,
stdout=open(os.devnull, "w"),
stderr=open(os.devnull, "w"),
)
call(
"git pull",
shell=True,
stdout=open(os.devnull, "w"),
stderr=open(os.devnull, "w"),
)
# Return to the original working directory
os.chdir("/workspace")
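# CNet(): install or update the ControlNet extension and download the selected
# ControlNet v1 and SDXL models ("all", a single number, or "none").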
def CNet(ControlNet_Model, ControlNet_XL_Model):
def download(url, model_dir):
try:
filename = os.path.basename(urlparse(url).path)
pth = os.path.abspath(os.path.join(model_dir, filename))
if not os.path.exists(pth):
print("Downloading: " + os.path.basename(url))
download_url_to_file(url, pth, hash_prefix=None, progress=True)
else:
print(f"[1;32mThe model {filename} already exists[0m")
except Exception as e:
print(f"Ошибка при скачивании {url}: {e}")
wrngv1 = False
os.chdir("/workspace/sd/stable-diffusion-webui/extensions")
if not os.path.exists("sd-webui-controlnet"):
call(
"git clone https://github.com/Mikubill/sd-webui-controlnet.git", shell=True
)
os.chdir("/workspace")
else:
os.chdir("sd-webui-controlnet")
call(
"git reset --hard",
shell=True,
stdout=open("/dev/null", "w"),
stderr=open("/dev/null", "w"),
)
call(
"git pull",
shell=True,
stdout=open("/dev/null", "w"),
stderr=open("/dev/null", "w"),
)
os.chdir("/workspace")
mdldir = (
"/workspace/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/models"
)
call(
"ln -s /workspace/auto-controlnet-models/* /workspace/sd/stable-diffusion-webui/extensions/sd-webui-controlnet/models",
shell=True,
)
for filename in os.listdir(mdldir):
if "_sd14v1" in filename:
renamed = re.sub("_sd14v1", "-fp16", filename)
os.rename(os.path.join(mdldir, filename), os.path.join(mdldir, renamed))
call(
"wget -q -O CN_models.txt https://huggingface.co/spaces/stlaurentjr/RNPD/raw/main/config/CN_models.txt",
shell=True,
)
call(
"wget -q -O CN_models_XL.txt https://github.com/TheLastBen/fast-stable-diffusion/raw/main/AUTOMATIC1111_files/CN_models_XL.txt",
shell=True,
)
with open("CN_models.txt", "r") as f:
mdllnk = f.read().splitlines()
with open("CN_models_XL.txt", "r") as d:
mdllnk_XL = d.read().splitlines()
call("rm CN_models.txt CN_models_XL.txt", shell=True)
os.chdir("/workspace")
if ControlNet_Model == "All" or ControlNet_Model == "all":
for lnk in mdllnk:
download(lnk, mdldir)
clear_output()
elif ControlNet_Model == "17":
mdllnk = list(filter(lambda x: "t2i" in x, mdllnk))
for lnk in mdllnk:
download(lnk, mdldir)
clear_output()
elif (
ControlNet_Model.isdigit()
and int(ControlNet_Model) - 1 < 16
and int(ControlNet_Model) > 0
):
download(mdllnk[int(ControlNet_Model) - 1], mdldir)
clear_output()
elif ControlNet_Model == "none":
pass
clear_output()
else:
print("[1;31mWrong ControlNet V1 choice, try again")
wrngv1 = True
if ControlNet_XL_Model == "All" or ControlNet_XL_Model == "all":
for lnk_XL in mdllnk_XL:
download(lnk_XL, mdldir)
if not wrngv1:
clear_output()
done()
elif ControlNet_XL_Model.isdigit() and int(ControlNet_XL_Model) - 1 < 5:
download(mdllnk_XL[int(ControlNet_XL_Model) - 1], mdldir)
if not wrngv1:
clear_output()
done()
elif ControlNet_XL_Model == "none":
pass
if not wrngv1:
clear_output()
done()
else:
print("[1;31mWrong ControlNet XL choice, try again")
def sd(User, Password, model):
import gradio
gradio.close_all()
auth = f"--gradio-auth {User}:{Password}"
if User == "" or Password == "":
auth = ""
call(
"wget -q -O /usr/local/lib/python3.10/dist-packages/gradio/blocks.py https://raw.githubusercontent.com/TheLastBen/fast-stable-diffusion/main/AUTOMATIC1111_files/blocks.py",
shell=True,
)
os.chdir("/workspace/sd/stable-diffusion-webui/modules")
call(
"sed -i 's@possible_sd_paths =.*@possible_sd_paths = [\"/workspace/sd/stablediffusion\"]@' /workspace/sd/stable-diffusion-webui/modules/paths.py",
shell=True,
)
call(
"sed -i 's@\.\.\/@src/@g' /workspace/sd/stable-diffusion-webui/modules/paths.py",
shell=True,
)
call(
"sed -i 's@src\/generative-models@generative-models@g' /workspace/sd/stable-diffusion-webui/modules/paths.py",
shell=True,
)
call(
'sed -i \'s@\["sd_model_checkpoint"\]@\["sd_model_checkpoint", "sd_vae", "CLIP_stop_at_last_layers", "inpainting_mask_weight", "initial_noise_multiplier"\]@g\' /workspace/sd/stable-diffusion-webui/modules/shared.py',
shell=True,
)
call(
'sed -i \'s/"CLIP_stop_at_last_layers": 1/"CLIP_stop_at_last_layers": 2/g\' /workspace/sd/stable-diffusion-webui/config.json',
shell=True,
)
call(
"sed -i 's@print(\"No module.*@@' /workspace/sd/stablediffusion/ldm/modules/diffusionmodules/model.py",
shell=True,
)
os.chdir("/workspace/sd/stable-diffusion-webui")
clear_output()
podid = os.environ.get("RUNPOD_POD_ID")
localurl = f"{podid}-3001.proxy.runpod.net"
for line in fileinput.input(
"/usr/local/lib/python3.10/dist-packages/gradio/blocks.py", inplace=True
):
if line.strip().startswith("self.server_name ="):
line = f' self.server_name = "{localurl}"\n'
if line.strip().startswith('self.protocol = "https"'):
line = ' self.protocol = "https"\n'
if line.strip().startswith(
'if self.local_url.startswith("https") or self.is_colab'
):
line = ""
if line.strip().startswith('else "http"'):
line = ""
sys.stdout.write(line)
if model == "":
mdlpth = ""
else:
if os.path.isfile(model):
mdlpth = "--ckpt " + model
else:
mdlpth = "--ckpt-dir " + model
vae_path = "--vae-path /workspace/sd/stable-diffusion-webui/models/VAE/vae-ft-mse-840000-ema-pruned.safetensors"
configf = (
"--disable-console-progressbars --no-half-vae --disable-safe-unpickle --api --no-download-sd-model --opt-sdp-attention --enable-insecure-extension-access --skip-version-check --listen --port 3000 "
+ auth
+ " "
+ mdlpth
+ " "
+ vae_path
)
return configf
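# save(): archive the sd folder (excluding model files) as sd_backup_rnpd.tar.zst
# and upload it to a private "fast-stable-diffusion" dataset on the user's
# Hugging Face account.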
def save(Huggingface_Write_token):
from slugify import slugify
from huggingface_hub import HfApi, CommitOperationAdd, create_repo
if Huggingface_Write_token == "":
print("[1;31mA huggingface write token is required")
else:
os.chdir("/workspace")
if os.path.exists("sd"):
call(
'tar --exclude="stable-diffusion-webui/models/*/*" --exclude="sd-webui-controlnet/models/*" --zstd -cf sd_backup_rnpd.tar.zst sd',
shell=True,
)
api = HfApi()
username = api.whoami(token=Huggingface_Write_token)["name"]
repo_id = f"{username}/{slugify('fast-stable-diffusion')}"
print("[1;32mBacking up...")
operations = [
CommitOperationAdd(
path_in_repo="sd_backup_rnpd.tar.zst",
path_or_fileobj="/workspace/sd_backup_rnpd.tar.zst",
)
]
create_repo(
repo_id,
private=True,
token=Huggingface_Write_token,
exist_ok=True,
repo_type="dataset",
)
api.create_commit(
repo_id=repo_id,
repo_type="dataset",
operations=operations,
commit_message="SD folder Backup",
token=Huggingface_Write_token,
)
call("rm sd_backup_rnpd.tar.zst", shell=True)
clear_output()
done()
else:
print("[1;33mNothing to backup")
def getsrc(url):
parsed_url = urlparse(url)
if parsed_url.netloc == "civitai.com":
src = "civitai"
elif parsed_url.netloc == "drive.google.com":
src = "gdrive"
elif parsed_url.netloc == "huggingface.co":
src = "huggingface"
else:
src = "others"
return src
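# get_true_name(): scrape the <title> of a Google Drive page to recover the
# original file name.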
def get_true_name(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
title_tag = soup.find("title")
if title_tag:
title_text = title_tag.text
# Extract the file name from the title tag (assuming the file name is given in the title)
file_name = title_text.split(" - ")[0]
return file_name
else:
raise RuntimeError("Could not find the title tag in the HTML")
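# get_name(): resolve the real file name behind a civitai link (from the redirect's
# Content-Disposition) or a Google Drive link (via gdown's confirmation page).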
def get_name(url, gdrive):
from gdown.download import get_url_from_gdrive_confirmation
if not gdrive:
response = requests.get(url, allow_redirects=False)
if "Location" in response.headers:
redirected_url = response.headers["Location"]
quer = parse_qs(urlparse(redirected_url).query)
if "response-content-disposition" in quer:
disp_val = quer["response-content-disposition"][0].split(";")
for vals in disp_val:
if vals.strip().startswith("filename="):
filenm = unquote(vals.split("=", 1)[1].strip())
return filenm.replace('"', "")
else:
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36"
}
lnk = "https://drive.google.com/uc?id={id}&export=download".format(
id=url[url.find("/d/") + 3 : url.find("/view")]
)
res = requests.session().get(lnk, headers=headers, stream=True, verify=True)
res = requests.session().get(
get_url_from_gdrive_confirmation(res.text),
headers=headers,
stream=True,
verify=True,
)
content_disposition = six.moves.urllib_parse.unquote(
res.headers["Content-Disposition"]
)
filenm = (
re.search(r"filename\*=UTF-8''(.*)", content_disposition)
.groups()[0]
.replace(os.path.sep, "_")
)
return filenm
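# done(): display a green "Done!" button in the notebook output.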
def done():
done = widgets.Button(
description="Done!",
disabled=True,
button_style="success",
tooltip="",
icon="check",
)
display(done)