NagisaNao committed on
Commit
0540f3c
1 Parent(s): 855e5a3

edits to the download code

files_cells/notebooks/en/auto-cleaner_en.ipynb CHANGED
@@ -242,7 +242,7 @@
242
  "\n",
243
  "# ================ AutoCleaner function ================\n",
244
  "directories = {\n",
245
- " \"Images\": f\"{webui_path}/outputs\",\n",
246
  " \"Models\": f\"{webui_path}/models/Stable-diffusion/\",\n",
247
  " \"Vae\": f\"{webui_path}/models/VAE/\",\n",
248
  " \"LoRa\": f\"{webui_path}/models/Lora/\",\n",
 
242
  "\n",
243
  "# ================ AutoCleaner function ================\n",
244
  "directories = {\n",
245
+ " \"Images\": f\"{webui_path}/output\",\n",
246
  " \"Models\": f\"{webui_path}/models/Stable-diffusion/\",\n",
247
  " \"Vae\": f\"{webui_path}/models/VAE/\",\n",
248
  " \"LoRa\": f\"{webui_path}/models/Lora/\",\n",
files_cells/notebooks/en/downloading_en.ipynb CHANGED
@@ -137,10 +137,10 @@
137
  "# ================= MAIN CODE =================\n",
138
  "if not os.path.exists(webui_path):\n",
139
  " start_install = int(time.time())\n",
140
- " print(\"⌚ Unpacking Stable Diffusion...\", end='')\n",
141
  " with capture.capture_output() as cap:\n",
142
  " aria2_command = \"aria2c --console-log-level=error -c -x 16 -s 16 -k 1M\"\n",
143
- " url = \"https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip\" if change_webui != 'Forge' else \"https://huggingface.co/NagisaNao/test/resolve/main/FULL_REPO_forge.zip\"\n",
144
  " !{aria2_command} {url} -o repo.zip\n",
145
  "\n",
146
  " !unzip -q -o repo.zip -d {webui_path}\n",
@@ -253,13 +253,19 @@
253
  "\n",
254
  "# 1-4 (fp16/cleaned)\n",
255
  "vae_list = {\n",
256
- " \"1.Anime.vae\": [\n",
257
- " {\"url\": \"https://civitai.com/api/download/models/131654\", \"name\": \"Anime.vae.safetensors\"},\n",
258
- " {\"url\": \"https://civitai.com/api/download/models/131658\", \"name\": \"vae-ft-mse.vae.safetensors\"}\n",
259
- " ],\n",
260
- " \"2.Anything.vae\": [{\"url\": \"https://civitai.com/api/download/models/131656\", \"name\": \"Anything.vae.safetensors\"}],\n",
261
- " \"3.Blessed2.vae\": [{\"url\": \"https://civitai.com/api/download/models/142467\", \"name\": \"Blessed2.vae.safetensors\"}],\n",
262
- " \"4.ClearVae.vae\": [{\"url\": \"https://civitai.com/api/download/models/133362\", \"name\": \"ClearVae_23.vae.safetensors\"}],\n",
 
 
 
 
 
 
263
  " \"5.WD.vae\": [{\"url\": \"https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors\", \"name\": \"WD.vae.safetensors\"}]\n",
264
  "}\n",
265
  "\n",
@@ -333,26 +339,47 @@
333
  "}\n",
334
  "\n",
335
  "extension_repo = []\n",
336
- "directories = (value for key, value in prefixes.items()) # for unpucking zip files\n",
337
  "!mkdir -p {\" \".join(directories)}\n",
338
  "\n",
339
  "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
340
  "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
341
  "\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
342
  "''' Get Image Preview | CivitAi '''\n",
343
  "\n",
344
- "def get_data_from_api(model_id): # get model data\n",
 
345
  " endpoint_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
346
  " headers = {\"Content-Type\": \"application/json\"}\n",
347
  " try:\n",
348
  " response = requests.get(endpoint_url, headers=headers)\n",
349
- " if response.status_code == 200:\n",
350
- " return response.json()\n",
351
  " except requests.exceptions.RequestException as e:\n",
352
  " print(f\"An error occurred: {e}\")\n",
353
  " return None\n",
354
  "\n",
355
  "def extract_model_info(data, url):\n",
 
356
  " if 'type=' in url:\n",
357
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
358
  " model_name = data['files'][1]['name']\n",
@@ -369,59 +396,69 @@
369
  " return model_type, model_name, image_url\n",
370
  "\n",
371
  "def gen_preview_filename(model_name, image_url):\n",
 
372
  " name = model_name.split('.')\n",
373
  " img_exts = image_url.split('.')\n",
374
- " return f\"{name[0]}.preview.{img_exts[-1]}\" # assigning the original image format\n",
375
  "\n",
376
  "''' main download code '''\n",
377
  "\n",
378
  "def handle_manual(url):\n",
379
- " original_url = url\n",
380
- " url = url.split(':', 1)[1]\n",
381
- " file_name = re.search(r'\\[(.*?)\\]', url)\n",
382
- " file_name = file_name.group(1) if file_name else None\n",
383
- " if file_name:\n",
384
- " url = re.sub(r'\\[.*?\\]', '', url)\n",
385
  "\n",
386
- " for prefix, dir in prefixes.items():\n",
387
- " if original_url.startswith(f\"{prefix}:\"):\n",
388
- " if prefix != \"extension\":\n",
389
- " manual_download(url, dir, file_name=file_name)\n",
390
- " else:\n",
391
- " extension_repo.append((url, file_name))\n",
 
 
 
 
 
 
 
 
392
  "\n",
393
  "def manual_download(url, dst_dir, file_name):\n",
394
  " aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
395
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
396
  " header_option = f\"--header={user_header}\"\n",
397
  "\n",
398
- " # === CivitAi API ===\n",
399
  " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image\n",
400
  " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
401
  "\n",
402
  " if 'civitai' in url:\n",
403
- " url = f\"{url}{'&' if '?' in url else '?'}token={civitai_token}\" # some authors are pussies and they need API token...\n",
404
- " model_id = url.split('/')[-1]\n",
405
- " clean_url = url.split('?')[0]\n",
406
  "\n",
407
  " data = get_data_from_api(model_id)\n",
408
  " if data:\n",
409
  " model_type, model_name, image_url = extract_model_info(data, url)\n",
410
- " if model_name and image_url:\n",
411
- " image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)\n",
412
- " if any(types in model_type for types in support_types):\n",
413
- " with capture.capture_output() as cap: # clear shit\n",
414
- " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}\n",
 
415
  " del cap\n",
416
- " file_name = file_name or model_name # assigns the original file name if not specified initially\n",
 
 
 
 
 
 
417
  "\n",
418
- " \"\"\" information output \"\"\"\n",
419
- " # -- wrold's best print info --\n",
420
- " print(f\"\\n\\033[32m{'---'*45}\\n\\033[33mURL: \\033[34m{clean_url if 'civitai' in url else url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name if not 'huggingface' in url else basename}\\033[0m\")\n",
421
  " print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
422
- " if 'civitai' in url and data and any(types in model_type for types in support_types) and (locals().get('image_file_name') or ''):\n",
423
  " print(f\"\\033[32m[Preview DL]:\\033[0m {image_file_name} - {image_url}\\n\")\n",
424
- " # ===================\n",
425
  "\n",
426
  " # -- GDrive --\n",
427
  " if 'drive.google' in url:\n",
@@ -465,9 +502,8 @@
465
  "\n",
466
  " unpucking_zip_files()\n",
467
  "\n",
468
- "## unpucking zip files\n",
469
  "def unpucking_zip_files():\n",
470
- " # directories - above\n",
471
  " for directory in directories:\n",
472
  " for root, dirs, files in os.walk(directory):\n",
473
  " for file in files:\n",
@@ -480,8 +516,6 @@
480
  "\n",
481
  "''' submodels - added urls '''\n",
482
  "\n",
483
- "submodels = []\n",
484
- "\n",
485
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
486
  " if selection == \"none\":\n",
487
  " return []\n",
@@ -493,33 +527,32 @@
493
  " else:\n",
494
  " selected_models = model_dict[selection]\n",
495
  " selected_nums = map(int, num_selection.replace(',', '').split())\n",
496
- "\n",
497
  " for num in selected_nums:\n",
498
  " if 1 <= num <= len(model_dict):\n",
499
  " name = list(model_dict)[num - 1]\n",
500
  " selected_models.extend(model_dict[name])\n",
501
  "\n",
502
  " unique_models = list({model['name']: model for model in selected_models}.values())\n",
503
- "\n",
504
  " for model in unique_models:\n",
505
  " model['dst_dir'] = dst_dir\n",
506
  "\n",
507
  " return unique_models\n",
508
  "\n",
509
- "submodels += add_submodels(Model, Model_Num, model_list, models_dir) # model\n",
510
- "submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir) # vae\n",
511
- "submodels += add_submodels(controlnet, controlnet_Num, controlnet_list, control_dir) # controlnet\n",
 
 
 
 
512
  "\n",
513
- "for submodel in submodels:\n",
514
- " if not Inpainting_Model and \"inpainting\" in submodel['name']:\n",
515
- " continue\n",
516
- " url += f\"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, \"\n",
517
  "\n",
518
  "''' file.txt - added urls '''\n",
519
  "\n",
520
- "unique_urls = []\n",
521
- "\n",
522
- "def process_file_download(file_url):\n",
523
  " files_urls = \"\"\n",
524
  "\n",
525
  " if file_url.startswith(\"http\"):\n",
@@ -533,19 +566,23 @@
533
  "\n",
534
  " current_tag = None\n",
535
  " for line in lines:\n",
 
536
  " if any(f'# {tag}' in line.lower() for tag in prefixes):\n",
537
  " current_tag = next((tag for tag in prefixes if tag in line.lower()))\n",
538
  "\n",
539
  " urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls\n",
540
  " for url in urls:\n",
541
- " if url.startswith(\"http\") and url not in unique_urls:\n",
 
 
542
  " files_urls += f\"{current_tag}:{url}, \"\n",
543
- " unique_urls.append(url)\n",
544
  "\n",
545
  " return files_urls\n",
546
  "\n",
547
- "# fix all possible errors/options and function call\n",
548
  "file_urls = \"\"\n",
 
 
549
  "if custom_file_urls:\n",
550
  " for custom_file_url in custom_file_urls.replace(',', '').split():\n",
551
  " if not custom_file_url.endswith('.txt'):\n",
@@ -555,7 +592,7 @@
555
  " custom_file_url = f'{root_path}/{custom_file_url}'\n",
556
  "\n",
557
  " try:\n",
558
- " file_urls += process_file_download(custom_file_url)\n",
559
  " except FileNotFoundError:\n",
560
  " pass\n",
561
  "\n",
 
137
  "# ================= MAIN CODE =================\n",
138
  "if not os.path.exists(webui_path):\n",
139
  " start_install = int(time.time())\n",
140
+ " print(\"⌚ Unpacking Stable Diffusion...\" if change_webui != 'Forge' else \"⌚ Unpacking Stable Diffusion (Forge)...\", end='')\n",
141
  " with capture.capture_output() as cap:\n",
142
  " aria2_command = \"aria2c --console-log-level=error -c -x 16 -s 16 -k 1M\"\n",
143
+ " url = \"https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip\" if change_webui != 'Forge' else \"https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO_forge.zip\"\n",
144
  " !{aria2_command} {url} -o repo.zip\n",
145
  "\n",
146
  " !unzip -q -o repo.zip -d {webui_path}\n",
 
253
  "\n",
254
  "# 1-4 (fp16/cleaned)\n",
255
  "vae_list = {\n",
256
+ " ## previous VAE links broke - replaced with working sources below\n",
257
+ " # \"1.Anime.vae\": [\n",
258
+ " # {\"url\": \"https://civitai.com/api/download/models/131654\", \"name\": \"Anime.vae.safetensors\"},\n",
259
+ " # {\"url\": \"https://civitai.com/api/download/models/131658\", \"name\": \"vae-ft-mse.vae.safetensors\"}\n",
260
+ " # ],\n",
261
+ " # \"2.Anything.vae\": [{\"url\": \"https://civitai.com/api/download/models/131656\", \"name\": \"Anything.vae.safetensors\"}],\n",
262
+ " # \"3.Blessed2.vae\": [{\"url\": \"https://civitai.com/api/download/models/142467\", \"name\": \"Blessed2.vae.safetensors\"}],\n",
263
+ " # \"4.ClearVae.vae\": [{\"url\": \"https://civitai.com/api/download/models/133362\", \"name\": \"ClearVae_23.vae.safetensors\"}],\n",
264
+ "\n",
265
+ " \"1.Anime.vae\": [{\"url\": \"https://civitai.com/api/download/models/311162\", \"name\": \"vae-ft-mse-840000-ema-pruned.vae.safetensors\"}],\n",
266
+ " \"2.Anything.vae\": [{\"url\": \"https://civitai.com/api/download/models/119279\", \"name\": \"Anything.vae.safetensors\"}],\n",
267
+ " \"3.Blessed2.vae\": [{\"url\": \"https://huggingface.co/NoCrypt/blessed_vae/resolve/main/blessed2.vae.pt\", \"name\": \"Blessed2.vae.safetensors\"}],\n",
268
+ " \"4.ClearVae.vae\": [{\"url\": \"https://civitai.com/api/download/models/88156\", \"name\": \"ClearVae_23.vae.safetensors\"}],\n",
269
  " \"5.WD.vae\": [{\"url\": \"https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors\", \"name\": \"WD.vae.safetensors\"}]\n",
270
  "}\n",
271
  "\n",
 
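To make the selection scheme concrete, a minimal sketch of how a numeric choice maps onto the keys of vae_list (trimmed placeholder entries, not the real list; add_submodels further down performs the same list(model_dict)[num - 1] lookup):

vae_list = {
    "1.Anime.vae": [{"url": "https://example.com/a.vae", "name": "Anime.vae.safetensors"}],
    "2.Anything.vae": [{"url": "https://example.com/b.vae", "name": "Anything.vae.safetensors"}],
}
num = 2
name = list(vae_list)[num - 1]  # dicts preserve insertion order -> "2.Anything.vae"
print(name, vae_list[name][0]["name"])  # 2.Anything.vae Anything.vae.safetensors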
339
  "}\n",
340
  "\n",
341
  "extension_repo = []\n",
342
+ "directories = [value for key, value in prefixes.items()] # for unpacking zip files\n",
343
  "!mkdir -p {\" \".join(directories)}\n",
344
  "\n",
345
  "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
346
  "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
347
  "\n",
348
+ "''' Formatted Info Output '''\n",
349
+ "\n",
350
+ "from math import floor\n",
351
+ "\n",
352
+ "def center_text(text, terminal_width=45):\n",
353
+ " text_length = len(text)\n",
354
+ " left_padding = floor((terminal_width - text_length) / 2)\n",
355
+ " right_padding = terminal_width - text_length - left_padding\n",
356
+ " return f\"\\033[1m\\033[36m{' ' * left_padding}{text}{' ' * right_padding}\\033[0m\\033[32m\"\n",
357
+ "\n",
358
+ "def format_output(url, dst_dir, file_name):\n",
359
+ " info = f\"[{file_name.split('.')[0]}]\"\n",
360
+ " info = center_text(info)\n",
361
+ "\n",
362
+ " print(f\"\\n\\033[32m{'---'*20}]{info}[{'---'*20}\")\n",
363
+ " print(f\"\\033[33mURL: \\033[34m{url}\")\n",
364
+ " print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
365
+ " print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
366
+ "\n",
367
  "''' Get Image Preview | CivitAi '''\n",
368
  "\n",
369
+ "def get_data_from_api(model_id):\n",
370
+ " \"\"\"Fetch model data from the API\"\"\"\n",
371
  " endpoint_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
372
  " headers = {\"Content-Type\": \"application/json\"}\n",
373
  " try:\n",
374
  " response = requests.get(endpoint_url, headers=headers)\n",
375
+ " response.raise_for_status()\n",
376
+ " return response.json()\n",
377
  " except requests.exceptions.RequestException as e:\n",
378
  " print(f\"An error occurred: {e}\")\n",
379
  " return None\n",
380
  "\n",
381
  "def extract_model_info(data, url):\n",
382
+ " \"\"\"Extract model information based on URL\"\"\"\n",
383
  " if 'type=' in url:\n",
384
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
385
  " model_name = data['files'][1]['name']\n",
 
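A hedged usage sketch of the new format_output helper defined above, with placeholder arguments; the output in the comments abbreviates the 60-dash banner and omits the ANSI color escapes:

format_output(
    "https://civitai.com/api/download/models/311162",  # sample URL
    "/content/models",                                 # sample dst_dir
    "vae-ft-mse-840000-ema-pruned.vae.safetensors",    # sample file name
)
# ---...---]       [vae-ft-mse-840000-ema-pruned]        [---...---
# URL: https://civitai.com/api/download/models/311162
# SAVE DIR: /content/models
# FILE NAME: vae-ft-mse-840000-ema-pruned.vae.safetensors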
396
  " return model_type, model_name, image_url\n",
397
  "\n",
398
  "def gen_preview_filename(model_name, image_url):\n",
399
+ " \"\"\"Generate a preview filename\"\"\"\n",
400
  " name = model_name.split('.')\n",
401
  " img_exts = image_url.split('.')\n",
402
+ " return f\"{name[0]}.preview.{img_exts[-1]}\"\n",
403
  "\n",
404
  "''' main download code '''\n",
405
  "\n",
406
  "def handle_manual(url):\n",
407
+ " url_parts = url.split(':', 1)\n",
408
+ " prefix = url_parts[0]\n",
409
+ " path = url_parts[1]\n",
 
 
 
410
  "\n",
411
+ " file_name_match = re.search(r'\\[(.*?)\\]', path)\n",
412
+ " file_name = file_name_match.group(1) if file_name_match else None\n",
413
+ " if file_name:\n",
414
+ " path = re.sub(r'\\[.*?\\]', '', path)\n",
415
+ "\n",
416
+ " if prefix in prefixes:\n",
417
+ " dir = prefixes[prefix]\n",
418
+ " if prefix != \"extension\":\n",
419
+ " try:\n",
420
+ " manual_download(path, dir, file_name=file_name)\n",
421
+ " except Exception as e:\n",
422
+ " print(f\"Error downloading file: {e}\")\n",
423
+ " else:\n",
424
+ " extension_repo.append((path, file_name))\n",
425
  "\n",
426
  "def manual_download(url, dst_dir, file_name):\n",
427
  " aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
428
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
429
  " header_option = f\"--header={user_header}\"\n",
430
  "\n",
431
+ " # ==== CivitAi API+ ====\n",
432
  " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image\n",
433
  " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
434
  "\n",
435
  " if 'civitai' in url:\n",
436
+ " url = f\"{url}{'&' if '?' in url else '?'}token={civitai_token}\"\n",
437
+ " model_id = url.split('/')[-1].split('?')[0]\n",
438
+ " clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token\n",
439
  "\n",
440
  " data = get_data_from_api(model_id)\n",
441
  " if data:\n",
442
  " model_type, model_name, image_url = extract_model_info(data, url)\n",
443
+ "\n",
444
+ " if any(t in model_type for t in support_types):\n",
445
+ " if model_name and image_url:\n",
446
+ " image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)\n",
447
+ " with capture.capture_output() as cap:\n",
448
+ " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'\n",
449
  " del cap\n",
450
+ " file_name = file_name or model_name\n",
451
+ " else:\n",
452
+ " clean_url = url\n",
453
+ "\n",
454
+ " \"\"\" Formatted info output \"\"\"\n",
455
+ " model_name_or_basename = file_name if not 'huggingface' in url else basename\n",
456
+ " format_output(clean_url or url, dst_dir, model_name_or_basename)\n",
457
  "\n",
 
 
 
458
  " print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
459
+ " if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):\n",
460
  " print(f\"\\033[32m[Preview DL]:\\033[0m {image_file_name} - {image_url}\\n\")\n",
461
+ " # =====================\n",
462
  "\n",
463
  " # -- GDrive --\n",
464
  " if 'drive.google' in url:\n",
 
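A small self-contained sketch of the new CivitAi URL handling, using a sample URL and a placeholder token (not a real credential):

import re

url = "https://civitai.com/api/download/models/311162?type=Model"
url = f"{url}{'&' if '?' in url else '?'}token=PLACEHOLDER"
model_id = url.split('/')[-1].split('?')[0]      # query stripped -> "311162"
clean_url = re.sub(r'[?&]token=[^&]*', '', url)  # token removed for display
print(model_id)   # 311162
print(clean_url)  # https://civitai.com/api/download/models/311162?type=Model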
502
  "\n",
503
  " unpucking_zip_files()\n",
504
  "\n",
505
+ "# unpacking zip files\n",
506
  "def unpucking_zip_files():\n",
 
507
  " for directory in directories:\n",
508
  " for root, dirs, files in os.walk(directory):\n",
509
  " for file in files:\n",
 
516
  "\n",
517
  "''' submodels - added urls '''\n",
518
  "\n",
 
 
519
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
520
  " if selection == \"none\":\n",
521
  " return []\n",
 
527
  " else:\n",
528
  " selected_models = model_dict[selection]\n",
529
  " selected_nums = map(int, num_selection.replace(',', '').split())\n",
 
530
  " for num in selected_nums:\n",
531
  " if 1 <= num <= len(model_dict):\n",
532
  " name = list(model_dict)[num - 1]\n",
533
  " selected_models.extend(model_dict[name])\n",
534
  "\n",
535
  " unique_models = list({model['name']: model for model in selected_models}.values())\n",
 
536
  " for model in unique_models:\n",
537
  " model['dst_dir'] = dst_dir\n",
538
  "\n",
539
  " return unique_models\n",
540
  "\n",
541
+ "def handle_submodels(selection, num_selection, model_dict, dst_dir, url):\n",
542
+ " submodels = add_submodels(selection, num_selection, model_dict, dst_dir)\n",
543
+ " for submodel in submodels:\n",
544
+ " if not Inpainting_Model and \"inpainting\" in submodel['name']:\n",
545
+ " continue\n",
546
+ " url += f\"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, \"\n",
547
+ " return url\n",
548
  "\n",
549
+ "url = handle_submodels(Model, Model_Num, model_list, models_dir, url)\n",
550
+ "url = handle_submodels(Vae, Vae_Num, vae_list, vaes_dir, url)\n",
551
+ "url = handle_submodels(controlnet, controlnet_Num, controlnet_list, control_dir, url)\n",
 
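A minimal sketch of what handle_submodels appends for one selected entry, with made-up sample values; each entry becomes a "url dst_dir name, " triple in the shared url string:

url = ""
submodels = [
    {"url": "https://example.com/m.safetensors", "dst_dir": "/content/models", "name": "m.safetensors"},
]
for submodel in submodels:
    url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
print(url)  # "https://example.com/m.safetensors /content/models m.safetensors, " (trailing comma-space kept)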
552
  "\n",
553
  "''' file.txt - added urls '''\n",
554
  "\n",
555
+ "def process_file_download(file_url, prefixes, unique_urls):\n",
 
 
556
  " files_urls = \"\"\n",
557
  "\n",
558
  " if file_url.startswith(\"http\"):\n",
 
566
  "\n",
567
  " current_tag = None\n",
568
  " for line in lines:\n",
569
+ " line = line.strip()\n",
570
  " if any(f'# {tag}' in line.lower() for tag in prefixes):\n",
571
  " current_tag = next((tag for tag in prefixes if tag in line.lower()))\n",
572
  "\n",
573
  " urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls\n",
574
  " for url in urls:\n",
575
+ " filter_url = url.split('[')[0] # base URL used for duplicate filtering\n",
576
+ "\n",
577
+ " if url.startswith(\"http\") and filter_url not in unique_urls:\n",
578
  " files_urls += f\"{current_tag}:{url}, \"\n",
579
+ " unique_urls.add(filter_url)\n",
580
  "\n",
581
  " return files_urls\n",
582
  "\n",
 
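An illustrative run of the new duplicate filter with hypothetical URLs: the bracketed [name] tag is stripped before the uniqueness check, so renamed copies of the same base URL are skipped:

unique_urls = set()
candidates = [
    "https://example.com/model.safetensors[one.safetensors]",
    "https://example.com/model.safetensors[two.safetensors]",  # same base URL
]
for url in candidates:
    filter_url = url.split('[')[0]  # base URL without the [name] tag
    if url.startswith("http") and filter_url not in unique_urls:
        unique_urls.add(filter_url)
        print("kept:", url)
# only the first candidate is kept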
583
  "file_urls = \"\"\n",
584
+ "unique_urls = set()\n",
585
+ "\n",
586
  "if custom_file_urls:\n",
587
  " for custom_file_url in custom_file_urls.replace(',', '').split():\n",
588
  " if not custom_file_url.endswith('.txt'):\n",
 
592
  " custom_file_url = f'{root_path}/{custom_file_url}'\n",
593
  "\n",
594
  " try:\n",
595
+ " file_urls += process_file_download(custom_file_url, prefixes, unique_urls)\n",
596
  " except FileNotFoundError:\n",
597
  " pass\n",
598
  "\n",
files_cells/notebooks/en/launch_en.ipynb CHANGED
@@ -61,9 +61,10 @@
61
  "ngrok_token = settings['ngrok_token']\n",
62
  "zrok_token = settings['zrok_token']\n",
63
  "commandline_arguments = settings['commandline_arguments']\n",
 
64
  "\n",
65
  "\n",
66
- "# ======================== TUNNEL ========================\n",
67
  "print('Please Wait...')\n",
68
  "\n",
69
  "def get_public_ip(version='ipv4'):\n",
@@ -76,10 +77,17 @@
76
  " except Exception as e:\n",
77
  " print(f\"Error getting public {version} address:\", e)\n",
78
  "\n",
79
- "public_ipv4 = get_public_ip(version='ipv4')\n",
 
 
 
 
 
 
 
80
  "\n",
81
  "tunnel_class = pickle.load(open(f\"{root_path}/new_tunnel\", \"rb\"), encoding=\"utf-8\")\n",
82
- "tunnel_port= 1734\n",
83
  "tunnel = tunnel_class(tunnel_port)\n",
84
  "tunnel.add_tunnel(command=\"cl tunnel --url localhost:{port}\", name=\"cl\", pattern=re.compile(r\"[\\w-]+\\.trycloudflare\\.com\"))\n",
85
  "tunnel.add_tunnel(command=\"lt --port {port}\", name=\"lt\", pattern=re.compile(r\"[\\w-]+\\.loca\\.lt\"), note=\"Password : \" + \"\\033[32m\" + public_ipv4 + \"\\033[0m\" + \" rerun cell if 404 error.\")\n",
@@ -90,17 +98,32 @@
90
  " tunnel.add_tunnel(command=\"zrok share public http://localhost:{port}/ --headless\", name=\"zrok\", pattern=re.compile(r\"[\\w-]+\\.share\\.zrok\\.io\"))\n",
91
  "\n",
92
  "clear_output()\n",
93
- "# ======================== TUNNEL ========================\n",
94
  "\n",
95
  "\n",
96
- " # automatic fixing path V2\n",
97
- "!sed -i 's|\"tagger_hf_cache_dir\": \".*\"|\"tagger_hf_cache_dir\": \"{webui_path}/models/interrogators/\"|' {webui_path}/config.json\n",
98
- "!sed -i 's|\"additional_networks_extra_lora_path\": \".*\"|\"additional_networks_extra_lora_path\": \"{webui_path}/models/Lora/\"|' {webui_path}/config.json\n",
99
- "!sed -i 's|\"ad_extra_models_dir\": \".*\"|\"ad_extra_models_dir\": \"{webui_path}/models/adetailer/\"|' {webui_path}/config.json\n",
100
- "!sed -i 's/\"sd_checkpoint_hash\": \".*\"/\"sd_checkpoint_hash\": \"\"/g; s/\"sd_model_checkpoint\": \".*\"/\"sd_model_checkpoint\": \"\"/g; s/\"sd_vae\": \".*\"/\"sd_vae\": \"None\"/g' {webui_path}/config.json\n",
101
- "# for kaggle\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  "if env == 'Kaggle':\n",
103
- " !sed -i 's/\"civitai_interface\\/NSFW content\\/value\":.*/\"civitai_interface\\/NSFW content\\/value\": false/g' {webui_path}/ui-config.json\n",
 
104
  "\n",
105
  "\n",
106
  "with tunnel:\n",
@@ -112,6 +135,10 @@
112
  " if env != \"Google Colab\":\n",
113
  " commandline_arguments += f' --encrypt-pass={tunnel_port} --api'\n",
114
  "\n",
 
 
 
 
115
  " !COMMANDLINE_ARGS=\"{commandline_arguments}\" python launch.py\n",
116
  "\n",
117
  "\n",
 
61
  "ngrok_token = settings['ngrok_token']\n",
62
  "zrok_token = settings['zrok_token']\n",
63
  "commandline_arguments = settings['commandline_arguments']\n",
64
+ "change_webui = settings['change_webui']\n",
65
  "\n",
66
  "\n",
67
+ "# ======================== TUNNEL V2 ========================\n",
68
  "print('Please Wait...')\n",
69
  "\n",
70
  "def get_public_ip(version='ipv4'):\n",
 
77
  " except Exception as e:\n",
78
  " print(f\"Error getting public {version} address:\", e)\n",
79
  "\n",
80
+ "# Check if public IP is already saved, if not then get it\n",
81
+ "try:\n",
82
+ " with open(f\"{root_path}/public_ip.txt\", \"r\") as file:\n",
83
+ " public_ipv4 = file.read().strip()\n",
84
+ "except FileNotFoundError:\n",
85
+ " public_ipv4 = get_public_ip(version='ipv4')\n",
86
+ " with open(f\"{root_path}/public_ip.txt\", \"w\") as file:\n",
87
+ " file.write(public_ipv4)\n",
88
  "\n",
89
  "tunnel_class = pickle.load(open(f\"{root_path}/new_tunnel\", \"rb\"), encoding=\"utf-8\")\n",
90
+ "tunnel_port = 1734\n",
91
  "tunnel = tunnel_class(tunnel_port)\n",
92
  "tunnel.add_tunnel(command=\"cl tunnel --url localhost:{port}\", name=\"cl\", pattern=re.compile(r\"[\\w-]+\\.trycloudflare\\.com\"))\n",
93
  "tunnel.add_tunnel(command=\"lt --port {port}\", name=\"lt\", pattern=re.compile(r\"[\\w-]+\\.loca\\.lt\"), note=\"Password : \" + \"\\033[32m\" + public_ipv4 + \"\\033[0m\" + \" rerun cell if 404 error.\")\n",
 
98
  " tunnel.add_tunnel(command=\"zrok share public http://localhost:{port}/ --headless\", name=\"zrok\", pattern=re.compile(r\"[\\w-]+\\.share\\.zrok\\.io\"))\n",
99
  "\n",
100
  "clear_output()\n",
 
101
  "\n",
102
  "\n",
103
+ "# =============== Automatic Fixing Path V3 ===============\n",
104
+ "paths_to_check = [\n",
105
+ " (\"tagger_hf_cache_dir\", f\"{webui_path}/models/interrogators/\"),\n",
106
+ " (\"additional_networks_extra_lora_path\", f\"{webui_path}/models/Lora/\"),\n",
107
+ " (\"ad_extra_models_dir\", f\"{webui_path}/models/adetailer/\"),\n",
108
+ " (\"sd_checkpoint_hash\", \"\"),\n",
109
+ " (\"sd_model_checkpoint\", \"\"),\n",
110
+ " (\"sd_vae\", \"None\")\n",
111
+ "]\n",
112
+ "\n",
113
+ "config_path = f'{webui_path}/ui-config.json'\n",
114
+ "\n",
115
+ "with open(config_path, 'r') as file:\n",
116
+ " config_data = json.load(file)\n",
117
+ "\n",
118
+ "for key, value in paths_to_check:\n",
119
+ " if key in config_data and config_data[key] != value:\n",
120
+ " sed_command = f\"sed -i 's|\\\"{key}\\\": \\\".*\\\"|\\\"{key}\\\": \\\"{value}\\\"|' {config_path}\"\n",
121
+ " os.system(sed_command)\n",
122
+ "\n",
123
+ "# Additional check for Kaggle\n",
124
  "if env == 'Kaggle':\n",
125
+ " get_ipython().system('sed -i \\'s/\"civitai_interface\\\\/NSFW content\\\\/value\":.*/\"civitai_interface\\\\/NSFW content\\\\/value\": false/g\\' {webui_path}/ui-config.json')\n",
126
+ "# -------------------------------------------------------\n",
127
  "\n",
128
  "\n",
129
  "with tunnel:\n",
 
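A minimal sketch of the sed command the new loop generates for a single (key, value) pair, assuming a placeholder webui_path:

webui_path = "/content/sdw"  # placeholder
key, value = "sd_vae", "None"
config_path = f"{webui_path}/ui-config.json"
sed_command = f"sed -i 's|\"{key}\": \".*\"|\"{key}\": \"{value}\"|' {config_path}"
print(sed_command)
# sed -i 's|"sd_vae": ".*"|"sd_vae": "None"|' /content/sdw/ui-config.json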
135
  " if env != \"Google Colab\":\n",
136
  " commandline_arguments += f' --encrypt-pass={tunnel_port} --api'\n",
137
  "\n",
138
+ " # -- FORGE --\n",
139
+ " if change_webui == 'Forge':\n",
140
+ " commandline_arguments += ' --cuda-stream --pin-shared-memory'\n",
141
+ "\n",
142
  " !COMMANDLINE_ARGS=\"{commandline_arguments}\" python launch.py\n",
143
  "\n",
144
  "\n",
files_cells/notebooks/ru/auto-cleaner_ru.ipynb CHANGED
@@ -242,7 +242,7 @@
242
  "\n",
243
  "# ================ AutoCleaner function ================\n",
244
  "directories = {\n",
245
- " \"Изображения\": f\"{webui_path}/outputs\",\n",
246
  " \"Модели\": f\"{webui_path}/models/Stable-diffusion/\",\n",
247
  " \"Vae\": f\"{webui_path}/models/VAE/\",\n",
248
  " \"LoRa\": f\"{webui_path}/models/Lora/\",\n",
 
242
  "\n",
243
  "# ================ AutoCleaner function ================\n",
244
  "directories = {\n",
245
+ " \"Изображения\": f\"{webui_path}/output\",\n",
246
  " \"Модели\": f\"{webui_path}/models/Stable-diffusion/\",\n",
247
  " \"Vae\": f\"{webui_path}/models/VAE/\",\n",
248
  " \"LoRa\": f\"{webui_path}/models/Lora/\",\n",
files_cells/notebooks/ru/downloading_ru.ipynb CHANGED
@@ -140,7 +140,7 @@
140
  " print(\"⌚ Распаковка Stable Diffusion...\" if change_webui != 'Forge' else \"⌚ Распаковка Stable Diffusion (Forge)...\", end='')\n",
141
  " with capture.capture_output() as cap:\n",
142
  " aria2_command = \"aria2c --console-log-level=error -c -x 16 -s 16 -k 1M\"\n",
143
- " url = \"https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip\" if change_webui != 'Forge' else \"https://huggingface.co/NagisaNao/test/resolve/main/FULL_REPO_forge.zip\"\n",
144
  " !{aria2_command} {url} -o repo.zip\n",
145
  "\n",
146
  " !unzip -q -o repo.zip -d {webui_path}\n",
@@ -253,13 +253,19 @@
253
  "\n",
254
  "# 1-4 (fp16/cleaned)\n",
255
  "vae_list = {\n",
256
- " \"1.Anime.vae\": [\n",
257
- " {\"url\": \"https://civitai.com/api/download/models/131654\", \"name\": \"Anime.vae.safetensors\"},\n",
258
- " {\"url\": \"https://civitai.com/api/download/models/131658\", \"name\": \"vae-ft-mse.vae.safetensors\"}\n",
259
- " ],\n",
260
- " \"2.Anything.vae\": [{\"url\": \"https://civitai.com/api/download/models/131656\", \"name\": \"Anything.vae.safetensors\"}],\n",
261
- " \"3.Blessed2.vae\": [{\"url\": \"https://civitai.com/api/download/models/142467\", \"name\": \"Blessed2.vae.safetensors\"}],\n",
262
- " \"4.ClearVae.vae\": [{\"url\": \"https://civitai.com/api/download/models/133362\", \"name\": \"ClearVae_23.vae.safetensors\"}],\n",
 
 
 
 
 
 
263
  " \"5.WD.vae\": [{\"url\": \"https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors\", \"name\": \"WD.vae.safetensors\"}]\n",
264
  "}\n",
265
  "\n",
@@ -333,26 +339,47 @@
333
  "}\n",
334
  "\n",
335
  "extension_repo = []\n",
336
- "directories = (value for key, value in prefixes.items()) # for unpucking zip files\n",
337
  "!mkdir -p {\" \".join(directories)}\n",
338
  "\n",
339
  "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
340
  "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
341
  "\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
342
  "''' Get Image Preview | CivitAi '''\n",
343
  "\n",
344
- "def get_data_from_api(model_id): # get model data\n",
 
345
  " endpoint_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
346
  " headers = {\"Content-Type\": \"application/json\"}\n",
347
  " try:\n",
348
  " response = requests.get(endpoint_url, headers=headers)\n",
349
- " if response.status_code == 200:\n",
350
- " return response.json()\n",
351
  " except requests.exceptions.RequestException as e:\n",
352
  " print(f\"An error occurred: {e}\")\n",
353
  " return None\n",
354
  "\n",
355
  "def extract_model_info(data, url):\n",
 
356
  " if 'type=' in url:\n",
357
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
358
  " model_name = data['files'][1]['name']\n",
@@ -369,59 +396,69 @@
369
  " return model_type, model_name, image_url\n",
370
  "\n",
371
  "def gen_preview_filename(model_name, image_url):\n",
 
372
  " name = model_name.split('.')\n",
373
  " img_exts = image_url.split('.')\n",
374
- " return f\"{name[0]}.preview.{img_exts[-1]}\" # assigning the original image format\n",
375
  "\n",
376
  "''' main download code '''\n",
377
  "\n",
378
  "def handle_manual(url):\n",
379
- " original_url = url\n",
380
- " url = url.split(':', 1)[1]\n",
381
- " file_name = re.search(r'\\[(.*?)\\]', url)\n",
382
- " file_name = file_name.group(1) if file_name else None\n",
383
- " if file_name:\n",
384
- " url = re.sub(r'\\[.*?\\]', '', url)\n",
385
  "\n",
386
- " for prefix, dir in prefixes.items():\n",
387
- " if original_url.startswith(f\"{prefix}:\"):\n",
388
- " if prefix != \"extension\":\n",
389
- " manual_download(url, dir, file_name=file_name)\n",
390
- " else:\n",
391
- " extension_repo.append((url, file_name))\n",
 
 
 
 
 
 
 
 
392
  "\n",
393
  "def manual_download(url, dst_dir, file_name):\n",
394
  " aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
395
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
396
  " header_option = f\"--header={user_header}\"\n",
397
  "\n",
398
- " # === CivitAi API ===\n",
399
  " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image\n",
400
  " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
401
  "\n",
402
  " if 'civitai' in url:\n",
403
- " url = f\"{url}{'&' if '?' in url else '?'}token={civitai_token}\" # some authors are pussies and they need API token...\n",
404
- " model_id = url.split('/')[-1]\n",
405
- " clean_url = url.split('?')[0]\n",
406
  "\n",
407
  " data = get_data_from_api(model_id)\n",
408
  " if data:\n",
409
  " model_type, model_name, image_url = extract_model_info(data, url)\n",
410
- " if model_name and image_url:\n",
411
- " image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)\n",
412
- " if any(types in model_type for types in support_types):\n",
413
- " with capture.capture_output() as cap: # clear shit\n",
414
- " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}\n",
 
415
  " del cap\n",
416
- " file_name = file_name or model_name # assigns the original file name if not specified initially\n",
 
 
 
 
 
 
417
  "\n",
418
- " \"\"\" information output \"\"\"\n",
419
- " # -- wrold's best print info --\n",
420
- " print(f\"\\n\\033[32m{'---'*45}\\n\\033[33mURL: \\033[34m{clean_url if 'civitai' in url else url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name if not 'huggingface' in url else basename}\\033[0m\")\n",
421
  " print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
422
- " if 'civitai' in url and data and any(types in model_type for types in support_types) and (locals().get('image_file_name') or ''):\n",
423
  " print(f\"\\033[32m[Preview DL]:\\033[0m {image_file_name} - {image_url}\\n\")\n",
424
- " # ===================\n",
425
  "\n",
426
  " # -- GDrive --\n",
427
  " if 'drive.google' in url:\n",
@@ -465,9 +502,8 @@
465
  "\n",
466
  " unpucking_zip_files()\n",
467
  "\n",
468
- "## unpucking zip files\n",
469
  "def unpucking_zip_files():\n",
470
- " # directories - above\n",
471
  " for directory in directories:\n",
472
  " for root, dirs, files in os.walk(directory):\n",
473
  " for file in files:\n",
@@ -480,8 +516,6 @@
480
  "\n",
481
  "''' submodels - added urls '''\n",
482
  "\n",
483
- "submodels = []\n",
484
- "\n",
485
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
486
  " if selection == \"none\":\n",
487
  " return []\n",
@@ -493,33 +527,32 @@
493
  " else:\n",
494
  " selected_models = model_dict[selection]\n",
495
  " selected_nums = map(int, num_selection.replace(',', '').split())\n",
496
- "\n",
497
  " for num in selected_nums:\n",
498
  " if 1 <= num <= len(model_dict):\n",
499
  " name = list(model_dict)[num - 1]\n",
500
  " selected_models.extend(model_dict[name])\n",
501
  "\n",
502
  " unique_models = list({model['name']: model for model in selected_models}.values())\n",
503
- "\n",
504
  " for model in unique_models:\n",
505
  " model['dst_dir'] = dst_dir\n",
506
  "\n",
507
  " return unique_models\n",
508
  "\n",
509
- "submodels += add_submodels(Model, Model_Num, model_list, models_dir) # model\n",
510
- "submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir) # vae\n",
511
- "submodels += add_submodels(controlnet, controlnet_Num, controlnet_list, control_dir) # controlnet\n",
 
 
 
 
512
  "\n",
513
- "for submodel in submodels:\n",
514
- " if not Inpainting_Model and \"inpainting\" in submodel['name']:\n",
515
- " continue\n",
516
- " url += f\"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, \"\n",
517
  "\n",
518
  "''' file.txt - added urls '''\n",
519
  "\n",
520
- "unique_urls = []\n",
521
- "\n",
522
- "def process_file_download(file_url):\n",
523
  " files_urls = \"\"\n",
524
  "\n",
525
  " if file_url.startswith(\"http\"):\n",
@@ -533,19 +566,23 @@
533
  "\n",
534
  " current_tag = None\n",
535
  " for line in lines:\n",
 
536
  " if any(f'# {tag}' in line.lower() for tag in prefixes):\n",
537
  " current_tag = next((tag for tag in prefixes if tag in line.lower()))\n",
538
  "\n",
539
  " urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls\n",
540
  " for url in urls:\n",
541
- " if url.startswith(\"http\") and url not in unique_urls:\n",
 
 
542
  " files_urls += f\"{current_tag}:{url}, \"\n",
543
- " unique_urls.append(url)\n",
544
  "\n",
545
  " return files_urls\n",
546
  "\n",
547
- "# fix all possible errors/options and function call\n",
548
  "file_urls = \"\"\n",
 
 
549
  "if custom_file_urls:\n",
550
  " for custom_file_url in custom_file_urls.replace(',', '').split():\n",
551
  " if not custom_file_url.endswith('.txt'):\n",
@@ -555,7 +592,7 @@
555
  " custom_file_url = f'{root_path}/{custom_file_url}'\n",
556
  "\n",
557
  " try:\n",
558
- " file_urls += process_file_download(custom_file_url)\n",
559
  " except FileNotFoundError:\n",
560
  " pass\n",
561
  "\n",
 
140
  " print(\"⌚ Распаковка Stable Diffusion...\" if change_webui != 'Forge' else \"⌚ Распаковка Stable Diffusion (Forge)...\", end='')\n",
141
  " with capture.capture_output() as cap:\n",
142
  " aria2_command = \"aria2c --console-log-level=error -c -x 16 -s 16 -k 1M\"\n",
143
+ " url = \"https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip\" if change_webui != 'Forge' else \"https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO_forge.zip\"\n",
144
  " !{aria2_command} {url} -o repo.zip\n",
145
  "\n",
146
  " !unzip -q -o repo.zip -d {webui_path}\n",
 
253
  "\n",
254
  "# 1-4 (fp16/cleaned)\n",
255
  "vae_list = {\n",
256
+ " ## vae broke - the author's an asshole\n",
257
+ " # \"1.Anime.vae\": [\n",
258
+ " # {\"url\": \"https://civitai.com/api/download/models/131654\", \"name\": \"Anime.vae.safetensors\"},\n",
259
+ " # {\"url\": \"https://civitai.com/api/download/models/131658\", \"name\": \"vae-ft-mse.vae.safetensors\"}\n",
260
+ " # ],\n",
261
+ " # \"2.Anything.vae\": [{\"url\": \"https://civitai.com/api/download/models/131656\", \"name\": \"Anything.vae.safetensors\"}],\n",
262
+ " # \"3.Blessed2.vae\": [{\"url\": \"https://civitai.com/api/download/models/142467\", \"name\": \"Blessed2.vae.safetensors\"}],\n",
263
+ " # \"4.ClearVae.vae\": [{\"url\": \"https://civitai.com/api/download/models/133362\", \"name\": \"ClearVae_23.vae.safetensors\"}],\n",
264
+ "\n",
265
+ " \"1.Anime.vae\": [{\"url\": \"https://civitai.com/api/download/models/311162\", \"name\": \"vae-ft-mse-840000-ema-pruned.vae.safetensors\"}],\n",
266
+ " \"2.Anything.vae\": [{\"url\": \"https://civitai.com/api/download/models/119279\", \"name\": \"Anything.vae.safetensors\"}],\n",
267
+ " \"3.Blessed2.vae\": [{\"url\": \"https://huggingface.co/NoCrypt/blessed_vae/resolve/main/blessed2.vae.pt\", \"name\": \"Blessed2.vae.safetensors\"}],\n",
268
+ " \"4.ClearVae.vae\": [{\"url\": \"https://civitai.com/api/download/models/88156\", \"name\": \"ClearVae_23.vae.safetensors\"}],\n",
269
  " \"5.WD.vae\": [{\"url\": \"https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors\", \"name\": \"WD.vae.safetensors\"}]\n",
270
  "}\n",
271
  "\n",
 
339
  "}\n",
340
  "\n",
341
  "extension_repo = []\n",
342
+ "directories = [value for key, value in prefixes.items()] # for unpacking zip files\n",
343
  "!mkdir -p {\" \".join(directories)}\n",
344
  "\n",
345
  "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",
346
  "user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
347
  "\n",
348
+ "''' Formatted Info Output '''\n",
349
+ "\n",
350
+ "from math import floor\n",
351
+ "\n",
352
+ "def center_text(text, terminal_width=45):\n",
353
+ " text_length = len(text)\n",
354
+ " left_padding = floor((terminal_width - text_length) / 2)\n",
355
+ " right_padding = terminal_width - text_length - left_padding\n",
356
+ " return f\"\\033[1m\\033[36m{' ' * left_padding}{text}{' ' * right_padding}\\033[0m\\033[32m\"\n",
357
+ "\n",
358
+ "def format_output(url, dst_dir, file_name):\n",
359
+ " info = f\"[{file_name.split('.')[0]}]\"\n",
360
+ " info = center_text(info)\n",
361
+ "\n",
362
+ " print(f\"\\n\\033[32m{'---'*20}]{info}[{'---'*20}\")\n",
363
+ " print(f\"\\033[33mURL: \\033[34m{url}\")\n",
364
+ " print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
365
+ " print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
366
+ "\n",
367
  "''' Get Image Preview | CivitAi '''\n",
368
  "\n",
369
+ "def get_data_from_api(model_id):\n",
370
+ " \"\"\"Fetch model data from the API\"\"\"\n",
371
  " endpoint_url = f\"https://civitai.com/api/v1/model-versions/{model_id}\"\n",
372
  " headers = {\"Content-Type\": \"application/json\"}\n",
373
  " try:\n",
374
  " response = requests.get(endpoint_url, headers=headers)\n",
375
+ " response.raise_for_status()\n",
376
+ " return response.json()\n",
377
  " except requests.exceptions.RequestException as e:\n",
378
  " print(f\"An error occurred: {e}\")\n",
379
  " return None\n",
380
  "\n",
381
  "def extract_model_info(data, url):\n",
382
+ " \"\"\"Extract model information based on URL\"\"\"\n",
383
  " if 'type=' in url:\n",
384
  " model_type = parse_qs(urlparse(url).query).get('type', [''])[0]\n",
385
  " model_name = data['files'][1]['name']\n",
 
396
  " return model_type, model_name, image_url\n",
397
  "\n",
398
  "def gen_preview_filename(model_name, image_url):\n",
399
+ " \"\"\"Generate a preview filename\"\"\"\n",
400
  " name = model_name.split('.')\n",
401
  " img_exts = image_url.split('.')\n",
402
+ " return f\"{name[0]}.preview.{img_exts[-1]}\"\n",
403
  "\n",
404
  "''' main download code '''\n",
405
  "\n",
406
  "def handle_manual(url):\n",
407
+ " url_parts = url.split(':', 1)\n",
408
+ " prefix = url_parts[0]\n",
409
+ " path = url_parts[1]\n",
 
 
 
410
  "\n",
411
+ " file_name_match = re.search(r'\\[(.*?)\\]', path)\n",
412
+ " file_name = file_name_match.group(1) if file_name_match else None\n",
413
+ " if file_name:\n",
414
+ " path = re.sub(r'\\[.*?\\]', '', path)\n",
415
+ "\n",
416
+ " if prefix in prefixes:\n",
417
+ " dir = prefixes[prefix]\n",
418
+ " if prefix != \"extension\":\n",
419
+ " try:\n",
420
+ " manual_download(path, dir, file_name=file_name)\n",
421
+ " except Exception as e:\n",
422
+ " print(f\"Error downloading file: {e}\")\n",
423
+ " else:\n",
424
+ " extension_repo.append((path, file_name))\n",
425
  "\n",
426
  "def manual_download(url, dst_dir, file_name):\n",
427
  " aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
428
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
429
  " header_option = f\"--header={user_header}\"\n",
430
  "\n",
431
+ " # ==== CivitAi API+ ====\n",
432
  " support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image\n",
433
  " civitai_token = \"62c0c5956b2f9defbd844d754000180b\"\n",
434
  "\n",
435
  " if 'civitai' in url:\n",
436
+ " url = f\"{url}{'&' if '?' in url else '?'}token={civitai_token}\"\n",
437
+ " model_id = url.split('/')[-1].split('?')[0]\n",
438
+ " clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token\n",
439
  "\n",
440
  " data = get_data_from_api(model_id)\n",
441
  " if data:\n",
442
  " model_type, model_name, image_url = extract_model_info(data, url)\n",
443
+ "\n",
444
+ " if any(t in model_type for t in support_types):\n",
445
+ " if model_name and image_url:\n",
446
+ " image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)\n",
447
+ " with capture.capture_output() as cap:\n",
448
+ " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'\n",
449
  " del cap\n",
450
+ " file_name = file_name or model_name\n",
451
+ " else:\n",
452
+ " clean_url = url\n",
453
+ "\n",
454
+ " \"\"\" Formatted info output \"\"\"\n",
455
+ " model_name_or_basename = file_name if not 'huggingface' in url else basename\n",
456
+ " format_output(clean_url or url, dst_dir, model_name_or_basename)\n",
457
  "\n",
 
 
 
458
  " print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\") if 'civitai' in url and not data else None\n",
459
+ " if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):\n",
460
  " print(f\"\\033[32m[Preview DL]:\\033[0m {image_file_name} - {image_url}\\n\")\n",
461
+ " # =====================\n",
462
  "\n",
463
  " # -- GDrive --\n",
464
  " if 'drive.google' in url:\n",
 
502
  "\n",
503
  " unpucking_zip_files()\n",
504
  "\n",
505
+ "# unpacking zip files\n",
506
  "def unpucking_zip_files():\n",
 
507
  " for directory in directories:\n",
508
  " for root, dirs, files in os.walk(directory):\n",
509
  " for file in files:\n",
 
516
  "\n",
517
  "''' submodels - added urls '''\n",
518
  "\n",
 
 
519
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
520
  " if selection == \"none\":\n",
521
  " return []\n",
 
527
  " else:\n",
528
  " selected_models = model_dict[selection]\n",
529
  " selected_nums = map(int, num_selection.replace(',', '').split())\n",
 
530
  " for num in selected_nums:\n",
531
  " if 1 <= num <= len(model_dict):\n",
532
  " name = list(model_dict)[num - 1]\n",
533
  " selected_models.extend(model_dict[name])\n",
534
  "\n",
535
  " unique_models = list({model['name']: model for model in selected_models}.values())\n",
 
536
  " for model in unique_models:\n",
537
  " model['dst_dir'] = dst_dir\n",
538
  "\n",
539
  " return unique_models\n",
540
  "\n",
541
+ "def handle_submodels(selection, num_selection, model_dict, dst_dir, url):\n",
542
+ " submodels = add_submodels(selection, num_selection, model_dict, dst_dir)\n",
543
+ " for submodel in submodels:\n",
544
+ " if not Inpainting_Model and \"inpainting\" in submodel['name']:\n",
545
+ " continue\n",
546
+ " url += f\"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, \"\n",
547
+ " return url\n",
548
  "\n",
549
+ "url = handle_submodels(Model, Model_Num, model_list, models_dir, url)\n",
550
+ "url = handle_submodels(Vae, Vae_Num, vae_list, vaes_dir, url)\n",
551
+ "url = handle_submodels(controlnet, controlnet_Num, controlnet_list, control_dir, url)\n",
 
552
  "\n",
553
  "''' file.txt - added urls '''\n",
554
  "\n",
555
+ "def process_file_download(file_url, prefixes, unique_urls):\n",
 
 
556
  " files_urls = \"\"\n",
557
  "\n",
558
  " if file_url.startswith(\"http\"):\n",
 
566
  "\n",
567
  " current_tag = None\n",
568
  " for line in lines:\n",
569
+ " line = line.strip()\n",
570
  " if any(f'# {tag}' in line.lower() for tag in prefixes):\n",
571
  " current_tag = next((tag for tag in prefixes if tag in line.lower()))\n",
572
  "\n",
573
  " urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls\n",
574
  " for url in urls:\n",
575
+ " filter_url = url.split('[')[0] # base URL used for duplicate filtering\n",
576
+ "\n",
577
+ " if url.startswith(\"http\") and filter_url not in unique_urls:\n",
578
  " files_urls += f\"{current_tag}:{url}, \"\n",
579
+ " unique_urls.add(filter_url)\n",
580
  "\n",
581
  " return files_urls\n",
582
  "\n",
 
583
  "file_urls = \"\"\n",
584
+ "unique_urls = set()\n",
585
+ "\n",
586
  "if custom_file_urls:\n",
587
  " for custom_file_url in custom_file_urls.replace(',', '').split():\n",
588
  " if not custom_file_url.endswith('.txt'):\n",
 
592
  " custom_file_url = f'{root_path}/{custom_file_url}'\n",
593
  "\n",
594
  " try:\n",
595
+ " file_urls += process_file_download(custom_file_url, prefixes, unique_urls)\n",
596
  " except FileNotFoundError:\n",
597
  " pass\n",
598
  "\n",
files_cells/notebooks/ru/launch_ru.ipynb CHANGED
@@ -61,9 +61,10 @@
61
  "ngrok_token = settings['ngrok_token']\n",
62
  "zrok_token = settings['zrok_token']\n",
63
  "commandline_arguments = settings['commandline_arguments']\n",
 
64
  "\n",
65
  "\n",
66
- "# ======================== TUNNEL ========================\n",
67
  "print('Please Wait...')\n",
68
  "\n",
69
  "def get_public_ip(version='ipv4'):\n",
@@ -76,10 +77,17 @@
76
  " except Exception as e:\n",
77
  " print(f\"Error getting public {version} address:\", e)\n",
78
  "\n",
79
- "public_ipv4 = get_public_ip(version='ipv4')\n",
 
 
 
 
 
 
 
80
  "\n",
81
  "tunnel_class = pickle.load(open(f\"{root_path}/new_tunnel\", \"rb\"), encoding=\"utf-8\")\n",
82
- "tunnel_port= 1734\n",
83
  "tunnel = tunnel_class(tunnel_port)\n",
84
  "tunnel.add_tunnel(command=\"cl tunnel --url localhost:{port}\", name=\"cl\", pattern=re.compile(r\"[\\w-]+\\.trycloudflare\\.com\"))\n",
85
  "tunnel.add_tunnel(command=\"lt --port {port}\", name=\"lt\", pattern=re.compile(r\"[\\w-]+\\.loca\\.lt\"), note=\"Password : \" + \"\\033[32m\" + public_ipv4 + \"\\033[0m\" + \" rerun cell if 404 error.\")\n",
@@ -90,17 +98,33 @@
90
  " tunnel.add_tunnel(command=\"zrok share public http://localhost:{port}/ --headless\", name=\"zrok\", pattern=re.compile(r\"[\\w-]+\\.share\\.zrok\\.io\"))\n",
91
  "\n",
92
  "clear_output()\n",
93
- "# ======================== TUNNEL ========================\n",
94
  "\n",
95
  "\n",
96
- "# automatic fixing path V2\n",
97
- "!sed -i 's|\"tagger_hf_cache_dir\": \".*\"|\"tagger_hf_cache_dir\": \"{webui_path}/models/interrogators/\"|' {webui_path}/config.json\n",
98
- "!sed -i 's|\"additional_networks_extra_lora_path\": \".*\"|\"additional_networks_extra_lora_path\": \"{webui_path}/models/Lora/\"|' {webui_path}/config.json\n",
99
- "!sed -i 's|\"ad_extra_models_dir\": \".*\"|\"ad_extra_models_dir\": \"{webui_path}/models/adetailer/\"|' {webui_path}/config.json\n",
100
- "!sed -i 's/\"sd_checkpoint_hash\": \".*\"/\"sd_checkpoint_hash\": \"\"/g; s/\"sd_model_checkpoint\": \".*\"/\"sd_model_checkpoint\": \"\"/g; s/\"sd_vae\": \".*\"/\"sd_vae\": \"None\"/g' {webui_path}/config.json\n",
101
- "# for kaggle\n",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  "if env == 'Kaggle':\n",
103
- " !sed -i 's/\"civitai_interface\\/NSFW content\\/value\":.*/\"civitai_interface\\/NSFW content\\/value\": false/g' {webui_path}/ui-config.json\n",
 
 
104
  "\n",
105
  "with tunnel:\n",
106
  " %cd {webui_path}\n",
@@ -111,6 +135,10 @@
111
  " if env != \"Google Colab\":\n",
112
  " commandline_arguments += f' --encrypt-pass={tunnel_port} --api'\n",
113
  "\n",
 
 
 
 
114
  " !COMMANDLINE_ARGS=\"{commandline_arguments}\" python launch.py\n",
115
  "\n",
116
  "\n",
 
61
  "ngrok_token = settings['ngrok_token']\n",
62
  "zrok_token = settings['zrok_token']\n",
63
  "commandline_arguments = settings['commandline_arguments']\n",
64
+ "change_webui = settings['change_webui']\n",
65
  "\n",
66
  "\n",
67
+ "# ======================== TUNNEL V2 ========================\n",
68
  "print('Please Wait...')\n",
69
  "\n",
70
  "def get_public_ip(version='ipv4'):\n",
 
77
  " except Exception as e:\n",
78
  " print(f\"Error getting public {version} address:\", e)\n",
79
  "\n",
80
+ "# Check if public IP is already saved, if not then get it\n",
81
+ "try:\n",
82
+ " with open(f\"{root_path}/public_ip.txt\", \"r\") as file:\n",
83
+ " public_ipv4 = file.read().strip()\n",
84
+ "except FileNotFoundError:\n",
85
+ " public_ipv4 = get_public_ip(version='ipv4')\n",
86
+ " with open(f\"{root_path}/public_ip.txt\", \"w\") as file:\n",
87
+ " file.write(public_ipv4)\n",
88
  "\n",
89
  "tunnel_class = pickle.load(open(f\"{root_path}/new_tunnel\", \"rb\"), encoding=\"utf-8\")\n",
90
+ "tunnel_port = 1734\n",
91
  "tunnel = tunnel_class(tunnel_port)\n",
92
  "tunnel.add_tunnel(command=\"cl tunnel --url localhost:{port}\", name=\"cl\", pattern=re.compile(r\"[\\w-]+\\.trycloudflare\\.com\"))\n",
93
  "tunnel.add_tunnel(command=\"lt --port {port}\", name=\"lt\", pattern=re.compile(r\"[\\w-]+\\.loca\\.lt\"), note=\"Password : \" + \"\\033[32m\" + public_ipv4 + \"\\033[0m\" + \" rerun cell if 404 error.\")\n",
 
98
  " tunnel.add_tunnel(command=\"zrok share public http://localhost:{port}/ --headless\", name=\"zrok\", pattern=re.compile(r\"[\\w-]+\\.share\\.zrok\\.io\"))\n",
99
  "\n",
100
  "clear_output()\n",
 
101
  "\n",
102
  "\n",
103
+ "# =============== Automatic Fixing Path V3 ===============\n",
104
+ "paths_to_check = [\n",
105
+ " (\"tagger_hf_cache_dir\", f\"{webui_path}/models/interrogators/\"),\n",
106
+ " (\"additional_networks_extra_lora_path\", f\"{webui_path}/models/Lora/\"),\n",
107
+ " (\"ad_extra_models_dir\", f\"{webui_path}/models/adetailer/\"),\n",
108
+ " (\"sd_checkpoint_hash\", \"\"),\n",
109
+ " (\"sd_model_checkpoint\", \"\"),\n",
110
+ " (\"sd_vae\", \"None\")\n",
111
+ "]\n",
112
+ "\n",
113
+ "config_path = f'{webui_path}/ui-config.json'\n",
114
+ "\n",
115
+ "with open(config_path, 'r') as file:\n",
116
+ " config_data = json.load(file)\n",
117
+ "\n",
118
+ "for key, value in paths_to_check:\n",
119
+ " if key in config_data and config_data[key] != value:\n",
120
+ " sed_command = f\"sed -i 's|\\\"{key}\\\": \\\".*\\\"|\\\"{key}\\\": \\\"{value}\\\"|' {config_path}\"\n",
121
+ " os.system(sed_command)\n",
122
+ "\n",
123
+ "# Additional check for Kaggle\n",
124
  "if env == 'Kaggle':\n",
125
+ " get_ipython().system('sed -i \\'s/\"civitai_interface\\\\/NSFW content\\\\/value\":.*/\"civitai_interface\\\\/NSFW content\\\\/value\": false/g\\' {webui_path}/ui-config.json')\n",
126
+ "# -------------------------------------------------------\n",
127
+ "\n",
128
  "\n",
129
  "with tunnel:\n",
130
  " %cd {webui_path}\n",
 
135
  " if env != \"Google Colab\":\n",
136
  " commandline_arguments += f' --encrypt-pass={tunnel_port} --api'\n",
137
  "\n",
138
+ " # -- FORGE --\n",
139
+ " if change_webui == 'Forge':\n",
140
+ " commandline_arguments += ' --cuda-stream --pin-shared-memory'\n",
141
+ "\n",
142
  " !COMMANDLINE_ARGS=\"{commandline_arguments}\" python launch.py\n",
143
  "\n",
144
  "\n",
files_cells/python/en/auto-cleaner_en.py CHANGED
@@ -223,7 +223,7 @@ display(HTML(CSS))
223
 
224
  # ================ AutoCleaner function ================
225
  directories = {
226
- "Images": f"{webui_path}/outputs",
227
  "Models": f"{webui_path}/models/Stable-diffusion/",
228
  "Vae": f"{webui_path}/models/VAE/",
229
  "LoRa": f"{webui_path}/models/Lora/",
 
223
 
224
  # ================ AutoCleaner function ================
225
  directories = {
226
+ "Images": f"{webui_path}/output",
227
  "Models": f"{webui_path}/models/Stable-diffusion/",
228
  "Vae": f"{webui_path}/models/VAE/",
229
  "LoRa": f"{webui_path}/models/Lora/",
files_cells/python/en/downloading_en.py CHANGED
@@ -127,10 +127,10 @@ adetailer_dir = f"{webui_path}/models/adetailer"
127
  # ================= MAIN CODE =================
128
  if not os.path.exists(webui_path):
129
  start_install = int(time.time())
130
- print("⌚ Unpacking Stable Diffusion...", end='')
131
  with capture.capture_output() as cap:
132
  aria2_command = "aria2c --console-log-level=error -c -x 16 -s 16 -k 1M"
133
- url = "https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip" if change_webui != 'Forge' else "https://huggingface.co/NagisaNao/test/resolve/main/FULL_REPO_forge.zip"
134
  get_ipython().system('{aria2_command} {url} -o repo.zip')
135
 
136
  get_ipython().system('unzip -q -o repo.zip -d {webui_path}')
@@ -243,13 +243,19 @@ model_list = {
243
 
244
  # 1-4 (fp16/cleaned)
245
  vae_list = {
246
- "1.Anime.vae": [
247
- {"url": "https://civitai.com/api/download/models/131654", "name": "Anime.vae.safetensors"},
248
- {"url": "https://civitai.com/api/download/models/131658", "name": "vae-ft-mse.vae.safetensors"}
249
- ],
250
- "2.Anything.vae": [{"url": "https://civitai.com/api/download/models/131656", "name": "Anything.vae.safetensors"}],
251
- "3.Blessed2.vae": [{"url": "https://civitai.com/api/download/models/142467", "name": "Blessed2.vae.safetensors"}],
252
- "4.ClearVae.vae": [{"url": "https://civitai.com/api/download/models/133362", "name": "ClearVae_23.vae.safetensors"}],
 
 
 
 
 
 
253
  "5.WD.vae": [{"url": "https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors", "name": "WD.vae.safetensors"}]
254
  }
255
 
@@ -323,26 +329,47 @@ prefixes = {
323
  }
324
 
325
  extension_repo = []
326
- directories = (value for key, value in prefixes.items()) # for unpucking zip files
327
  get_ipython().system('mkdir -p {" ".join(directories)}')
328
 
329
  hf_token = optional_huggingface_token if optional_huggingface_token else "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"
330
  user_header = f"\"Authorization: Bearer {hf_token}\""
331
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
332
  ''' Get Image Preview | CivitAi '''
333
 
334
- def get_data_from_api(model_id): # get model data
 
335
  endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
336
  headers = {"Content-Type": "application/json"}
337
  try:
338
  response = requests.get(endpoint_url, headers=headers)
339
- if response.status_code == 200:
340
- return response.json()
341
  except requests.exceptions.RequestException as e:
342
  print(f"An error occurred: {e}")
343
  return None
344
 
345
  def extract_model_info(data, url):
 
346
  if 'type=' in url:
347
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
348
  model_name = data['files'][1]['name']
@@ -359,59 +386,69 @@ def extract_model_info(data, url):
359
  return model_type, model_name, image_url
360
 
361
  def gen_preview_filename(model_name, image_url):
 
362
  name = model_name.split('.')
363
  img_exts = image_url.split('.')
364
- return f"{name[0]}.preview.{img_exts[-1]}" # assigning the original image format
365
 
366
  ''' main download code '''
367
 
368
  def handle_manual(url):
369
- original_url = url
370
- url = url.split(':', 1)[1]
371
- file_name = re.search(r'\[(.*?)\]', url)
372
- file_name = file_name.group(1) if file_name else None
373
- if file_name:
374
- url = re.sub(r'\[.*?\]', '', url)
375
 
376
- for prefix, dir in prefixes.items():
377
- if original_url.startswith(f"{prefix}:"):
378
- if prefix != "extension":
379
- manual_download(url, dir, file_name=file_name)
380
- else:
381
- extension_repo.append((url, file_name))
382
 
383
  def manual_download(url, dst_dir, file_name):
384
  aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
385
  basename = url.split("/")[-1] if file_name is None else file_name
386
  header_option = f"--header={user_header}"
387
 
388
- # === CivitAi API ===
389
  support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image
390
  civitai_token = "62c0c5956b2f9defbd844d754000180b"
391
 
392
  if 'civitai' in url:
393
- url = f"{url}{'&' if '?' in url else '?'}token={civitai_token}" # some authors are pussies and they need API token...
394
- model_id = url.split('/')[-1]
395
- clean_url = url.split('?')[0]
396
 
397
  data = get_data_from_api(model_id)
398
  if data:
399
  model_type, model_name, image_url = extract_model_info(data, url)
400
- if model_name and image_url:
401
- image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)
402
- if any(types in model_type for types in support_types):
403
- with capture.capture_output() as cap: # suppress the download output
404
- get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}')
 
405
  del cap
406
- file_name = file_name or model_name # assigns the original file name if not specified initially
407
 
408
- """ information output """
409
- # -- world's best print info --
410
- print(f"\n\033[32m{'---'*45}\n\033[33mURL: \033[34m{clean_url if 'civitai' in url else url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name if not 'huggingface' in url else basename}\033[0m")
411
  print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
412
- if 'civitai' in url and data and any(types in model_type for types in support_types) and (locals().get('image_file_name') or ''):
413
  print(f"\033[32m[Preview DL]:\033[0m {image_file_name} - {image_url}\n")
414
- # ===================
415
 
416
  # -- GDrive --
417
  if 'drive.google' in url:
@@ -455,9 +492,8 @@ def download(url):
455
 
456
  unpacking_zip_files()
457
 
458
- ## unpacking zip files
459
  def unpacking_zip_files():
460
- # directories - above
461
  for directory in directories:
462
  for root, dirs, files in os.walk(directory):
463
  for file in files:
@@ -470,8 +506,6 @@ def unpucking_zip_files():
470
 
471
  ''' submodels - added urls '''
472
 
473
- submodels = []
474
-
475
  def add_submodels(selection, num_selection, model_dict, dst_dir):
476
  if selection == "none":
477
  return []
@@ -483,33 +517,32 @@ def add_submodels(selection, num_selection, model_dict, dst_dir):
483
  else:
484
  selected_models = model_dict[selection]
485
  selected_nums = map(int, num_selection.replace(',', '').split())
486
-
487
  for num in selected_nums:
488
  if 1 <= num <= len(model_dict):
489
  name = list(model_dict)[num - 1]
490
  selected_models.extend(model_dict[name])
491
 
492
  unique_models = list({model['name']: model for model in selected_models}.values())
493
-
494
  for model in unique_models:
495
  model['dst_dir'] = dst_dir
496
 
497
  return unique_models
498
 
499
- submodels += add_submodels(Model, Model_Num, model_list, models_dir) # model
500
- submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir) # vae
501
- submodels += add_submodels(controlnet, controlnet_Num, controlnet_list, control_dir) # controlnet
502
 
503
- for submodel in submodels:
504
- if not Inpainting_Model and "inpainting" in submodel['name']:
505
- continue
506
- url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
507
 
508
  ''' file.txt - added urls '''
509
 
510
- unique_urls = []
511
-
512
- def process_file_download(file_url):
513
  files_urls = ""
514
 
515
  if file_url.startswith("http"):
@@ -523,19 +556,23 @@ def process_file_download(file_url):
523
 
524
  current_tag = None
525
  for line in lines:
 
526
  if any(f'# {tag}' in line.lower() for tag in prefixes):
527
  current_tag = next((tag for tag in prefixes if tag in line.lower()))
528
 
529
  urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls
530
  for url in urls:
531
- if url.startswith("http") and url not in unique_urls:
 
 
532
  files_urls += f"{current_tag}:{url}, "
533
- unique_urls.append(url)
534
 
535
  return files_urls
536
 
537
- # fix all possible errors/options and function call
538
  file_urls = ""
 
 
539
  if custom_file_urls:
540
  for custom_file_url in custom_file_urls.replace(',', '').split():
541
  if not custom_file_url.endswith('.txt'):
@@ -545,7 +582,7 @@ if custom_file_urls:
545
  custom_file_url = f'{root_path}/{custom_file_url}'
546
 
547
  try:
548
- file_urls += process_file_download(custom_file_url)
549
  except FileNotFoundError:
550
  pass
551
 
 
127
  # ================= MAIN CODE =================
128
  if not os.path.exists(webui_path):
129
  start_install = int(time.time())
130
+ print("⌚ Unpacking Stable Diffusion..." if change_webui != 'Forge' else "⌚ Unpacking Stable Diffusion (Forge)...", end='')
131
  with capture.capture_output() as cap:
132
  aria2_command = "aria2c --console-log-level=error -c -x 16 -s 16 -k 1M"
133
+ url = "https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip" if change_webui != 'Forge' else "https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO_forge.zip"
134
  get_ipython().system('{aria2_command} {url} -o repo.zip')
135
 
136
  get_ipython().system('unzip -q -o repo.zip -d {webui_path}')
 
243
 
244
  # 1-4 (fp16/cleaned)
245
  vae_list = {
246
+ ## old vae links broke upstream - replaced with working links below
247
+ # "1.Anime.vae": [
248
+ # {"url": "https://civitai.com/api/download/models/131654", "name": "Anime.vae.safetensors"},
249
+ # {"url": "https://civitai.com/api/download/models/131658", "name": "vae-ft-mse.vae.safetensors"}
250
+ # ],
251
+ # "2.Anything.vae": [{"url": "https://civitai.com/api/download/models/131656", "name": "Anything.vae.safetensors"}],
252
+ # "3.Blessed2.vae": [{"url": "https://civitai.com/api/download/models/142467", "name": "Blessed2.vae.safetensors"}],
253
+ # "4.ClearVae.vae": [{"url": "https://civitai.com/api/download/models/133362", "name": "ClearVae_23.vae.safetensors"}],
254
+
255
+ "1.Anime.vae": [{"url": "https://civitai.com/api/download/models/311162", "name": "vae-ft-mse-840000-ema-pruned.vae.safetensors"}],
256
+ "2.Anything.vae": [{"url": "https://civitai.com/api/download/models/119279", "name": "Anything.vae.safetensors"}],
257
+ "3.Blessed2.vae": [{"url": "https://huggingface.co/NoCrypt/blessed_vae/resolve/main/blessed2.vae.pt", "name": "Blessed2.vae.safetensors"}],
258
+ "4.ClearVae.vae": [{"url": "https://civitai.com/api/download/models/88156", "name": "ClearVae_23.vae.safetensors"}],
259
  "5.WD.vae": [{"url": "https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors", "name": "WD.vae.safetensors"}]
260
  }
261
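Each `vae_list` entry maps a menu label to a list of `{url, name}` download specs. A hedged sketch of fetching one entry with aria2 (requires `aria2c` on PATH; the `vaes_dir` value is an assumption based on the directories defined earlier in the script):

    import subprocess

    entry = {"url": "https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors",
             "name": "WD.vae.safetensors"}
    vaes_dir = "/content/sdw/models/VAE"  # assumption: set near the top of the script

    # Mirror the script's aria2 options: 16 connections, 1M chunks
    subprocess.run(["aria2c", "-x16", "-s16", "-k1M",
                    "-d", vaes_dir, "-o", entry["name"], entry["url"]], check=False)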
 
 
329
  }
330
 
331
  extension_repo = []
332
+ directories = [value for key, value in prefixes.items()] # for unpacking zip files
333
  get_ipython().system('mkdir -p {" ".join(directories)}')
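The switch from a generator to a list on the `directories` line is load-bearing: the old generator was consumed by the `" ".join(directories)` in the mkdir call, so the later zip-unpacking walk over `directories` saw nothing. A minimal demonstration of the pitfall:

    prefixes = {"model": "/tmp/models", "vae": "/tmp/vae"}  # toy stand-in

    gen = (value for key, value in prefixes.items())
    print(" ".join(gen))   # -> /tmp/models /tmp/vae
    print(list(gen))       # -> [] (the generator is now exhausted)

    dirs = [value for key, value in prefixes.items()]
    print(" ".join(dirs))  # -> /tmp/models /tmp/vae
    print(list(dirs))      # -> ['/tmp/models', '/tmp/vae'] (lists can be re-iterated)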
334
 
335
  hf_token = optional_huggingface_token if optional_huggingface_token else "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"
336
  user_header = f"\"Authorization: Bearer {hf_token}\""
337
 
338
+ ''' Formatted Info Output '''
339
+
340
+ from math import floor
341
+
342
+ def center_text(text, terminal_width=45):
343
+ text_length = len(text)
344
+ left_padding = floor((terminal_width - text_length) / 2)
345
+ right_padding = terminal_width - text_length - left_padding
346
+ return f"\033[1m\033[36m{' ' * left_padding}{text}{' ' * right_padding}\033[0m\033[32m"
347
+
348
+ def format_output(url, dst_dir, file_name):
349
+ info = f"[{file_name.split('.')[0]}]"
350
+ info = center_text(info)
351
+
352
+ print(f"\n\033[32m{'---'*20}]{info}[{'---'*20}")
353
+ print(f"\033[33mURL: \033[34m{url}")
354
+ print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
355
+ print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
356
+
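For reference, `center_text` pads the label into a fixed 45-column field: with `"[model]"` (7 characters) the left pad is floor((45 - 7) / 2) = 19 and the right pad is 45 - 7 - 19 = 19. A stripped-down version without the ANSI color codes behaves the same way:

    from math import floor

    def center_text_plain(text, terminal_width=45):
        left = floor((terminal_width - len(text)) / 2)
        right = terminal_width - len(text) - left
        return f"{' ' * left}{text}{' ' * right}"

    print(repr(center_text_plain("[model]")))  # 19 spaces, '[model]', 19 spaces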
357
  ''' Get Image Preview | CivitAi '''
358
 
359
+ def get_data_from_api(model_id):
360
+ """Fetch model data from the API"""
361
  endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
362
  headers = {"Content-Type": "application/json"}
363
  try:
364
  response = requests.get(endpoint_url, headers=headers)
365
+ response.raise_for_status()
366
+ return response.json()
367
  except requests.exceptions.RequestException as e:
368
  print(f"An error occurred: {e}")
369
  return None
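Behavior note: `response.raise_for_status()` turns any non-2xx reply into an `HTTPError`, which is a subclass of `RequestException`, so a bad model id is now reported by the same handler instead of silently falling through to an implicit None. Hypothetical usage:

    data = get_data_from_api("0")  # an invalid version id would print the HTTP error
    if data is None:
        print("no metadata available; skipping the preview download")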
370
 
371
  def extract_model_info(data, url):
372
+ """Extract model information based on URL"""
373
  if 'type=' in url:
374
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
375
  model_name = data['files'][1]['name']
 
386
  return model_type, model_name, image_url
387
 
388
  def gen_preview_filename(model_name, image_url):
389
+ """Generate a preview filename"""
390
  name = model_name.split('.')
391
  img_exts = image_url.split('.')
392
+ return f"{name[0]}.preview.{img_exts[-1]}"
393
 
394
  ''' main download code '''
395
 
396
  def handle_manual(url):
397
+ url_parts = url.split(':', 1)
398
+ prefix = url_parts[0]
399
+ path = url_parts[1]
400
 
401
+ file_name_match = re.search(r'\[(.*?)\]', path)
402
+ file_name = file_name_match.group(1) if file_name_match else None
403
+ if file_name:
404
+ path = re.sub(r'\[.*?\]', '', path)
405
+
406
+ if prefix in prefixes:
407
+ dir = prefixes[prefix]
408
+ if prefix != "extension":
409
+ try:
410
+ manual_download(path, dir, file_name=file_name)
411
+ except Exception as e:
412
+ print(f"Error downloading file: {e}")
413
+ else:
414
+ extension_repo.append((path, file_name))
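The rewritten `handle_manual` splits a tagged URL of the form `prefix:url[optional_name]` and routes it by prefix. Hypothetical calls, assuming `prefixes` contains `model` and `extension` keys (only `extension` is visible in this diff):

    handle_manual("model:https://civitai.com/api/download/models/311162[MyModel.safetensors]")
    handle_manual("extension:https://github.com/some-user/some-extension")  # queued in extension_repo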
415
 
416
  def manual_download(url, dst_dir, file_name):
417
  aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
418
  basename = url.split("/")[-1] if file_name is None else file_name
419
  header_option = f"--header={user_header}"
420
 
421
+ # ==== CivitAi API ====
422
  support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image
423
  civitai_token = "62c0c5956b2f9defbd844d754000180b"
424
 
425
  if 'civitai' in url:
426
+ url = f"{url}{'&' if '?' in url else '?'}token={civitai_token}"
427
+ model_id = url.split('/')[-1].split('?')[0]
428
+ clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token
429
 
430
  data = get_data_from_api(model_id)
431
  if data:
432
  model_type, model_name, image_url = extract_model_info(data, url)
433
+
434
+ if any(t in model_type for t in support_types):
435
+ if model_name and image_url:
436
+ image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)
437
+ with capture.capture_output() as cap:
438
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'")
439
  del cap
440
+ file_name = file_name or model_name
441
+ else:
442
+ clean_url = url
443
+
444
+ """ Formatted info output """
445
+ model_name_or_basename = file_name if not 'huggingface' in url else basename
446
+ format_output(clean_url or url, dst_dir, model_name_or_basename)
447
 
448
  print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
449
+ if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):
450
  print(f"\033[32m[Preview DL]:\033[0m {image_file_name} - {image_url}\n")
451
+ # =====================
452
 
453
  # -- GDrive --
454
  if 'drive.google' in url:
 
492
 
493
  unpacking_zip_files()
494
 
495
+ # unpacking zip files
496
  def unpacking_zip_files():
 
497
  for directory in directories:
498
  for root, dirs, files in os.walk(directory):
499
  for file in files:
 
506
 
507
  ''' submodels - added urls '''
508
 
 
 
509
  def add_submodels(selection, num_selection, model_dict, dst_dir):
510
  if selection == "none":
511
  return []
 
517
  else:
518
  selected_models = model_dict[selection]
519
  selected_nums = map(int, num_selection.replace(',', '').split())
 
520
  for num in selected_nums:
521
  if 1 <= num <= len(model_dict):
522
  name = list(model_dict)[num - 1]
523
  selected_models.extend(model_dict[name])
524
 
525
  unique_models = list({model['name']: model for model in selected_models}.values())
 
526
  for model in unique_models:
527
  model['dst_dir'] = dst_dir
528
 
529
  return unique_models
530
 
531
+ def handle_submodels(selection, num_selection, model_dict, dst_dir, url):
532
+ submodels = add_submodels(selection, num_selection, model_dict, dst_dir)
533
+ for submodel in submodels:
534
+ if not Inpainting_Model and "inpainting" in submodel['name']:
535
+ continue
536
+ url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
537
+ return url
538
 
539
+ url = handle_submodels(Model, Model_Num, model_list, models_dir, url)
540
+ url = handle_submodels(Vae, Vae_Num, vae_list, vaes_dir, url)
541
+ url = handle_submodels(controlnet, controlnet_Num, controlnet_list, control_dir, url)
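`handle_submodels` appends each selection to the shared `url` string as comma-separated `<link> <dst_dir> <name>` triples. A sketch of how such a string can be split back into download jobs, assuming the downstream loop follows this convention (the hosts and paths below are placeholders):

    url = ("https://host/a.safetensors /content/sdw/models/Stable-diffusion a.safetensors, "
           "https://host/b.vae.pt /content/sdw/models/VAE b.vae.pt, ")

    for chunk in filter(None, (part.strip() for part in url.split(','))):
        link, dst_dir, name = chunk.split()
        print(f"{link} -> {dst_dir}/{name}")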
 
542
 
543
  ''' file.txt - added urls '''
544
 
545
+ def process_file_download(file_url, prefixes, unique_urls):
 
 
546
  files_urls = ""
547
 
548
  if file_url.startswith("http"):
 
556
 
557
  current_tag = None
558
  for line in lines:
559
+ line = line.strip()
560
  if any(f'# {tag}' in line.lower() for tag in prefixes):
561
  current_tag = next((tag for tag in prefixes if tag in line.lower()))
562
 
563
  urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls
564
  for url in urls:
565
+ filter_url = url.split('[')[0] # strip the [filename] tag before the duplicate check
566
+
567
+ if url.startswith("http") and filter_url not in unique_urls:
568
  files_urls += f"{current_tag}:{url}, "
569
+ unique_urls.add(filter_url)
570
 
571
  return files_urls
572
 
 
573
  file_urls = ""
574
+ unique_urls = set()
575
+
576
  if custom_file_urls:
577
  for custom_file_url in custom_file_urls.replace(',', '').split():
578
  if not custom_file_url.endswith('.txt'):
 
582
  custom_file_url = f'{root_path}/{custom_file_url}'
583
 
584
  try:
585
+ file_urls += process_file_download(custom_file_url, prefixes, unique_urls)
586
  except FileNotFoundError:
587
  pass
588
 
files_cells/python/en/launch_en.py CHANGED
@@ -12,7 +12,7 @@ from IPython.display import clear_output
12
 
13
  # ================= DETECT ENV =================
14
  def detect_environment():
15
- free_plan = (os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024. ** 3) <= 20)
16
  environments = {
17
  'COLAB_GPU': ('Google Colab', "/root" if free_plan else "/content"),
18
  'KAGGLE_URL_BASE': ('Kaggle', "/kaggle/working/content")
@@ -37,9 +37,10 @@ settings = load_settings()
37
  ngrok_token = settings['ngrok_token']
38
  zrok_token = settings['zrok_token']
39
  commandline_arguments = settings['commandline_arguments']
 
40
 
41
 
42
- # ======================== TUNNEL ========================
43
  print('Please Wait...')
44
 
45
  def get_public_ip(version='ipv4'):
@@ -52,10 +53,17 @@ def get_public_ip(version='ipv4'):
52
  except Exception as e:
53
  print(f"Error getting public {version} address:", e)
54
 
55
- public_ipv4 = get_public_ip(version='ipv4')
56
 
57
  tunnel_class = pickle.load(open(f"{root_path}/new_tunnel", "rb"), encoding="utf-8")
58
- tunnel_port= 1734
59
  tunnel = tunnel_class(tunnel_port)
60
  tunnel.add_tunnel(command="cl tunnel --url localhost:{port}", name="cl", pattern=re.compile(r"[\w-]+\.trycloudflare\.com"))
61
  tunnel.add_tunnel(command="lt --port {port}", name="lt", pattern=re.compile(r"[\w-]+\.loca\.lt"), note="Password : " + "\033[32m" + public_ipv4 + "\033[0m" + " rerun cell if 404 error.")
@@ -66,17 +74,32 @@ if zrok_token:
66
  tunnel.add_tunnel(command="zrok share public http://localhost:{port}/ --headless", name="zrok", pattern=re.compile(r"[\w-]+\.share\.zrok\.io"))
67
 
68
  clear_output()
69
- # ======================== TUNNEL ========================
70
 
71
 
72
- # automatic fixing path V2
73
- get_ipython().system('sed -i \'s|"tagger_hf_cache_dir": ".*"|"tagger_hf_cache_dir": "{webui_path}/models/interrogators/"|\' {webui_path}/config.json')
74
- get_ipython().system('sed -i \'s|"additional_networks_extra_lora_path": ".*"|"additional_networks_extra_lora_path": "{webui_path}/models/Lora/"|\' {webui_path}/config.json')
75
- get_ipython().system('sed -i \'s|"ad_extra_models_dir": ".*"|"ad_extra_models_dir": "{webui_path}/models/adetailer/"|\' {webui_path}/config.json')
76
- get_ipython().system('sed -i \'s/"sd_checkpoint_hash": ".*"/"sd_checkpoint_hash": ""/g; s/"sd_model_checkpoint": ".*"/"sd_model_checkpoint": ""/g; s/"sd_vae": ".*"/"sd_vae": "None"/g\' {webui_path}/config.json')
77
- # for kaggle
78
  if env == 'Kaggle':
79
  get_ipython().system('sed -i \'s/"civitai_interface\\/NSFW content\\/value":.*/"civitai_interface\\/NSFW content\\/value": false/g\' {webui_path}/ui-config.json')
 
80
 
81
 
82
  with tunnel:
@@ -88,6 +111,10 @@ with tunnel:
88
  if env != "Google Colab":
89
  commandline_arguments += f' --encrypt-pass={tunnel_port} --api'
90
91
  get_ipython().system('COMMANDLINE_ARGS="{commandline_arguments}" python launch.py')
92
 
93
 
 
12
 
13
  # ================= DETECT ENV =================
14
  def detect_environment():
15
+ free_plan = (os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024 ** 3) <= 20)
16
  environments = {
17
  'COLAB_GPU': ('Google Colab', "/root" if free_plan else "/content"),
18
  'KAGGLE_URL_BASE': ('Kaggle', "/kaggle/working/content")
 
37
  ngrok_token = settings['ngrok_token']
38
  zrok_token = settings['zrok_token']
39
  commandline_arguments = settings['commandline_arguments']
40
+ change_webui = settings['change_webui']
41
 
42
 
43
+ # ======================== TUNNEL V2 ========================
44
  print('Please Wait...')
45
 
46
  def get_public_ip(version='ipv4'):
 
53
  except Exception as e:
54
  print(f"Error getting public {version} address:", e)
55
 
56
+ # Reuse the cached public IP if present; otherwise fetch and save it
57
+ try:
58
+ with open(f"{root_path}/public_ip.txt", "r") as file:
59
+ public_ipv4 = file.read().strip()
60
+ except FileNotFoundError:
61
+ public_ipv4 = get_public_ip(version='ipv4')
62
+ with open(f"{root_path}/public_ip.txt", "w") as file:
63
+ file.write(public_ipv4)
64
 
65
  tunnel_class = pickle.load(open(f"{root_path}/new_tunnel", "rb"), encoding="utf-8")
66
+ tunnel_port = 1734
67
  tunnel = tunnel_class(tunnel_port)
68
  tunnel.add_tunnel(command="cl tunnel --url localhost:{port}", name="cl", pattern=re.compile(r"[\w-]+\.trycloudflare\.com"))
69
  tunnel.add_tunnel(command="lt --port {port}", name="lt", pattern=re.compile(r"[\w-]+\.loca\.lt"), note="Password : " + "\033[32m" + public_ipv4 + "\033[0m" + " rerun cell if 404 error.")
 
74
  tunnel.add_tunnel(command="zrok share public http://localhost:{port}/ --headless", name="zrok", pattern=re.compile(r"[\w-]+\.share\.zrok\.io"))
75
 
76
  clear_output()
 
77
 
78
 
79
+ # =============== Automatic Fixing Path V3 ===============
80
+ paths_to_check = [
81
+ ("tagger_hf_cache_dir", f"{webui_path}/models/interrogators/"),
82
+ ("additional_networks_extra_lora_path", f"{webui_path}/models/Lora/"),
83
+ ("ad_extra_models_dir", f"{webui_path}/models/adetailer/"),
84
+ ("sd_checkpoint_hash", ""),
85
+ ("sd_model_checkpoint", ""),
86
+ ("sd_vae", "None")
87
+ ]
88
+
89
+ config_path = f'{webui_path}/ui-config.json'
90
+
91
+ with open(config_path, 'r') as file:
92
+ config_data = json.load(file)
93
+
94
+ for key, value in paths_to_check:
95
+ if key in config_data and config_data[key] != value:
96
+ sed_command = f"sed -i 's|\"{key}\": \".*\"|\"{key}\": \"{value}\"|' {config_path}"
97
+ os.system(sed_command)
98
+
99
+ # Additional check for Kaggle
100
  if env == 'Kaggle':
101
  get_ipython().system('sed -i \'s/"civitai_interface\\/NSFW content\\/value":.*/"civitai_interface\\/NSFW content\\/value": false/g\' {webui_path}/ui-config.json')
102
+ # -------------------------------------------------------
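The V3 fixer reads `ui-config.json` with `json.load` to decide which keys changed but still shells out to sed for the rewrite (note this assumes `json` is imported at the top of the script). An equivalent pure-Python sketch, reusing the same `paths_to_check` and `webui_path` from the block above:

    import json

    config_path = f"{webui_path}/ui-config.json"  # webui_path as defined earlier

    with open(config_path) as f:
        config_data = json.load(f)

    changed = False
    for key, value in paths_to_check:  # the list built in the block above
        if key in config_data and config_data[key] != value:
            config_data[key] = value
            changed = True

    if changed:
        with open(config_path, "w") as f:
            json.dump(config_data, f, indent=4)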
103
 
104
 
105
  with tunnel:
 
111
  if env != "Google Colab":
112
  commandline_arguments += f' --encrypt-pass={tunnel_port} --api'
113
 
114
+ # -- FORGE --
115
+ if change_webui == 'Forge':
116
+ commandline_arguments += ' --cuda-stream --pin-shared-memory'
117
+
118
  get_ipython().system('COMMANDLINE_ARGS="{commandline_arguments}" python launch.py')
119
 
120
 
files_cells/python/ru/auto-cleaner_ru.py CHANGED
@@ -223,7 +223,7 @@ display(HTML(CSS_AC))
223
 
224
  # ================ AutoCleaner function ================
225
  directories = {
226
- "Изображения": f"{webui_path}/outputs",
227
  "Модели": f"{webui_path}/models/Stable-diffusion/",
228
  "Vae": f"{webui_path}/models/VAE/",
229
  "LoRa": f"{webui_path}/models/Lora/",
 
223
 
224
  # ================ AutoCleaner function ================
225
  directories = {
226
+ "Изображения": f"{webui_path}/output",
227
  "Модели": f"{webui_path}/models/Stable-diffusion/",
228
  "Vae": f"{webui_path}/models/VAE/",
229
  "LoRa": f"{webui_path}/models/Lora/",
files_cells/python/ru/downloading_ru.py CHANGED
@@ -130,7 +130,7 @@ if not os.path.exists(webui_path):
130
  print("⌚ Распаковка Stable Diffusion..." if change_webui != 'Forge' else "⌚ Распаковка Stable Diffusion (Forge)...", end='')
131
  with capture.capture_output() as cap:
132
  aria2_command = "aria2c --console-log-level=error -c -x 16 -s 16 -k 1M"
133
- url = "https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip" if change_webui != 'Forge' else "https://huggingface.co/NagisaNao/test/resolve/main/FULL_REPO_forge.zip"
134
  get_ipython().system('{aria2_command} {url} -o repo.zip')
135
 
136
  get_ipython().system('unzip -q -o repo.zip -d {webui_path}')
@@ -243,13 +243,19 @@ model_list = {
243
 
244
  # 1-4 (fp16/cleaned)
245
  vae_list = {
246
- "1.Anime.vae": [
247
- {"url": "https://civitai.com/api/download/models/131654", "name": "Anime.vae.safetensors"},
248
- {"url": "https://civitai.com/api/download/models/131658", "name": "vae-ft-mse.vae.safetensors"}
249
- ],
250
- "2.Anything.vae": [{"url": "https://civitai.com/api/download/models/131656", "name": "Anything.vae.safetensors"}],
251
- "3.Blessed2.vae": [{"url": "https://civitai.com/api/download/models/142467", "name": "Blessed2.vae.safetensors"}],
252
- "4.ClearVae.vae": [{"url": "https://civitai.com/api/download/models/133362", "name": "ClearVae_23.vae.safetensors"}],
253
  "5.WD.vae": [{"url": "https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors", "name": "WD.vae.safetensors"}]
254
  }
255
 
@@ -323,26 +329,47 @@ prefixes = {
323
  }
324
 
325
  extension_repo = []
326
- directories = (value for key, value in prefixes.items()) # for unpacking zip files
327
  get_ipython().system('mkdir -p {" ".join(directories)}')
328
 
329
  hf_token = optional_huggingface_token if optional_huggingface_token else "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"
330
  user_header = f"\"Authorization: Bearer {hf_token}\""
331
 
 
332
  ''' Get Image Preview | CivitAi '''
333
 
334
- def get_data_from_api(model_id): # get model data
 
335
  endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
336
  headers = {"Content-Type": "application/json"}
337
  try:
338
  response = requests.get(endpoint_url, headers=headers)
339
- if response.status_code == 200:
340
- return response.json()
341
  except requests.exceptions.RequestException as e:
342
  print(f"An error occurred: {e}")
343
  return None
344
 
345
  def extract_model_info(data, url):
 
346
  if 'type=' in url:
347
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
348
  model_name = data['files'][1]['name']
@@ -359,59 +386,69 @@ def extract_model_info(data, url):
359
  return model_type, model_name, image_url
360
 
361
  def gen_preview_filename(model_name, image_url):
 
362
  name = model_name.split('.')
363
  img_exts = image_url.split('.')
364
- return f"{name[0]}.preview.{img_exts[-1]}" # assigning the original image format
365
 
366
  ''' main download code '''
367
 
368
  def handle_manual(url):
369
- original_url = url
370
- url = url.split(':', 1)[1]
371
- file_name = re.search(r'\[(.*?)\]', url)
372
- file_name = file_name.group(1) if file_name else None
373
- if file_name:
374
- url = re.sub(r'\[.*?\]', '', url)
375
 
376
- for prefix, dir in prefixes.items():
377
- if original_url.startswith(f"{prefix}:"):
378
- if prefix != "extension":
379
- manual_download(url, dir, file_name=file_name)
380
- else:
381
- extension_repo.append((url, file_name))
382
 
383
  def manual_download(url, dst_dir, file_name):
384
  aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
385
  basename = url.split("/")[-1] if file_name is None else file_name
386
  header_option = f"--header={user_header}"
387
 
388
- # === CivitAi API ===
389
  support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image
390
  civitai_token = "62c0c5956b2f9defbd844d754000180b"
391
 
392
  if 'civitai' in url:
393
- url = f"{url}{'&' if '?' in url else '?'}token={civitai_token}" # some authors are pussies and they need API token...
394
- model_id = url.split('/')[-1]
395
- clean_url = url.split('?')[0]
396
 
397
  data = get_data_from_api(model_id)
398
  if data:
399
  model_type, model_name, image_url = extract_model_info(data, url)
400
- if model_name and image_url:
401
- image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)
402
- if any(types in model_type for types in support_types):
403
- with capture.capture_output() as cap: # suppress the download output
404
- get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}')
 
405
  del cap
406
- file_name = file_name or model_name # assigns the original file name if not specified initially
407
 
408
- """ information output """
409
- # -- world's best print info --
410
- print(f"\n\033[32m{'---'*45}\n\033[33mURL: \033[34m{clean_url if 'civitai' in url else url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name if not 'huggingface' in url else basename}\033[0m")
411
  print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
412
- if 'civitai' in url and data and any(types in model_type for types in support_types) and (locals().get('image_file_name') or ''):
413
  print(f"\033[32m[Preview DL]:\033[0m {image_file_name} - {image_url}\n")
414
- # ===================
415
 
416
  # -- GDrive --
417
  if 'drive.google' in url:
@@ -455,9 +492,8 @@ def download(url):
455
 
456
  unpacking_zip_files()
457
 
458
- ## unpacking zip files
459
  def unpacking_zip_files():
460
- # directories - above
461
  for directory in directories:
462
  for root, dirs, files in os.walk(directory):
463
  for file in files:
@@ -470,8 +506,6 @@ def unpucking_zip_files():
470
 
471
  ''' submodels - added urls '''
472
 
473
- submodels = []
474
-
475
  def add_submodels(selection, num_selection, model_dict, dst_dir):
476
  if selection == "none":
477
  return []
@@ -483,33 +517,32 @@ def add_submodels(selection, num_selection, model_dict, dst_dir):
483
  else:
484
  selected_models = model_dict[selection]
485
  selected_nums = map(int, num_selection.replace(',', '').split())
486
-
487
  for num in selected_nums:
488
  if 1 <= num <= len(model_dict):
489
  name = list(model_dict)[num - 1]
490
  selected_models.extend(model_dict[name])
491
 
492
  unique_models = list({model['name']: model for model in selected_models}.values())
493
-
494
  for model in unique_models:
495
  model['dst_dir'] = dst_dir
496
 
497
  return unique_models
498
 
499
- submodels += add_submodels(Model, Model_Num, model_list, models_dir) # model
500
- submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir) # vae
501
- submodels += add_submodels(controlnet, controlnet_Num, controlnet_list, control_dir) # controlnet
502
 
503
- for submodel in submodels:
504
- if not Inpainting_Model and "inpainting" in submodel['name']:
505
- continue
506
- url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
507
 
508
  ''' file.txt - added urls '''
509
 
510
- unique_urls = []
511
-
512
- def process_file_download(file_url):
513
  files_urls = ""
514
 
515
  if file_url.startswith("http"):
@@ -523,19 +556,23 @@ def process_file_download(file_url):
523
 
524
  current_tag = None
525
  for line in lines:
 
526
  if any(f'# {tag}' in line.lower() for tag in prefixes):
527
  current_tag = next((tag for tag in prefixes if tag in line.lower()))
528
 
529
  urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls
530
  for url in urls:
531
- if url.startswith("http") and url not in unique_urls:
 
 
532
  files_urls += f"{current_tag}:{url}, "
533
- unique_urls.append(url)
534
 
535
  return files_urls
536
 
537
- # fix all possible errors/options and function call
538
  file_urls = ""
 
 
539
  if custom_file_urls:
540
  for custom_file_url in custom_file_urls.replace(',', '').split():
541
  if not custom_file_url.endswith('.txt'):
@@ -545,7 +582,7 @@ if custom_file_urls:
545
  custom_file_url = f'{root_path}/{custom_file_url}'
546
 
547
  try:
548
- file_urls += process_file_download(custom_file_url)
549
  except FileNotFoundError:
550
  pass
551
 
 
130
  print("⌚ Распаковка Stable Diffusion..." if change_webui != 'Forge' else "⌚ Распаковка Stable Diffusion (Forge)...", end='')
131
  with capture.capture_output() as cap:
132
  aria2_command = "aria2c --console-log-level=error -c -x 16 -s 16 -k 1M"
133
+ url = "https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO.zip" if change_webui != 'Forge' else "https://huggingface.co/NagisaNao/fast_repo/resolve/main/FULL_REPO_forge.zip"
134
  get_ipython().system('{aria2_command} {url} -o repo.zip')
135
 
136
  get_ipython().system('unzip -q -o repo.zip -d {webui_path}')
 
243
 
244
  # 1-4 (fp16/cleaned)
245
  vae_list = {
246
+ ## old vae links broke upstream - replaced with working links below
247
+ # "1.Anime.vae": [
248
+ # {"url": "https://civitai.com/api/download/models/131654", "name": "Anime.vae.safetensors"},
249
+ # {"url": "https://civitai.com/api/download/models/131658", "name": "vae-ft-mse.vae.safetensors"}
250
+ # ],
251
+ # "2.Anything.vae": [{"url": "https://civitai.com/api/download/models/131656", "name": "Anything.vae.safetensors"}],
252
+ # "3.Blessed2.vae": [{"url": "https://civitai.com/api/download/models/142467", "name": "Blessed2.vae.safetensors"}],
253
+ # "4.ClearVae.vae": [{"url": "https://civitai.com/api/download/models/133362", "name": "ClearVae_23.vae.safetensors"}],
254
+
255
+ "1.Anime.vae": [{"url": "https://civitai.com/api/download/models/311162", "name": "vae-ft-mse-840000-ema-pruned.vae.safetensors"}],
256
+ "2.Anything.vae": [{"url": "https://civitai.com/api/download/models/119279", "name": "Anything.vae.safetensors"}],
257
+ "3.Blessed2.vae": [{"url": "https://huggingface.co/NoCrypt/blessed_vae/resolve/main/blessed2.vae.pt", "name": "Blessed2.vae.safetensors"}],
258
+ "4.ClearVae.vae": [{"url": "https://civitai.com/api/download/models/88156", "name": "ClearVae_23.vae.safetensors"}],
259
  "5.WD.vae": [{"url": "https://huggingface.co/NoCrypt/resources/resolve/main/VAE/wd.vae.safetensors", "name": "WD.vae.safetensors"}]
260
  }
261
 
 
329
  }
330
 
331
  extension_repo = []
332
+ directories = [value for key, value in prefixes.items()] # for unpacking zip files
333
  get_ipython().system('mkdir -p {" ".join(directories)}')
334
 
335
  hf_token = optional_huggingface_token if optional_huggingface_token else "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"
336
  user_header = f"\"Authorization: Bearer {hf_token}\""
337
 
338
+ ''' Formatted Info Output '''
339
+
340
+ from math import floor
341
+
342
+ def center_text(text, terminal_width=45):
343
+ text_length = len(text)
344
+ left_padding = floor((terminal_width - text_length) / 2)
345
+ right_padding = terminal_width - text_length - left_padding
346
+ return f"\033[1m\033[36m{' ' * left_padding}{text}{' ' * right_padding}\033[0m\033[32m"
347
+
348
+ def format_output(url, dst_dir, file_name):
349
+ info = f"[{file_name.split('.')[0]}]"
350
+ info = center_text(info)
351
+
352
+ print(f"\n\033[32m{'---'*20}]{info}[{'---'*20}")
353
+ print(f"\033[33mURL: \033[34m{url}")
354
+ print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
355
+ print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
356
+
357
  ''' Get Image Preview | CivitAi '''
358
 
359
+ def get_data_from_api(model_id):
360
+ """Fetch model data from the API"""
361
  endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_id}"
362
  headers = {"Content-Type": "application/json"}
363
  try:
364
  response = requests.get(endpoint_url, headers=headers)
365
+ response.raise_for_status()
366
+ return response.json()
367
  except requests.exceptions.RequestException as e:
368
  print(f"An error occurred: {e}")
369
  return None
370
 
371
  def extract_model_info(data, url):
372
+ """Extract model information based on URL"""
373
  if 'type=' in url:
374
  model_type = parse_qs(urlparse(url).query).get('type', [''])[0]
375
  model_name = data['files'][1]['name']
 
386
  return model_type, model_name, image_url
387
 
388
  def gen_preview_filename(model_name, image_url):
389
+ """Generate a preview filename"""
390
  name = model_name.split('.')
391
  img_exts = image_url.split('.')
392
+ return f"{name[0]}.preview.{img_exts[-1]}"
393
 
394
  ''' main download code '''
395
 
396
  def handle_manual(url):
397
+ url_parts = url.split(':', 1)
398
+ prefix = url_parts[0]
399
+ path = url_parts[1]
400
 
401
+ file_name_match = re.search(r'\[(.*?)\]', path)
402
+ file_name = file_name_match.group(1) if file_name_match else None
403
+ if file_name:
404
+ path = re.sub(r'\[.*?\]', '', path)
405
+
406
+ if prefix in prefixes:
407
+ dir = prefixes[prefix]
408
+ if prefix != "extension":
409
+ try:
410
+ manual_download(path, dir, file_name=file_name)
411
+ except Exception as e:
412
+ print(f"Error downloading file: {e}")
413
+ else:
414
+ extension_repo.append((path, file_name))
415
 
416
  def manual_download(url, dst_dir, file_name):
417
  aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
418
  basename = url.split("/")[-1] if file_name is None else file_name
419
  header_option = f"--header={user_header}"
420
 
421
+ # ==== CivitAi API ====
422
  support_types = ('Checkpoint', 'Model', 'TextualInversion', 'LORA') # for dl preview image
423
  civitai_token = "62c0c5956b2f9defbd844d754000180b"
424
 
425
  if 'civitai' in url:
426
+ url = f"{url}{'&' if '?' in url else '?'}token={civitai_token}"
427
+ model_id = url.split('/')[-1].split('?')[0]
428
+ clean_url = re.sub(r'[?&]token=[^&]*', '', url) # hide token
429
 
430
  data = get_data_from_api(model_id)
431
  if data:
432
  model_type, model_name, image_url = extract_model_info(data, url)
433
+
434
+ if any(t in model_type for t in support_types):
435
+ if model_name and image_url:
436
+ image_file_name = gen_preview_filename(model_name if not file_name else file_name, image_url)
437
+ with capture.capture_output() as cap:
438
+ get_ipython().system("aria2c {aria2_args} -d {dst_dir} -o {image_file_name} '{image_url}'")
439
  del cap
440
+ file_name = file_name or model_name
441
+ else:
442
+ clean_url = url
443
+
444
+ """ Formatted info output """
445
+ model_name_or_basename = file_name if not 'huggingface' in url else basename
446
+ format_output(clean_url or url, dst_dir, model_name_or_basename)
447
 
448
  print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n") if 'civitai' in url and not data else None
449
+ if 'civitai' in url and data and any(t in model_type for t in support_types) and (locals().get('image_file_name') or ''):
450
  print(f"\033[32m[Preview DL]:\033[0m {image_file_name} - {image_url}\n")
451
+ # =====================
452
 
453
  # -- GDrive --
454
  if 'drive.google' in url:
 
492
 
493
  unpacking_zip_files()
494
 
495
+ # unpacking zip files
496
  def unpacking_zip_files():
 
497
  for directory in directories:
498
  for root, dirs, files in os.walk(directory):
499
  for file in files:
 
506
 
507
  ''' submodels - added urls '''
508
 
 
 
509
  def add_submodels(selection, num_selection, model_dict, dst_dir):
510
  if selection == "none":
511
  return []
 
517
  else:
518
  selected_models = model_dict[selection]
519
  selected_nums = map(int, num_selection.replace(',', '').split())
 
520
  for num in selected_nums:
521
  if 1 <= num <= len(model_dict):
522
  name = list(model_dict)[num - 1]
523
  selected_models.extend(model_dict[name])
524
 
525
  unique_models = list({model['name']: model for model in selected_models}.values())
 
526
  for model in unique_models:
527
  model['dst_dir'] = dst_dir
528
 
529
  return unique_models
530
 
531
+ def handle_submodels(selection, num_selection, model_dict, dst_dir, url):
532
+ submodels = add_submodels(selection, num_selection, model_dict, dst_dir)
533
+ for submodel in submodels:
534
+ if not Inpainting_Model and "inpainting" in submodel['name']:
535
+ continue
536
+ url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
537
+ return url
538
 
539
+ url = handle_submodels(Model, Model_Num, model_list, models_dir, url)
540
+ url = handle_submodels(Vae, Vae_Num, vae_list, vaes_dir, url)
541
+ url = handle_submodels(controlnet, controlnet_Num, controlnet_list, control_dir, url)
 
542
 
543
  ''' file.txt - added urls '''
544
 
545
+ def process_file_download(file_url, prefixes, unique_urls):
 
 
546
  files_urls = ""
547
 
548
  if file_url.startswith("http"):
 
556
 
557
  current_tag = None
558
  for line in lines:
559
+ line = line.strip()
560
  if any(f'# {tag}' in line.lower() for tag in prefixes):
561
  current_tag = next((tag for tag in prefixes if tag in line.lower()))
562
 
563
  urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls
564
  for url in urls:
565
+ filter_url = url.split('[')[0] # strip the [filename] tag before the duplicate check
566
+
567
+ if url.startswith("http") and filter_url not in unique_urls:
568
  files_urls += f"{current_tag}:{url}, "
569
+ unique_urls.add(filter_url)
570
 
571
  return files_urls
572
 
 
573
  file_urls = ""
574
+ unique_urls = set()
575
+
576
  if custom_file_urls:
577
  for custom_file_url in custom_file_urls.replace(',', '').split():
578
  if not custom_file_url.endswith('.txt'):
 
582
  custom_file_url = f'{root_path}/{custom_file_url}'
583
 
584
  try:
585
+ file_urls += process_file_download(custom_file_url, prefixes, unique_urls)
586
  except FileNotFoundError:
587
  pass
588
 
files_cells/python/ru/launch_ru.py CHANGED
@@ -12,7 +12,7 @@ from IPython.display import clear_output
12
 
13
  # ================= DETECT ENV =================
14
  def detect_environment():
15
- free_plan = (os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024. ** 3) <= 20)
16
  environments = {
17
  'COLAB_GPU': ('Google Colab', "/root" if free_plan else "/content"),
18
  'KAGGLE_URL_BASE': ('Kaggle', "/kaggle/working/content")
@@ -37,9 +37,10 @@ settings = load_settings()
37
  ngrok_token = settings['ngrok_token']
38
  zrok_token = settings['zrok_token']
39
  commandline_arguments = settings['commandline_arguments']
 
40
 
41
 
42
- # ======================== TUNNEL ========================
43
  print('Please Wait...')
44
 
45
  def get_public_ip(version='ipv4'):
@@ -52,10 +53,17 @@ def get_public_ip(version='ipv4'):
52
  except Exception as e:
53
  print(f"Error getting public {version} address:", e)
54
 
55
- public_ipv4 = get_public_ip(version='ipv4')
56
 
57
  tunnel_class = pickle.load(open(f"{root_path}/new_tunnel", "rb"), encoding="utf-8")
58
- tunnel_port= 1734
59
  tunnel = tunnel_class(tunnel_port)
60
  tunnel.add_tunnel(command="cl tunnel --url localhost:{port}", name="cl", pattern=re.compile(r"[\w-]+\.trycloudflare\.com"))
61
  tunnel.add_tunnel(command="lt --port {port}", name="lt", pattern=re.compile(r"[\w-]+\.loca\.lt"), note="Password : " + "\033[32m" + public_ipv4 + "\033[0m" + " rerun cell if 404 error.")
@@ -66,17 +74,33 @@ if zrok_token:
66
  tunnel.add_tunnel(command="zrok share public http://localhost:{port}/ --headless", name="zrok", pattern=re.compile(r"[\w-]+\.share\.zrok\.io"))
67
 
68
  clear_output()
69
- # ======================== TUNNEL ========================
70
 
71
 
72
- # automatic fixing path V2
73
- get_ipython().system('sed -i \'s|"tagger_hf_cache_dir": ".*"|"tagger_hf_cache_dir": "{webui_path}/models/interrogators/"|\' {webui_path}/config.json')
74
- get_ipython().system('sed -i \'s|"additional_networks_extra_lora_path": ".*"|"additional_networks_extra_lora_path": "{webui_path}/models/Lora/"|\' {webui_path}/config.json')
75
- get_ipython().system('sed -i \'s|"ad_extra_models_dir": ".*"|"ad_extra_models_dir": "{webui_path}/models/adetailer/"|\' {webui_path}/config.json')
76
- get_ipython().system('sed -i \'s/"sd_checkpoint_hash": ".*"/"sd_checkpoint_hash": ""/g; s/"sd_model_checkpoint": ".*"/"sd_model_checkpoint": ""/g; s/"sd_vae": ".*"/"sd_vae": "None"/g\' {webui_path}/config.json')
77
- # for kaggle
78
  if env == 'Kaggle':
79
  get_ipython().system('sed -i \'s/"civitai_interface\\/NSFW content\\/value":.*/"civitai_interface\\/NSFW content\\/value": false/g\' {webui_path}/ui-config.json')
 
 
80
 
81
  with tunnel:
82
  get_ipython().run_line_magic('cd', '{webui_path}')
@@ -87,6 +111,10 @@ with tunnel:
87
  if env != "Google Colab":
88
  commandline_arguments += f' --encrypt-pass={tunnel_port} --api'
89
90
  get_ipython().system('COMMANDLINE_ARGS="{commandline_arguments}" python launch.py')
91
 
92
 
 
12
 
13
  # ================= DETECT ENV =================
14
  def detect_environment():
15
+ free_plan = (os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') / (1024 ** 3) <= 20)
16
  environments = {
17
  'COLAB_GPU': ('Google Colab', "/root" if free_plan else "/content"),
18
  'KAGGLE_URL_BASE': ('Kaggle', "/kaggle/working/content")
 
37
  ngrok_token = settings['ngrok_token']
38
  zrok_token = settings['zrok_token']
39
  commandline_arguments = settings['commandline_arguments']
40
+ change_webui = settings['change_webui']
41
 
42
 
43
+ # ======================== TUNNEL V2 ========================
44
  print('Please Wait...')
45
 
46
  def get_public_ip(version='ipv4'):
 
53
  except Exception as e:
54
  print(f"Error getting public {version} address:", e)
55
 
56
+ # Reuse the cached public IP if present; otherwise fetch and save it
57
+ try:
58
+ with open(f"{root_path}/public_ip.txt", "r") as file:
59
+ public_ipv4 = file.read().strip()
60
+ except FileNotFoundError:
61
+ public_ipv4 = get_public_ip(version='ipv4')
62
+ with open(f"{root_path}/public_ip.txt", "w") as file:
63
+ file.write(public_ipv4)
64
 
65
  tunnel_class = pickle.load(open(f"{root_path}/new_tunnel", "rb"), encoding="utf-8")
66
+ tunnel_port = 1734
67
  tunnel = tunnel_class(tunnel_port)
68
  tunnel.add_tunnel(command="cl tunnel --url localhost:{port}", name="cl", pattern=re.compile(r"[\w-]+\.trycloudflare\.com"))
69
  tunnel.add_tunnel(command="lt --port {port}", name="lt", pattern=re.compile(r"[\w-]+\.loca\.lt"), note="Password : " + "\033[32m" + public_ipv4 + "\033[0m" + " rerun cell if 404 error.")
 
74
  tunnel.add_tunnel(command="zrok share public http://localhost:{port}/ --headless", name="zrok", pattern=re.compile(r"[\w-]+\.share\.zrok\.io"))
75
 
76
  clear_output()
 
77
 
78
 
79
+ # =============== Automatic Fixing Path V3 ===============
80
+ paths_to_check = [
81
+ ("tagger_hf_cache_dir", f"{webui_path}/models/interrogators/"),
82
+ ("additional_networks_extra_lora_path", f"{webui_path}/models/Lora/"),
83
+ ("ad_extra_models_dir", f"{webui_path}/models/adetailer/"),
84
+ ("sd_checkpoint_hash", ""),
85
+ ("sd_model_checkpoint", ""),
86
+ ("sd_vae", "None")
87
+ ]
88
+
89
+ config_path = f'{webui_path}/ui-config.json'
90
+
91
+ with open(config_path, 'r') as file:
92
+ config_data = json.load(file)
93
+
94
+ for key, value in paths_to_check:
95
+ if key in config_data and config_data[key] != value:
96
+ sed_command = f"sed -i 's|\"{key}\": \".*\"|\"{key}\": \"{value}\"|' {config_path}"
97
+ os.system(sed_command)
98
+
99
+ # Additional check for Kaggle
100
  if env == 'Kaggle':
101
  get_ipython().system('sed -i \'s/"civitai_interface\\/NSFW content\\/value":.*/"civitai_interface\\/NSFW content\\/value": false/g\' {webui_path}/ui-config.json')
102
+ # -------------------------------------------------------
103
+
104
 
105
  with tunnel:
106
  get_ipython().run_line_magic('cd', '{webui_path}')
 
111
  if env != "Google Colab":
112
  commandline_arguments += f' --encrypt-pass={tunnel_port} --api'
113
 
114
+ # -- FORGE --
115
+ if change_webui == 'Forge':
116
+ commandline_arguments += ' --cuda-stream --pin-shared-memory'
117
+
118
  get_ipython().system('COMMANDLINE_ARGS="{commandline_arguments}" python launch.py')
119
 
120