test1: saving files to subdirectories

- files_cells/notebooks/en/downloading_en.ipynb  +49 -32
- files_cells/notebooks/en/widgets_en.ipynb       +0  -0
- files_cells/notebooks/ru/downloading_ru.ipynb  +48 -31
- files_cells/notebooks/ru/widgets_ru.ipynb       +0  -0
- files_cells/python/en/downloading_en.py        +49 -32
- files_cells/python/en/widgets_en.py            +19 -12
- files_cells/python/ru/downloading_ru.py        +48 -31
- files_cells/python/ru/widgets_ru.py            +20 -13
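
The change adds two URL tags that the download cells now understand: `[name.ext]` renames the downloaded file and `(subdir)` saves it into a subdirectory of the target folder. A minimal standalone sketch of that parsing, using the same regexes the diff adds to `handle_manual` (the helper name `parse_custom_url` and the example URL are illustrative, not code from the repo):

```python
import re

def parse_custom_url(url):
    file_name = re.search(r'\[(.*?)\]', url)               # [] in url -> custom file name
    file_name = file_name.group(1) if file_name else None
    dir_path = re.search(r'\((.*?)\)', url)                # () in url -> save into subdir
    dir_path = dir_path.group(1) if dir_path else None
    if file_name:
        url = re.sub(r'\[.*?\]', '', url)                  # strip the [] tag from the URL
    if dir_path:
        url = re.sub(r'\(.*?\)', '', url)                  # strip the () tag from the URL
    return url, file_name, dir_path

url, name, subdir = parse_custom_url(
    "https://civitai.com/api/download/models/229782[Detailer.safetensors](detailers)"
)
print(url)     # https://civitai.com/api/download/models/229782
print(name)    # Detailer.safetensors
print(subdir)  # detailers
```

Both tags are stripped from the URL before it reaches the actual downloader, which is why they have to sit directly after the URL with no spaces.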
files_cells/notebooks/en/downloading_en.ipynb
CHANGED

@@ -67,11 +67,11 @@
  " # Dictionary of additional libraries specific to certain environments\n",
  " additional_libs = {\n",
  " \"Google Colab\": {\n",
- " \"xformers\": \"pip install xformers==0.0.
+ " \"xformers\": \"pip install xformers==0.0.26.post1 --no-deps\"\n",
  " },\n",
  " \"Kaggle\": {\n",
- " \"xformers\": \"pip install -q xformers==0.0.
- " \"torch\": \"pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121\"\n",
+ " \"xformers\": \"pip install -q xformers==0.0.26.post1\",\n",
+ " # \"torch\": \"pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121\"\n",
  " }\n",
  " }\n",
  "\n",

@@ -104,7 +104,7 @@
  "## dl special files\n",
  "with capture.capture_output() as cap:\n",
  " !mkdir -p {root_path}/file_cell/special\n",
- " !wget https://huggingface.co/NagisaNao/
+ " !wget https://huggingface.co/NagisaNao/fast_repo/resolve/main/special/dl_display_results.py -O {root_path}/file_cell/special/dl_display_results.py\n",
  "del cap\n",
  "\n",
  "\n",

@@ -344,7 +344,8 @@
  " \"adetailer\": adetailer_dir\n",
  "}\n",
  "\n",
- "
+ "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
+ "!mkdir -p {\" \".join(directories)}\n",
  "\n",
  "url = \"\"\n",
  "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",

@@ -355,22 +356,33 @@
  "def handle_manual(url):\n",
  " original_url = url\n",
  " url = url.split(':', 1)[1]\n",
+ "\n",
  " file_name = re.search(r'\\[(.*?)\\]', url)\n",
- " file_name = file_name.group(1) if file_name else None\n",
+ " file_name = file_name.group(1) if file_name else None # for [] in url - change file name\n",
+ " dir_path = re.search(r'\\((.*?)\\)', url)\n",
+ " dir_path = dir_path.group(1) if dir_path else None # for () in url - create subdir\n",
+ "\n",
  " if file_name:\n",
  " url = re.sub(r'\\[.*?\\]', '', url)\n",
+ " if dir_path:\n",
+ " url = re.sub(r'\\(.*?\\)', '', url)\n",
  "\n",
  " for prefix, dir in prefixes.items():\n",
  " if original_url.startswith(f\"{prefix}:\"):\n",
  " if prefix != \"extension\":\n",
- " manual_download(url, dir, file_name
+ " manual_download(url, dir, file_name, dir_path)\n",
  " else:\n",
  " extension_repo.append((url, file_name))\n",
  "\n",
- "
+ "\n",
+ "def manual_download(url, dst_dir, file_name, dir_path=None):\n",
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
  " header_option = f\"--header={user_header}\"\n",
  "\n",
+ " if dir_path:\n",
+ " dst_dir = f\"{dst_dir}/{dir_path}\"\n",
+ " !mkdir -p {dst_dir}\n",
+ "\n",
  " print(\"\\033[32m---\"*45 + f\"\\n\\033[33mURL: \\033[34m{url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name}\\033[32m\\n~~~\\033[0m\")\n",
  " # print(url, dst_dir, file_name)\n",
  "\n",

@@ -388,7 +400,7 @@
  " !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
  " else:\n",
  " !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
- " # --
+ " # -- Hugging Face --\n",
  " elif 'huggingface' in url:\n",
  " if '/blob/' in url:\n",
  " url = url.replace('/blob/', '/resolve/')\n",

@@ -423,8 +435,7 @@
  "\n",
  "## unpucking zip files\n",
  "def unpucking_zip_files():\n",
- " directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
- "\n",
+ " # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
  " for directory in directories:\n",
  " for root, dirs, files in os.walk(directory):\n",
  " for file in files:\n",

@@ -439,33 +450,40 @@
  "\n",
  "submodels = []\n",
  "\n",
+ "def get_all_models(model_dict):\n",
+ " all_models = []\n",
+ " for models in model_dict.values():\n",
+ " all_models.extend(models)\n",
+ " return all_models\n",
+ "\n",
+ "def get_selected_models(model_dict, num_selection):\n",
+ " selected_models = []\n",
+ " selected_nums = map(int, num_selection.replace(',', '').split())\n",
+ " for num in selected_nums:\n",
+ " if 1 <= num <= len(model_dict):\n",
+ " name = list(model_dict)[num - 1]\n",
+ " selected_models.extend(model_dict[name])\n",
+ " return selected_models\n",
+ "\n",
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
  " if selection == \"none\":\n",
  " return []\n",
- "
- "
- " for models in model_dict.values():\n",
- " all_models.extend(models)\n",
- " selected_models = all_models\n",
+ " elif selection == \"ALL\":\n",
+ " selected_models = get_all_models(model_dict)\n",
  " else:\n",
- " selected_models = model_dict[
- "
- "\n",
- " for num in selected_nums:\n",
- " if 1 <= num <= len(model_dict):\n",
- " name = list(model_dict)[num - 1]\n",
- " selected_models.extend(model_dict[name])\n",
+ " selected_models = model_dict.get(selection, [])\n",
+ " if num_selection:\n",
+ " selected_models.extend(get_selected_models(model_dict, num_selection))\n",
  "\n",
  " unique_models = list({model['name']: model for model in selected_models}.values())\n",
- "\n",
  " for model in unique_models:\n",
  " model['dst_dir'] = dst_dir\n",
  "\n",
  " return unique_models\n",
  "\n",
- "submodels += add_submodels(Model, Model_Num, model_list, models_dir)
- "submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)
- "submodels += add_submodels(controlnet, \"\" if controlnet == \"ALL\" else controlnet_Num, controlnet_list, control_dir)
+ "submodels += add_submodels(Model, Model_Num, model_list, models_dir)\n",
+ "submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)\n",
+ "submodels += add_submodels(controlnet, \"\" if controlnet == \"ALL\" else controlnet_Num, controlnet_list, control_dir)\n",
  "\n",
  "for submodel in submodels:\n",
  " if not Inpainting_Model and \"inpainting\" in submodel['name']:\n",

@@ -491,12 +509,11 @@
  " current_tag = None\n",
  " for line in lines:\n",
  " if any(f'# {tag}' in line.lower() for tag in prefixes):\n",
- " current_tag = next((tag for tag in prefixes if tag in line.lower()))\n",
+ " current_tag = next((tag for tag in prefixes if tag in line.lower()), None)\n",
  "\n",
- " urls = [url.split('#')[0].strip() for url in line.split(',')]
+ " urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls\n",
  " for url in urls:\n",
  " if url.startswith(\"http\") and url not in unique_urls:\n",
- " # handle_manual(f\"{current_tag}:{url}\")\n",
  " files_urls += f\"{current_tag}:{url}, \"\n",
  " unique_urls.append(url)\n",
  "\n",

@@ -518,7 +535,7 @@
  " pass\n",
  "\n",
  "# url prefixing\n",
- "urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url]\n",
+ "urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url] # base url\n",
  "prefixed_urls = [f\"{prefix}:{url}\" for prefix, url in zip(prefixes.keys(), urls) if url for url in url.replace(',', '').split()]\n",
  "url += \", \".join(prefixed_urls) + \", \" + file_urls\n",
  "\n",

@@ -555,7 +572,7 @@
  "\n",
  "## List Models and stuff V2\n",
  "if detailed_download == \"off\":\n",
- " print(\"\\n\\n\\033[
+ " print(\"\\n\\n\\033[33mIf you don't see any downloaded files, enable the 'Detailed Downloads' feature in the widget.\")\n",
  " get_ipython().run_line_magic('run', '{root_path}/file_cell/special/dl_display_results.py') # display widgets result\n",
  "\n",
  "else:\n",
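
In the notebook above, `manual_download` receives the `(subdir)` value as `dir_path`, appends it to the destination directory, and creates it with `!mkdir -p` before downloading. Outside IPython the same step looks roughly like this (a sketch; `os.makedirs` stands in for the shell call):

```python
import os

def resolve_save_dir(dst_dir, dir_path=None):
    # dir_path is the value captured from the "(subdir)" tag, or None if the tag is absent
    if dir_path:
        dst_dir = f"{dst_dir}/{dir_path}"
    os.makedirs(dst_dir, exist_ok=True)   # equivalent of `!mkdir -p {dst_dir}`
    return dst_dir

print(resolve_save_dir("/content/models", "detailers"))  # /content/models/detailers
print(resolve_save_dir("/content/models"))               # /content/models
```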
files_cells/notebooks/en/widgets_en.ipynb
CHANGED
The diff for this file is too large to render. See raw diff.
files_cells/notebooks/ru/downloading_ru.ipynb
CHANGED

@@ -67,11 +67,11 @@
  " # Dictionary of additional libraries specific to certain environments\n",
  " additional_libs = {\n",
  " \"Google Colab\": {\n",
- " \"xformers\": \"pip install xformers==0.0.
+ " \"xformers\": \"pip install xformers==0.0.26.post1 --no-deps\"\n",
  " },\n",
  " \"Kaggle\": {\n",
- " \"xformers\": \"pip install -q xformers==0.0.
- " \"torch\": \"pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121\"\n",
+ " \"xformers\": \"pip install -q xformers==0.0.26.post1\",\n",
+ " # \"torch\": \"pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121\"\n",
  " }\n",
  " }\n",
  "\n",

@@ -104,7 +104,7 @@
  "## dl special files\n",
  "with capture.capture_output() as cap:\n",
  " !mkdir -p {root_path}/file_cell/special\n",
- " !wget https://huggingface.co/NagisaNao/
+ " !wget https://huggingface.co/NagisaNao/fast_repo/resolve/main/special/dl_display_results.py -O {root_path}/file_cell/special/dl_display_results.py\n",
  "del cap\n",
  "\n",
  "\n",

@@ -344,7 +344,8 @@
  " \"adetailer\": adetailer_dir\n",
  "}\n",
  "\n",
- "
+ "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
+ "!mkdir -p {\" \".join(directories)}\n",
  "\n",
  "url = \"\"\n",
  "hf_token = optional_huggingface_token if optional_huggingface_token else \"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\"\n",

@@ -355,22 +356,33 @@
  "def handle_manual(url):\n",
  " original_url = url\n",
  " url = url.split(':', 1)[1]\n",
+ "\n",
  " file_name = re.search(r'\\[(.*?)\\]', url)\n",
- " file_name = file_name.group(1) if file_name else None\n",
+ " file_name = file_name.group(1) if file_name else None # for [] in url - change file name\n",
+ " dir_path = re.search(r'\\((.*?)\\)', url)\n",
+ " dir_path = dir_path.group(1) if dir_path else None # for () in url - create subdir\n",
+ "\n",
  " if file_name:\n",
  " url = re.sub(r'\\[.*?\\]', '', url)\n",
+ " if dir_path:\n",
+ " url = re.sub(r'\\(.*?\\)', '', url)\n",
  "\n",
  " for prefix, dir in prefixes.items():\n",
  " if original_url.startswith(f\"{prefix}:\"):\n",
  " if prefix != \"extension\":\n",
- " manual_download(url, dir, file_name
+ " manual_download(url, dir, file_name, dir_path)\n",
  " else:\n",
  " extension_repo.append((url, file_name))\n",
  "\n",
- "
+ "\n",
+ "def manual_download(url, dst_dir, file_name, dir_path=None):\n",
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
  " header_option = f\"--header={user_header}\"\n",
  "\n",
+ " if dir_path:\n",
+ " dst_dir = f\"{dst_dir}/{dir_path}\"\n",
+ " !mkdir -p {dst_dir}\n",
+ "\n",
  " print(\"\\033[32m---\"*45 + f\"\\n\\033[33mURL: \\033[34m{url}\\n\\033[33mSAVE DIR: \\033[34m{dst_dir}\\n\\033[33mFILE NAME: \\033[34m{file_name}\\033[32m\\n~~~\\033[0m\")\n",
  " # print(url, dst_dir, file_name)\n",
  "\n",

@@ -388,7 +400,7 @@
  " !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
  " else:\n",
  " !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
- " # --
+ " # -- Hugging Face --\n",
  " elif 'huggingface' in url:\n",
  " if '/blob/' in url:\n",
  " url = url.replace('/blob/', '/resolve/')\n",

@@ -423,8 +435,7 @@
  "\n",
  "## unpucking zip files\n",
  "def unpucking_zip_files():\n",
- " directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
- "\n",
+ " # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]\n",
  " for directory in directories:\n",
  " for root, dirs, files in os.walk(directory):\n",
  " for file in files:\n",

@@ -439,33 +450,40 @@
  "\n",
  "submodels = []\n",
  "\n",
+ "def get_all_models(model_dict):\n",
+ " all_models = []\n",
+ " for models in model_dict.values():\n",
+ " all_models.extend(models)\n",
+ " return all_models\n",
+ "\n",
+ "def get_selected_models(model_dict, num_selection):\n",
+ " selected_models = []\n",
+ " selected_nums = map(int, num_selection.replace(',', '').split())\n",
+ " for num in selected_nums:\n",
+ " if 1 <= num <= len(model_dict):\n",
+ " name = list(model_dict)[num - 1]\n",
+ " selected_models.extend(model_dict[name])\n",
+ " return selected_models\n",
+ "\n",
  "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
  " if selection == \"none\":\n",
  " return []\n",
- "
- "
- " for models in model_dict.values():\n",
- " all_models.extend(models)\n",
- " selected_models = all_models\n",
+ " elif selection == \"ALL\":\n",
+ " selected_models = get_all_models(model_dict)\n",
  " else:\n",
- " selected_models = model_dict[
- "
- "\n",
- " for num in selected_nums:\n",
- " if 1 <= num <= len(model_dict):\n",
- " name = list(model_dict)[num - 1]\n",
- " selected_models.extend(model_dict[name])\n",
+ " selected_models = model_dict.get(selection, [])\n",
+ " if num_selection:\n",
+ " selected_models.extend(get_selected_models(model_dict, num_selection))\n",
  "\n",
  " unique_models = list({model['name']: model for model in selected_models}.values())\n",
- "\n",
  " for model in unique_models:\n",
  " model['dst_dir'] = dst_dir\n",
  "\n",
  " return unique_models\n",
  "\n",
- "submodels += add_submodels(Model, Model_Num, model_list, models_dir)
- "submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)
- "submodels += add_submodels(controlnet, \"\" if controlnet == \"ALL\" else controlnet_Num, controlnet_list, control_dir)
+ "submodels += add_submodels(Model, Model_Num, model_list, models_dir)\n",
+ "submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)\n",
+ "submodels += add_submodels(controlnet, \"\" if controlnet == \"ALL\" else controlnet_Num, controlnet_list, control_dir)\n",
  "\n",
  "for submodel in submodels:\n",
  " if not Inpainting_Model and \"inpainting\" in submodel['name']:\n",

@@ -491,12 +509,11 @@
  " current_tag = None\n",
  " for line in lines:\n",
  " if any(f'# {tag}' in line.lower() for tag in prefixes):\n",
- " current_tag = next((tag for tag in prefixes if tag in line.lower()))\n",
+ " current_tag = next((tag for tag in prefixes if tag in line.lower()), None)\n",
  "\n",
- " urls = [url.split('#')[0].strip() for url in line.split(',')]
+ " urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls\n",
  " for url in urls:\n",
  " if url.startswith(\"http\") and url not in unique_urls:\n",
- " # handle_manual(f\"{current_tag}:{url}\")\n",
  " files_urls += f\"{current_tag}:{url}, \"\n",
  " unique_urls.append(url)\n",
  "\n",

@@ -518,7 +535,7 @@
  " pass\n",
  "\n",
  "# url prefixing\n",
- "urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url]\n",
+ "urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url] # base url\n",
  "prefixed_urls = [f\"{prefix}:{url}\" for prefix, url in zip(prefixes.keys(), urls) if url for url in url.replace(',', '').split()]\n",
  "url += \", \".join(prefixed_urls) + \", \" + file_urls\n",
  "\n",
files_cells/notebooks/ru/widgets_ru.ipynb
CHANGED
The diff for this file is too large to render. See raw diff.
files_cells/python/en/downloading_en.py
CHANGED

@@ -48,11 +48,11 @@ if not os.path.exists(flag_file):
  # Dictionary of additional libraries specific to certain environments
  additional_libs = {
  "Google Colab": {
- "xformers": "pip install xformers==0.0.
+ "xformers": "pip install xformers==0.0.26.post1 --no-deps"
  },
  "Kaggle": {
- "xformers": "pip install -q xformers==0.0.
- "torch": "pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121"
+ "xformers": "pip install -q xformers==0.0.26.post1",
+ # "torch": "pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121"
  }
  }

@@ -85,7 +85,7 @@ if not os.path.exists(flag_file):
  ## dl special files
  with capture.capture_output() as cap:
  get_ipython().system('mkdir -p {root_path}/file_cell/special')
- get_ipython().system('wget https://huggingface.co/NagisaNao/
+ get_ipython().system('wget https://huggingface.co/NagisaNao/fast_repo/resolve/main/special/dl_display_results.py -O {root_path}/file_cell/special/dl_display_results.py')
  del cap

@@ -325,7 +325,8 @@ prefixes = {
  "adetailer": adetailer_dir
  }

-
+ directories = [value for key, value in prefixes.items()] # for unpucking zip files
+ get_ipython().system('mkdir -p {" ".join(directories)}')

  url = ""
  hf_token = optional_huggingface_token if optional_huggingface_token else "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"

@@ -336,22 +337,33 @@ user_header = f"\"Authorization: Bearer {hf_token}\""
  def handle_manual(url):
  original_url = url
  url = url.split(':', 1)[1]
+
  file_name = re.search(r'\[(.*?)\]', url)
- file_name = file_name.group(1) if file_name else None
+ file_name = file_name.group(1) if file_name else None # for [] in url - change file name
+ dir_path = re.search(r'\((.*?)\)', url)
+ dir_path = dir_path.group(1) if dir_path else None # for () in url - create subdir
+
  if file_name:
  url = re.sub(r'\[.*?\]', '', url)
+ if dir_path:
+ url = re.sub(r'\(.*?\)', '', url)

  for prefix, dir in prefixes.items():
  if original_url.startswith(f"{prefix}:"):
  if prefix != "extension":
- manual_download(url, dir, file_name
+ manual_download(url, dir, file_name, dir_path)
  else:
  extension_repo.append((url, file_name))

-
+
+ def manual_download(url, dst_dir, file_name, dir_path=None):
  basename = url.split("/")[-1] if file_name is None else file_name
  header_option = f"--header={user_header}"

+ if dir_path:
+ dst_dir = f"{dst_dir}/{dir_path}"
+ get_ipython().system('mkdir -p {dst_dir}')
+
  print("\033[32m---"*45 + f"\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}\033[32m\n~~~\033[0m")
  # print(url, dst_dir, file_name)

@@ -369,7 +381,7 @@ def manual_download(url, dst_dir, file_name):
  get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
  else:
  get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
- # --
+ # -- Hugging Face --
  elif 'huggingface' in url:
  if '/blob/' in url:
  url = url.replace('/blob/', '/resolve/')

@@ -404,8 +416,7 @@ def download(url):

  ## unpucking zip files
  def unpucking_zip_files():
- directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
-
+ # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
  for directory in directories:
  for root, dirs, files in os.walk(directory):
  for file in files:

@@ -420,33 +431,40 @@ def unpucking_zip_files():

  submodels = []

+ def get_all_models(model_dict):
+ all_models = []
+ for models in model_dict.values():
+ all_models.extend(models)
+ return all_models
+
+ def get_selected_models(model_dict, num_selection):
+ selected_models = []
+ selected_nums = map(int, num_selection.replace(',', '').split())
+ for num in selected_nums:
+ if 1 <= num <= len(model_dict):
+ name = list(model_dict)[num - 1]
+ selected_models.extend(model_dict[name])
+ return selected_models
+
  def add_submodels(selection, num_selection, model_dict, dst_dir):
  if selection == "none":
  return []
-
-
- for models in model_dict.values():
- all_models.extend(models)
- selected_models = all_models
+ elif selection == "ALL":
+ selected_models = get_all_models(model_dict)
  else:
- selected_models = model_dict[
-
-
- for num in selected_nums:
- if 1 <= num <= len(model_dict):
- name = list(model_dict)[num - 1]
- selected_models.extend(model_dict[name])
+ selected_models = model_dict.get(selection, [])
+ if num_selection:
+ selected_models.extend(get_selected_models(model_dict, num_selection))

  unique_models = list({model['name']: model for model in selected_models}.values())
-
  for model in unique_models:
  model['dst_dir'] = dst_dir

  return unique_models

- submodels += add_submodels(Model, Model_Num, model_list, models_dir)
- submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)
- submodels += add_submodels(controlnet, "" if controlnet == "ALL" else controlnet_Num, controlnet_list, control_dir)
+ submodels += add_submodels(Model, Model_Num, model_list, models_dir)
+ submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)
+ submodels += add_submodels(controlnet, "" if controlnet == "ALL" else controlnet_Num, controlnet_list, control_dir)

  for submodel in submodels:
  if not Inpainting_Model and "inpainting" in submodel['name']:

@@ -472,12 +490,11 @@ def process_file_download(file_url):
  current_tag = None
  for line in lines:
  if any(f'# {tag}' in line.lower() for tag in prefixes):
- current_tag = next((tag for tag in prefixes if tag in line.lower()))
+ current_tag = next((tag for tag in prefixes if tag in line.lower()), None)

- urls = [url.split('#')[0].strip() for url in line.split(',')]
+ urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls
  for url in urls:
  if url.startswith("http") and url not in unique_urls:
- # handle_manual(f"{current_tag}:{url}")
  files_urls += f"{current_tag}:{url}, "
  unique_urls.append(url)

@@ -499,7 +516,7 @@ if custom_file_urls:
  pass

  # url prefixing
- urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url]
+ urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url] # base url
  prefixed_urls = [f"{prefix}:{url}" for prefix, url in zip(prefixes.keys(), urls) if url for url in url.replace(',', '').split()]
  url += ", ".join(prefixed_urls) + ", " + file_urls

@@ -533,7 +550,7 @@ if len(extension_repo) > 0:

  ## List Models and stuff V2
  if detailed_download == "off":
- print("\n\n\033[
+ print("\n\n\033[33mIf you don't see any downloaded files, enable the 'Detailed Downloads' feature in the widget.")
  get_ipython().run_line_magic('run', '{root_path}/file_cell/special/dl_display_results.py') # display widgets result

  else:
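
The selection refactor in `downloading_en.py` is easier to follow outside the diff. The three helpers below are taken from the new version of the file; the `model_list` catalog is a made-up placeholder used only to show how the "ALL", named, and numeric selections combine:

```python
def get_all_models(model_dict):
    all_models = []
    for models in model_dict.values():
        all_models.extend(models)
    return all_models

def get_selected_models(model_dict, num_selection):
    selected_models = []
    selected_nums = map(int, num_selection.replace(',', '').split())
    for num in selected_nums:
        if 1 <= num <= len(model_dict):
            name = list(model_dict)[num - 1]
            selected_models.extend(model_dict[name])
    return selected_models

def add_submodels(selection, num_selection, model_dict, dst_dir):
    if selection == "none":
        return []
    elif selection == "ALL":
        selected_models = get_all_models(model_dict)
    else:
        # note: extends the list object stored in model_dict in place, as in the diff
        selected_models = model_dict.get(selection, [])
        if num_selection:
            selected_models.extend(get_selected_models(model_dict, num_selection))
    unique_models = list({model['name']: model for model in selected_models}.values())
    for model in unique_models:
        model['dst_dir'] = dst_dir
    return unique_models

# Placeholder catalog, not the repo's real model_list
model_list = {
    "1. Anime": [{"name": "anime.safetensors", "url": "https://example.org/anime"}],
    "2. Realistic": [{"name": "realistic.safetensors", "url": "https://example.org/realistic"}],
}

print([m["name"] for m in add_submodels("ALL", "", model_list, "/models")])
# ['anime.safetensors', 'realistic.safetensors']
print([m["name"] for m in add_submodels("1. Anime", "2", model_list, "/models")])
# ['anime.safetensors', 'realistic.safetensors']  (named pick plus numeric pick 2)
```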
files_cells/python/en/widgets_en.py
CHANGED

@@ -477,7 +477,7 @@ Vae_widget = widgets.Dropdown(options=vae_options, value='3.Blessed2.vae', descr
  Vae_Num_widget = widgets.Text(description='Vae Number:', placeholder='Enter the vae numbers to be downloaded using comma/space.', style=style, layout=layout)

  ''' Display Vae'''
- all_vae_box= widgets.VBox([vae_header, Vae_widget, Vae_Num_widget]).add_class("container").add_class("image_2")
+ all_vae_box = widgets.VBox([vae_header, Vae_widget, Vae_Num_widget]).add_class("container").add_class("image_2")
  display(all_vae_box)

  # --- ADDITIONAL ---

@@ -519,26 +519,33 @@ display(all_additional_box)
  # --- CUSTOM DOWNLOAD ---
  custom_download_header_popup = widgets.HTML('''
  <style>
- /*
- .
- .
- .
- .
+ /* Color */
+ .sl {color: #DBAFFF;} /* sample label */
+ .br {color: #FFFF00;} /* braces */
+ .br2 {color: #C596F0;} /* braces2 */
+ .fn {color: #FFFFD8;} /* file name */
+ .ex {color: #EB934B;} /* extension */
+ .sd {color: #FF8CD7;} /* subdirs */
+ .wr {color: #FF9999;} /* warning */
  </style>

  <div class="header" style="cursor: pointer;" onclick="toggleContainer()">Custom Download</div>
  <!-- PopUp Window -->
  <div class="info">INFO</div>
  <div class="popup">
- Separate multiple URLs with a comma/space. For a <span class="
+ Separate multiple URLs with a comma/space. For a <span class="fn">custom name</span> file/extension, specify it with <span class="br">[]</span>
  after the URL without spaces.
- <
+ <br>
+ <span>You can also specify the <span class="sd">subdirectory</span> to which the file will be saved, specify it with <span class="br2">()</span> after the URL without spaces.</span>\
+ <br>
+ <span class="wr">For files, be sure to specify</span> - <span class="ex">Filename Extension.</span>
+
  <div class="sample">
- <span class="
- https://civitai.com/api/download/models/229782<span class="
+ <span class="sl">Example for File:</span>
+ https://civitai.com/api/download/models/229782<span class="br">[</span><span class="fn">Detailer</span><span class="ex">.safetensors</span><span class="br">]</span><span class="br2">(</span><span class="sd">detailers</span><span class="br2">)</span>
  <br>
- <span class="
- https://github.com/hako-mikan/sd-webui-
+ <span class="sl">Example for Extension:</span>
+ https://github.com/hako-mikan/sd-webui-cd-tuner<span class="br">[</span><span class="fn">CD-Tuner</span><span class="br">]</span>
  </div>
  </div>
  ''')
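
For context on how these widget fields reach the downloader: `downloading_*.py` zips the five URL text fields with `prefixes.keys()`, splits each field on commas/spaces, and emits `prefix:url` entries that later go through `handle_manual`. A sketch with made-up prefix names and field values (the real key names and their order come from the `prefixes` dict in downloading_*.py):

```python
# Hypothetical stand-ins for prefixes.keys() and the widget fields
# (Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url)
field_prefixes = ["model", "vae", "lora", "embed", "extension"]
field_values = [
    "https://civitai.com/api/download/models/229782[Detailer.safetensors](detailers)",
    "",
    "https://example.org/a.safetensors, https://example.org/b.safetensors",
    "",
    "https://github.com/hako-mikan/sd-webui-cd-tuner[CD-Tuner]",
]

# Same comprehension shape as the "url prefixing" block in downloading_en.py
prefixed_urls = [f"{prefix}:{url}"
                 for prefix, field in zip(field_prefixes, field_values) if field
                 for url in field.replace(',', '').split()]

for entry in prefixed_urls:
    print(entry)
# model:https://civitai.com/api/download/models/229782[Detailer.safetensors](detailers)
# lora:https://example.org/a.safetensors
# lora:https://example.org/b.safetensors
# extension:https://github.com/hako-mikan/sd-webui-cd-tuner[CD-Tuner]
```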
files_cells/python/ru/downloading_ru.py
CHANGED

@@ -48,11 +48,11 @@ if not os.path.exists(flag_file):
  # Dictionary of additional libraries specific to certain environments
  additional_libs = {
  "Google Colab": {
- "xformers": "pip install xformers==0.0.
+ "xformers": "pip install xformers==0.0.26.post1 --no-deps"
  },
  "Kaggle": {
- "xformers": "pip install -q xformers==0.0.
- "torch": "pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121"
+ "xformers": "pip install -q xformers==0.0.26.post1",
+ # "torch": "pip install -q torch==2.1.2+cu121 torchvision==0.16.2+cu121 torchaudio==2.1.2 --extra-index-url https://download.pytorch.org/whl/cu121"
  }
  }

@@ -85,7 +85,7 @@ if not os.path.exists(flag_file):
  ## dl special files
  with capture.capture_output() as cap:
  get_ipython().system('mkdir -p {root_path}/file_cell/special')
- get_ipython().system('wget https://huggingface.co/NagisaNao/
+ get_ipython().system('wget https://huggingface.co/NagisaNao/fast_repo/resolve/main/special/dl_display_results.py -O {root_path}/file_cell/special/dl_display_results.py')
  del cap

@@ -325,7 +325,8 @@ prefixes = {
  "adetailer": adetailer_dir
  }

-
+ directories = [value for key, value in prefixes.items()] # for unpucking zip files
+ get_ipython().system('mkdir -p {" ".join(directories)}')

  url = ""
  hf_token = optional_huggingface_token if optional_huggingface_token else "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"

@@ -336,22 +337,33 @@ user_header = f"\"Authorization: Bearer {hf_token}\""
  def handle_manual(url):
  original_url = url
  url = url.split(':', 1)[1]
+
  file_name = re.search(r'\[(.*?)\]', url)
- file_name = file_name.group(1) if file_name else None
+ file_name = file_name.group(1) if file_name else None # for [] in url - change file name
+ dir_path = re.search(r'\((.*?)\)', url)
+ dir_path = dir_path.group(1) if dir_path else None # for () in url - create subdir
+
  if file_name:
  url = re.sub(r'\[.*?\]', '', url)
+ if dir_path:
+ url = re.sub(r'\(.*?\)', '', url)

  for prefix, dir in prefixes.items():
  if original_url.startswith(f"{prefix}:"):
  if prefix != "extension":
- manual_download(url, dir, file_name
+ manual_download(url, dir, file_name, dir_path)
  else:
  extension_repo.append((url, file_name))

-
+
+ def manual_download(url, dst_dir, file_name, dir_path=None):
  basename = url.split("/")[-1] if file_name is None else file_name
  header_option = f"--header={user_header}"

+ if dir_path:
+ dst_dir = f"{dst_dir}/{dir_path}"
+ get_ipython().system('mkdir -p {dst_dir}')
+
  print("\033[32m---"*45 + f"\n\033[33mURL: \033[34m{url}\n\033[33mSAVE DIR: \033[34m{dst_dir}\n\033[33mFILE NAME: \033[34m{file_name}\033[32m\n~~~\033[0m")
  # print(url, dst_dir, file_name)

@@ -369,7 +381,7 @@ def manual_download(url, dst_dir, file_name):
  get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
  else:
  get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
- # --
+ # -- Hugging Face --
  elif 'huggingface' in url:
  if '/blob/' in url:
  url = url.replace('/blob/', '/resolve/')

@@ -404,8 +416,7 @@ def download(url):

  ## unpucking zip files
  def unpucking_zip_files():
- directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
-
+ # directories = [models_dir, vaes_dir, embeddings_dir, loras_dir , extensions_dir, control_dir , adetailer_dir]
  for directory in directories:
  for root, dirs, files in os.walk(directory):
  for file in files:

@@ -420,33 +431,40 @@ def unpucking_zip_files():

  submodels = []

+ def get_all_models(model_dict):
+ all_models = []
+ for models in model_dict.values():
+ all_models.extend(models)
+ return all_models
+
+ def get_selected_models(model_dict, num_selection):
+ selected_models = []
+ selected_nums = map(int, num_selection.replace(',', '').split())
+ for num in selected_nums:
+ if 1 <= num <= len(model_dict):
+ name = list(model_dict)[num - 1]
+ selected_models.extend(model_dict[name])
+ return selected_models
+
  def add_submodels(selection, num_selection, model_dict, dst_dir):
  if selection == "none":
  return []
-
-
- for models in model_dict.values():
- all_models.extend(models)
- selected_models = all_models
+ elif selection == "ALL":
+ selected_models = get_all_models(model_dict)
  else:
- selected_models = model_dict[
-
-
- for num in selected_nums:
- if 1 <= num <= len(model_dict):
- name = list(model_dict)[num - 1]
- selected_models.extend(model_dict[name])
+ selected_models = model_dict.get(selection, [])
+ if num_selection:
+ selected_models.extend(get_selected_models(model_dict, num_selection))

  unique_models = list({model['name']: model for model in selected_models}.values())
-
  for model in unique_models:
  model['dst_dir'] = dst_dir

  return unique_models

- submodels += add_submodels(Model, Model_Num, model_list, models_dir)
- submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)
- submodels += add_submodels(controlnet, "" if controlnet == "ALL" else controlnet_Num, controlnet_list, control_dir)
+ submodels += add_submodels(Model, Model_Num, model_list, models_dir)
+ submodels += add_submodels(Vae, Vae_Num, vae_list, vaes_dir)
+ submodels += add_submodels(controlnet, "" if controlnet == "ALL" else controlnet_Num, controlnet_list, control_dir)

  for submodel in submodels:
  if not Inpainting_Model and "inpainting" in submodel['name']:

@@ -472,12 +490,11 @@ def process_file_download(file_url):
  current_tag = None
  for line in lines:
  if any(f'# {tag}' in line.lower() for tag in prefixes):
- current_tag = next((tag for tag in prefixes if tag in line.lower()))
+ current_tag = next((tag for tag in prefixes if tag in line.lower()), None)

- urls = [url.split('#')[0].strip() for url in line.split(',')]
+ urls = [url.split('#')[0].strip() for url in line.split(',')] # filter urls
  for url in urls:
  if url.startswith("http") and url not in unique_urls:
- # handle_manual(f"{current_tag}:{url}")
  files_urls += f"{current_tag}:{url}, "
  unique_urls.append(url)

@@ -499,7 +516,7 @@ if custom_file_urls:
  pass

  # url prefixing
- urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url]
+ urls = [Model_url, Vae_url, LoRA_url, Embedding_url, Extensions_url] # base url
  prefixed_urls = [f"{prefix}:{url}" for prefix, url in zip(prefixes.keys(), urls) if url for url in url.replace(',', '').split()]
  url += ", ".join(prefixed_urls) + ", " + file_urls
files_cells/python/ru/widgets_ru.py
CHANGED

@@ -477,7 +477,7 @@ Vae_widget = widgets.Dropdown(options=vae_options, value='3.Blessed2.vae', descr
  Vae_Num_widget = widgets.Text(description='Номер Vae:', placeholder='Введите номера vae для скачивания через запятую/пробел.', style=style, layout=layout)

  ''' Display Vae'''
- all_vae_box= widgets.VBox([vae_header, Vae_widget, Vae_Num_widget]).add_class("container").add_class("image_2")
+ all_vae_box = widgets.VBox([vae_header, Vae_widget, Vae_Num_widget]).add_class("container").add_class("image_2")
  display(all_vae_box)

  # --- ADDITIONAL ---

@@ -501,7 +501,7 @@ optional_huggingface_token_widget = widgets.Text(description='Токен Hugging
  ngrok_token_widget = widgets.Text(description='Токен Ngrok:', style=style, layout=widgets.Layout(width='1047px'))
  ngrock_button = widgets.HTML('<a href="https://dashboard.ngrok.com/get-started/your-authtoken" target="_blank">Получить Ngrok Токен</a>').add_class("button_ngrok")
  ngrok_widget = widgets.HBox([ngrok_token_widget, ngrock_button], style=style, layout=layout)
- zrok_token_widget = widgets.Text(description='Zrok
+ zrok_token_widget = widgets.Text(description='Токен Zrok:', style=style, layout=widgets.Layout(width='1047px'))
  zrok_button = widgets.HTML('<a href="https://colab.research.google.com/drive/1d2sjWDJi_GYBUavrHSuQyHTDuLy36WpU" target="_blank">Зарегать Zrok Токен</a>').add_class("button_ngrok")
  zrok_widget = widgets.HBox([zrok_token_widget, zrok_button], style=style, layout=layout)
  # ---

@@ -520,25 +520,32 @@ display(all_additional_box)
  custom_download_header_popup = widgets.HTML('''
  <style>
  /* Color */
- .
- .
- .
- .
+ .sl {color: #DBAFFF;} /* sample label */
+ .br {color: #FFFF00;} /* braces */
+ .br2 {color: #C596F0;} /* braces2 */
+ .fn {color: #FFFFD8;} /* file name */
+ .ex {color: #EB934B;} /* extension */
+ .sd {color: #FF8CD7;} /* subdirs */
+ .wr {color: #FF9999;} /* warning */
  </style>

  <div class="header" style="cursor: pointer;" onclick="toggleContainer()">Кастомная Загрузка</div>
  <!-- PopUp window -->
  <div class="info" id="info_dl">INFO</div>
  <div class="popup">
-
+ URLs: разделяются запятой/пробелом. Для <span class="fn">кастомного имени</span> файла/расширения укажите его через <span class="br">[]</span>
  после URL без пробелов.
- <
- <
-
-
+ <br>
+ <span>Вы также можете указать <span class="sd">субдиректорию</span>, в которую будет сохранен файл, указав её через <span class="br2">()</span> после URL без пробелов.</span>
+ <br>
+ <span class="wr">Для файла обязательно укажите</span> - <span class="ex">Расширение Файла.</span>
+
+ <div class="sample" >
+ <span class="sl">Пример для Файла:</span>
+ https://civitai.com/api/download/models/229782<span class="br">[</span><span class="fn">Detailer</span><span class="ex">.safetensors</span><span class="br">]</span><span class="br2">(</span><span class="sd">detailers</span><span class="br2">)</span>
  <br>
- <span class="
- https://github.com/hako-mikan/sd-webui-
+ <span class="sl">Пример для Расширения:</span>
+ https://github.com/hako-mikan/sd-webui-cd-tuner<span class="br">[</span><span class="fn">CD-Tuner</span><span class="br">]</span>
  </div>
  </div>
  ''')