codeShare committed on
Commit e46266a • 1 Parent(s): b10cb69

Upload 2 files

Google Colab Notebooks/fusion_t2i_CLIP_interrogator.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
Google Colab Notebooks/sd_token_similarity_calculator.ipynb CHANGED
@@ -259,33 +259,25 @@
259
  {
260
  "cell_type": "code",
261
  "source": [
262
- "# @title ✳️ Select items for the vocab\n",
263
  "\n",
264
  "prompt_features = True # @param {\"type\":\"boolean\",\"placeholder\":\"🦜\"}\n",
265
  "civitai_blue_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ“˜\"}\n",
266
  "suffix = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Ή\"}\n",
267
  "prefix = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Έ\"}\n",
268
- "emojis = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ˜ƒ\"}\n",
269
  "#------#\n",
270
  "\n",
271
  "first_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Ή\"}\n",
272
  "last_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Έ\"}\n",
273
- "full_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ˜ƒ\"}\n",
274
- "celebs = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ†”πŸ‘¨\"}\n",
275
- "#These are borked\n",
276
- "celebs_young = False # param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Έ\"}\n",
277
  "#-------#\n",
278
- "\n",
279
  "danbooru_tags = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸŽ€\"}\n",
280
- "\n",
281
- "lyrics = False # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
282
- "\n",
283
- "tripple_nouns = False # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
284
- "\n",
285
  "#-----#\n",
286
- "female_fullnames = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ˜ƒ\"}\n",
287
  "debug = False\n",
288
- "\n",
289
  "#------#\n",
290
  "prompts = {}\n",
291
  "text_encodings = {}\n",
@@ -316,21 +308,11 @@
316
  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
317
  "#--------#\n",
318
  "\n",
319
- "if full_names:\n",
320
- " url = '/content/text-to-image-prompts/names/fullnames'\n",
321
- " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
322
- "#--------#\n",
323
- "\n",
324
  "if celebs:\n",
325
  " url = '/content/text-to-image-prompts/names/celebs/mixed'\n",
326
  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
327
  "#--------#\n",
328
  "\n",
329
- "if celebs_young :\n",
330
- " url = '/content/text-to-image-prompts/names/celebs/young'\n",
331
- " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
332
- "#--------#\n",
333
- "\n",
334
  "if female_fullnames:\n",
335
  " url = '/content/text-to-image-prompts/names/fullnames'\n",
336
  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
@@ -394,13 +376,14 @@
394
  "source": [
395
  "# @title \tβš„ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
396
  "\n",
397
- "\n",
398
  "#image_index = 0 # @param {type:'number'}\n",
399
  "# @markdown πŸ“₯ Load the data (only required one time)\n",
400
  "load_the_data = False # @param {type:\"boolean\"}\n",
401
  "\n",
402
  "# @markdown πŸ–ΌοΈ Choose a pre-encoded reference\n",
403
- "index = 829 # @param {type:\"slider\", min:0, max:1668, step:1}\n",
404
  "\n",
405
  "# @markdown βš–οΈ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
406
  "\n",
@@ -475,7 +458,7 @@
475
  "\n",
476
  " # @markdown -----------\n",
477
  " # @markdown βš™οΈπŸ“ Printing options\n",
478
- " newline_Separator = True # @param {type:\"boolean\"}\n",
479
  "\n",
480
  " import random\n",
481
  " list_size2 = 1000 # param {type:'number'}\n",
@@ -483,7 +466,7 @@
483
  " rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
484
  "\n",
485
  " # @markdown Repeat output N times\n",
486
- " N = 6 # @param {type:\"slider\", min:0, max:10, step:1}\n",
487
  "\n",
488
  " # title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
489
  " RANGE = list_size\n",
@@ -507,7 +490,7 @@
507
  " _prompts = _prompts + prompt + separator\n",
508
  " #------#\n",
509
  " #------#\n",
510
- " __prompts = fix_bad_symbols(__prompts)\n",
511
  " __prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
512
  " __sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
513
  " #------#\n",
@@ -527,8 +510,6 @@
527
  " for i in range(N) : print(__prompts)\n",
528
  " #-------#\n",
529
  " #-------#\n",
530
- "\n",
531
- "\n",
532
  "#-------#\n",
533
  "image\n"
534
  ],
@@ -538,6 +519,196 @@
538
  "execution_count": null,
539
  "outputs": []
540
  },
541
  {
542
  "cell_type": "code",
543
  "source": [
@@ -609,6 +780,53 @@
609
  "execution_count": null,
610
  "outputs": []
611
  },
612
  {
613
  "cell_type": "code",
614
  "source": [
@@ -1006,7 +1224,7 @@
1006
  "id": "SEPUbRwpVwRQ",
1007
  "outputId": "b058be19-2fe5-4de2-ff3c-3e821043a177"
1008
  },
1009
- "execution_count": 9,
1010
  "outputs": [
1011
  {
1012
  "output_type": "stream",
@@ -1031,7 +1249,7 @@
1031
  "id": "5oXvYS1aXdjt",
1032
  "outputId": "00491826-4329-4c02-d038-bc3b221937b1"
1033
  },
1034
- "execution_count": 14,
1035
  "outputs": [
1036
  {
1037
  "output_type": "stream",
@@ -1340,7 +1558,7 @@
1340
  "cellView": "form",
1341
  "id": "Cbt78mgJYHgr"
1342
  },
1343
- "execution_count": 28,
1344
  "outputs": []
1345
  },
1346
  {
 
259
  {
260
  "cell_type": "code",
261
  "source": [
262
+ "# @title πŸ“š Select items to sample from\n",
263
  "\n",
264
  "prompt_features = True # @param {\"type\":\"boolean\",\"placeholder\":\"🦜\"}\n",
265
  "civitai_blue_set = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ“˜\"}\n",
266
  "suffix = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Ή\"}\n",
267
  "prefix = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Έ\"}\n",
268
+ "emojis = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ˜ƒ\"}\n",
269
  "#------#\n",
270
  "\n",
271
  "first_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Ή\"}\n",
272
  "last_names = False # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ”Έ\"}\n",
273
+ "celebs = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ†”πŸ‘¨\"}\n",
274
  "#-------#\n",
 
275
  "danbooru_tags = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸŽ€\"}\n",
276
+ "lyrics = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
277
+ "tripple_nouns = True # @param {\"type\":\"boolean\",\"placeholder\":\"🎼\"}\n",
278
  "#-----#\n",
279
+ "female_fullnames = True # @param {\"type\":\"boolean\",\"placeholder\":\"πŸ˜ƒ\"}\n",
280
  "debug = False\n",
 
281
  "#------#\n",
282
  "prompts = {}\n",
283
  "text_encodings = {}\n",
 
308
  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
309
  "#--------#\n",
310
  "\n",
311
  "if celebs:\n",
312
  " url = '/content/text-to-image-prompts/names/celebs/mixed'\n",
313
  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
314
  "#--------#\n",
315
  "\n",
 
  "if female_fullnames:\n",
317
  " url = '/content/text-to-image-prompts/names/fullnames'\n",
318
  " prompts , text_encodings, nA = append_from_url(prompts , text_encodings, nA , url , '')\n",
 
376
  "source": [
377
  "# @title \tβš„ Use a pre-encoded prompt + image pair from the fusion gen (note: NSFW!)\n",
378
  "\n",
 
379
  "#image_index = 0 # @param {type:'number'}\n",
380
  "# @markdown πŸ“₯ Load the data (only required one time)\n",
381
  "load_the_data = False # @param {type:\"boolean\"}\n",
382
  "\n",
383
  "# @markdown πŸ–ΌοΈ Choose a pre-encoded reference\n",
384
+ "index = 708 # @param {type:\"slider\", min:0, max:1666, step:1}\n",
385
+ "\n",
386
+ "PROMPT_INDEX = index\n",
387
  "\n",
388
  "# @markdown βš–οΈ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
389
  "\n",
 
458
  "\n",
459
  " # @markdown -----------\n",
460
  " # @markdown βš™οΈπŸ“ Printing options\n",
461
+ " newline_Separator = False # @param {type:\"boolean\"}\n",
462
  "\n",
463
  " import random\n",
464
  " list_size2 = 1000 # param {type:'number'}\n",
 
466
  " rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
467
  "\n",
468
  " # @markdown Repeat output N times\n",
469
+ " N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
470
  "\n",
471
  " # title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
472
  " RANGE = list_size\n",
 
490
  " _prompts = _prompts + prompt + separator\n",
491
  " #------#\n",
492
  " #------#\n",
493
+ " _prompts = fix_bad_symbols(_prompts)\n",
494
  " __prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
495
  " __sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
496
  " #------#\n",
 
510
  " for i in range(N) : print(__prompts)\n",
511
  " #-------#\n",
512
  " #-------#\n",
 
 
513
  "#-------#\n",
514
  "image\n"
515
  ],
 
519
  "execution_count": null,
520
  "outputs": []
521
  },
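For reference, a minimal standalone sketch (outside the notebook diff) of the duplicate-removal and '{a|b|c}' joining step performed in the cell above with _prompts and separator; the body of fix_bad_symbols is not shown in this commit, so a no-op placeholder is assumed here.

# Sketch of the prompt-joining logic from the cell above (assumptions noted inline).
def fix_bad_symbols(text: str) -> str:
    # placeholder: the real helper (defined elsewhere in the notebook) sanitizes characters
    return text

def join_prompts(prompts, separator='|'):
    joined = ''
    for prompt in prompts:
        # keep only the most recent occurrence of a duplicate prompt
        joined = joined.replace(prompt + separator, '')
        joined = joined + prompt + separator
    joined = fix_bad_symbols(joined)
    # wrap in braces and drop the trailing separator: 'a|b|c|' -> '{a|b|c}'
    return ('{' + joined + '}').replace(separator + '}', '}')

print(join_prompts(['red hair', 'blue eyes', 'red hair']))  # {blue eyes|red hair}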
522
+ {
523
+ "cell_type": "code",
524
+ "source": [
525
+ "# @title \tβš„ Create a savefile-set from the entire range of pre-encoded items\n",
526
+ "\n",
527
+ "#image_index = 0 # @param {type:'number'}\n",
528
+ "# @markdown πŸ“₯ Load the data (only required one time)\n",
529
+ "load_the_data = True # @param {type:\"boolean\"}\n",
530
+ "\n",
531
+ "# @markdown βš–οΈ Set the value for C in the reference <br> <br> sim = C* text_enc + image_enc*(1-C) <br><br>\n",
532
+ "\n",
533
+ "C = 0.5 # @param {type:\"slider\", min:0, max:1, step:0.01}\n",
534
+ "\n",
535
+ "# @markdown 🚫 Penalize similarity to this prompt(optional)\n",
536
+ "\n",
537
+ "if(load_the_data):\n",
538
+ " from PIL import Image\n",
539
+ " import requests\n",
540
+ " target_prompts , target_text_encodings , urls , target_image_encodings , NUM_ITEMS = getPromptsAndLinks('/content/text-to-image-prompts/fusion')\n",
541
+ " from transformers import AutoTokenizer\n",
542
+ " tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
543
+ " from transformers import CLIPProcessor, CLIPModel\n",
544
+ " processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
545
+ " model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n",
546
+ " logit_scale = model.logit_scale.exp() #logit_scale = 100.00000762939453\n",
547
+ "#---------#\n",
548
+ "\n",
549
+ "filename = 'blank.json'\n",
550
+ "path = '/content/text-to-image-prompts/fusion/'\n",
551
+ "print(f'reading {filename}....')\n",
552
+ "_index = 0\n",
553
+ "%cd {path}\n",
554
+ "with open(f'{filename}', 'r') as f:\n",
555
+ " data = json.load(f)\n",
556
+ "#------#\n",
557
+ "_df = pd.DataFrame({'count': data})['count']\n",
558
+ "_blank = {\n",
559
+ " key : value for key, value in _df.items()\n",
560
+ "}\n",
561
+ "#------#\n",
562
+ "\n",
563
+ "root_savefile_name = 'fusion_C05_X7_1000_'\n",
564
+ "output_folder = '/content/output/savefiles/'\n",
565
+ "my_mkdirs(output_folder)\n",
566
+ "NEG = '' # @param {type:'string'}\n",
567
+ "strength = 1 # @param {type:\"slider\", min:-5, max:5, step:0.1}\n",
568
+ "\n",
569
+ "for index in range(1667):\n",
570
+ "\n",
571
+ " PROMPT_INDEX = index\n",
572
+ "\n",
573
+ " prompt = target_prompts[f'{index}']\n",
574
+ " url = urls[f'{index}']\n",
575
+ " if url.find('perchance')>-1:\n",
576
+ " image = Image.open(requests.get(url, stream=True).raw)\n",
577
+ " else: continue #print(\"(No image for this ID)\")\n",
578
+ "\n",
579
+ " print(f\"no. {PROMPT_INDEX} : '{prompt}'\")\n",
580
+ "\n",
581
+ "\n",
582
+ " if(True):\n",
583
+ " text_features_A = target_text_encodings[f'{index}']\n",
584
+ " image_features_A = target_image_encodings[f'{index}']\n",
585
+ "\n",
586
+ " # text-similarity\n",
587
+ " sims = C * torch.matmul(text_tensor, text_features_A.t())\n",
588
+ "\n",
589
+ " neg_sims = 0*sims\n",
590
+ " if(NEG != ''):\n",
591
+ "\n",
592
+ " # Get text features for user input\n",
593
+ " inputs = tokenizer(text = NEG, padding=True, return_tensors=\"pt\")\n",
594
+ " text_features_NEG = model.get_text_features(**inputs)\n",
595
+ " text_features_NEG = text_features_A/text_features_A.norm(p=2, dim=-1, keepdim=True)\n",
596
+ "\n",
597
+ " # text-similarity\n",
598
+ " neg_sims = strength*torch.matmul(text_tensor, text_features_NEG.t())\n",
599
+ " #------#\n",
600
+ "\n",
601
+ " # plus image-similarity\n",
602
+ " sims = sims + (1-C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale\n",
603
+ "\n",
604
+ " # minus NEG-similarity\n",
605
+ " sims = sims - neg_sims\n",
606
+ "\n",
607
+ " # Sort the items\n",
608
+ " sorted , indices = torch.sort(sims,dim=0 , descending=True)\n",
609
+ "\n",
610
+ " # @title βš™οΈπŸ“ Print the results (Advanced)\n",
611
+ " list_size = 1000 # param {type:'number'}\n",
612
+ " start_at_index = 0 # param {type:'number'}\n",
613
+ " print_Similarity = True # param {type:\"boolean\"}\n",
614
+ " print_Prompts = True # param {type:\"boolean\"}\n",
615
+ " print_Prefix = True # param {type:\"boolean\"}\n",
616
+ " print_Descriptions = True # param {type:\"boolean\"}\n",
617
+ " compact_Output = True # param {type:\"boolean\"}\n",
618
+ "\n",
619
+ " # @markdown -----------\n",
620
+ " # @markdown βš™οΈπŸ“ Printing options\n",
621
+ " newline_Separator = False # @param {type:\"boolean\"}\n",
622
+ "\n",
623
+ " import random\n",
624
+ " list_size2 = 1000 # param {type:'number'}\n",
625
+ " start_at_index2 = 10000 # param {type:'number'}\n",
626
+ " rate_percent = 0 # param {type:\"slider\", min:0, max:100, step:1}\n",
627
+ "\n",
628
+ " # @markdown Repeat output N times\n",
629
+ " N = 7 # @param {type:\"slider\", min:0, max:10, step:1}\n",
630
+ "\n",
631
+ " # title Show the 100 most similiar suffix and prefix text-encodings to the text encoding\n",
632
+ " RANGE = list_size\n",
633
+ " separator = '|'\n",
634
+ " if newline_Separator : separator = separator + '\\n'\n",
635
+ "\n",
636
+ " _prompts = ''\n",
637
+ " _sims = ''\n",
638
+ " for _index in range(start_at_index + RANGE):\n",
639
+ " if _index < start_at_index : continue\n",
640
+ " index = indices[_index].item()\n",
641
+ "\n",
642
+ " prompt = prompts[f'{index}']\n",
643
+ " if rate_percent >= random.randint(0,100) : prompt = prompts[f'{random.randint(start_at_index2 , start_at_index2 + list_size2)}']\n",
644
+ "\n",
645
+ " #Remove duplicates\n",
646
+ " if _prompts.find(prompt + separator)<=-1:\n",
647
+ " _sims = _sims + f'{round(100*sims[index].item(), 2)} %' + separator\n",
648
+ " #-------#\n",
649
+ " _prompts = _prompts.replace(prompt + separator,'')\n",
650
+ " _prompts = _prompts + prompt + separator\n",
651
+ " #------#\n",
652
+ " #------#\n",
653
+ " _prompts = fix_bad_symbols(_prompts)\n",
654
+ " __prompts = ('{' + _prompts + '}').replace(separator + '}', '}')\n",
655
+ " __sims = ('{' + _sims + '}').replace(separator + '}', '}')\n",
656
+ " #------#\n",
657
+ " #--------#\n",
658
+ " _savefile = _blank\n",
659
+ " from safetensors.torch import load_file\n",
660
+ " import json , os , torch\n",
661
+ " import pandas as pd\n",
662
+ " #----#\n",
663
+ " def my_mkdirs(folder):\n",
664
+ " if os.path.exists(folder)==False:\n",
665
+ " os.makedirs(folder)\n",
666
+ " #------#\n",
667
+ " savefile_prompt = ''\n",
668
+ " for i in range(N) : savefile_prompt = savefile_prompt + ' ' + __prompts\n",
669
+ " _savefile['main'] = savefile_prompt.replace('\\n', ' ').replace(' ', ' ').replace(' ', ' ')\n",
670
+ " #------#\n",
671
+ " save_filename = f'{root_savefile_name}{PROMPT_INDEX}.json'\n",
672
+ " #-----#\n",
673
+ " %cd {output_folder}\n",
674
+ " print(f'Saving savefile {save_filename} to {output_folder}...')\n",
675
+ " with open(save_filename, 'w') as f:\n",
676
+ " json.dump(_savefile, f)\n",
677
+ " #---------#\n",
678
+ " continue\n",
679
+ "#-----------#"
680
+ ],
681
+ "metadata": {
682
+ "id": "NZy2HrkZ1Rto"
683
+ },
684
+ "execution_count": null,
685
+ "outputs": []
686
+ },
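For orientation, a minimal sketch (outside the diff) of the scoring math the new savefile cell applies to every pre-encoded vocab item: sim = C*text_sim + (1-C)*image_sim*logit_scale, minus an optional negative-prompt term. The vocabulary tensor and reference encodings below are random stand-ins; only the arithmetic mirrors the cell.

# Blended CLIP similarity as used in the cell above; tensors are random placeholders.
import torch
import torch.nn.functional as F

C = 0.5              # text/image blend factor (slider in the cell)
strength = 1.0       # weight of the negative-prompt penalty
logit_scale = 100.0  # stand-in for model.logit_scale.exp()

text_tensor       = F.normalize(torch.randn(1000, 768), dim=-1)  # pre-encoded vocab items
text_features_A   = F.normalize(torch.randn(1, 768), dim=-1)     # reference prompt encoding
image_features_A  = F.normalize(torch.randn(1, 768), dim=-1)     # reference image encoding
text_features_NEG = F.normalize(torch.randn(1, 768), dim=-1)     # negative prompt encoding

sims  = C * torch.matmul(text_tensor, text_features_A.t())                        # text similarity
sims += (1 - C) * torch.matmul(text_tensor, image_features_A.t()) * logit_scale   # plus image similarity
sims -= strength * torch.matmul(text_tensor, text_features_NEG.t())               # minus NEG similarity

_, indices = torch.sort(sims, dim=0, descending=True)
print(indices[:5].flatten().tolist())  # indices of the 5 best-matching vocab items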
687
+ {
688
+ "cell_type": "code",
689
+ "source": [
690
+ "# Determine if this notebook is running on Colab or Kaggle\n",
691
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
692
+ "home_directory = '/content/'\n",
693
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
694
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
695
+ "%cd {home_directory}\n",
696
+ "#-------#\n",
697
+ "\n",
698
+ "# @title Download the text_encodings as .zip\n",
699
+ "import os\n",
700
+ "%cd {home_directory}\n",
701
+ "#os.remove(f'{home_directory}results.zip')\n",
702
+ "root_output_folder = home_directory + 'output/'\n",
703
+ "zip_dest = f'{home_directory}results.zip'\n",
704
+ "!zip -r {zip_dest} {root_output_folder}"
705
+ ],
706
+ "metadata": {
707
+ "id": "DaV1ynRs1XeS"
708
+ },
709
+ "execution_count": null,
710
+ "outputs": []
711
+ },
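A small optional follow-up, assuming the standard google.colab.files helper: once the cell above has written results.zip, it can be pulled down directly when running on Colab (on Kaggle the archive simply stays under /kaggle/working/ and can be taken from the output tab). home_directory and using_Kaggle are reused from the cell above.

# Optional: trigger a browser download of the archive created above (Colab only).
import os
zip_dest = f'{home_directory}results.zip'
if not using_Kaggle and os.path.exists(zip_dest):
    from google.colab import files
    files.download(zip_dest)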
712
  {
713
  "cell_type": "code",
714
  "source": [
 
780
  "execution_count": null,
781
  "outputs": []
782
  },
783
+ {
784
+ "cell_type": "code",
785
+ "source": [
786
+ "# @title Quick fix to created json files above\n",
787
+ "output_folder = '/content/output/fusion-gen-savefiles/'\n",
788
+ "index = 0\n",
789
+ "path = '/content/text-to-image-prompts/fusion-gen-savefiles'\n",
790
+ "\n",
791
+ "def my_mkdirs(folder):\n",
792
+ " if os.path.exists(folder)==False:\n",
793
+ " os.makedirs(folder)\n",
794
+ "\n",
795
+ "my_mkdirs(output_folder)\n",
796
+ "for filename in os.listdir(f'{path}'):\n",
797
+ " if filename.find('fusion_C05_X7_1000_')<=-1: continue\n",
798
+ " print(f'reading {filename}...')\n",
799
+ " %cd {path}\n",
800
+ " with open(f'{filename}', 'r') as f:\n",
801
+ " data = json.load(f)\n",
802
+ " _df = pd.DataFrame({'count': data})['count']\n",
803
+ " _savefile = {\n",
804
+ " key : value for key, value in _df.items()\n",
805
+ " }\n",
806
+ "\n",
807
+ " _savefile2 = {}\n",
808
+ "\n",
809
+ " for key in _savefile:\n",
810
+ " _savefile2[key] = _savefile[key]\n",
811
+ " if(key == \"_main\") :\n",
812
+ " _savefile2[key] = \"Prompt input only ✏️\"\n",
813
+ " print(\"changed\")\n",
814
+ " #----------#\n",
815
+ "\n",
816
+ " save_filename = f'fusion_C05_X7_1000_{index}.json'\n",
817
+ " index = index + 1\n",
818
+ "\n",
819
+ " %cd {output_folder}\n",
820
+ " print(f'Saving savefile {save_filename} to {output_folder}...')\n",
821
+ " with open(save_filename, 'w') as f:\n",
822
+ " json.dump(_savefile2, f)"
823
+ ],
824
+ "metadata": {
825
+ "id": "mRhTZ6wS1g0m"
826
+ },
827
+ "execution_count": null,
828
+ "outputs": []
829
+ },
830
  {
831
  "cell_type": "code",
832
  "source": [
 
1224
  "id": "SEPUbRwpVwRQ",
1225
  "outputId": "b058be19-2fe5-4de2-ff3c-3e821043a177"
1226
  },
1227
+ "execution_count": null,
1228
  "outputs": [
1229
  {
1230
  "output_type": "stream",
 
1249
  "id": "5oXvYS1aXdjt",
1250
  "outputId": "00491826-4329-4c02-d038-bc3b221937b1"
1251
  },
1252
+ "execution_count": null,
1253
  "outputs": [
1254
  {
1255
  "output_type": "stream",
 
1558
  "cellView": "form",
1559
  "id": "Cbt78mgJYHgr"
1560
  },
1561
+ "execution_count": null,
1562
  "outputs": []
1563
  },
1564
  {