codeShare committed on
Commit 1607bc5
Parent(s): 831b8e7

Upload sd_token_similarity_calculator.ipynb
Google Colab Notebooks/sd_token_similarity_calculator.ipynb CHANGED
@@ -134,14 +134,33 @@
  "#--------#\n",
  "\n",
  "#default NEG values\n",
- "if (not name_NEG): name_NEG = ''\n",
- "if (not image_NEG): image_NEG = ''\n",
- "if (not strength_image_NEG): strength_image_NEG = 1\n",
- "if (not strength_NEG): strength_NEG = 1\n",
- "if (not NUM_VOCAB_ITEMS): NUM_VOCAB_ITEMS = 0\n",
- "if (not using_NEG): using_NEG = False\n",
- "if (not using_image_NEG): using_image_NEG = False\n",
  "#------#\n",
  "\n"
  ],
  "metadata": {
@@ -149,15 +168,25 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "216538c2-9770-438d-a07e-4b965d260080"
  },
- "execution_count": 17,
  "outputs": [
  {
  "output_type": "stream",
  "name": "stdout",
  "text": [
- "/content\n"
  ]
  }
  ]
@@ -286,450 +315,10 @@
  "NUM_VOCAB_ITEMS = nA\n"
  ],
  "metadata": {
- "id": "ZMG4CThUAmwW",
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "outputId": "2d07198f-1a64-4cbf-cb28-aec4bc5e73b1"
  },
- "execution_count": 18,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "reading tripple_nouns_17-11.json....\n",
- "/content/text-to-image-prompts/nouns/text\n",
- "/content/text-to-image-prompts/nouns/text_encodings\n",
- […the same "reading <file>.json...." line followed by the two nouns paths repeats for each remaining tripple_nouns_*.json file, then the pattern repeats with the lyrics paths for 🎼 fusion-t2i-lyrics-1.json through 🎼 fusion-t2i-lyrics-43.json…]
- "reading 🎼 fusion-t2i-lyrics-4.json....\n",
- "/content/text-to-image-prompts/lyrics/text\n",
- "/content/text-to-image-prompts/lyrics/text_encodings\n"
- ]
- }
- ]
  },
  {
  "cell_type": "code",
@@ -764,7 +353,7 @@
  "metadata": {
  "id": "sX2JGqOH5B8g"
  },
- "execution_count": 19,
  "outputs": []
  },
  {
@@ -833,7 +422,7 @@
  "height": 1000
  }
  },
- "execution_count": 20,
  "outputs": [
  {
  "output_type": "display_data",
@@ -894,7 +483,7 @@
  "metadata": {
  "id": "xc-PbIYF428y"
  },
- "execution_count": 32,
  "outputs": []
  },
  {
@@ -970,7 +559,7 @@
  "base_uri": "https://localhost:8080/"
  }
  },
- "execution_count": 33,
  "outputs": [
  {
  "output_type": "stream",
@@ -1102,7 +691,7 @@
  "height": 1000
  }
  },
- "execution_count": 34,
  "outputs": [
  {
  "output_type": "display_data",
@@ -1155,7 +744,7 @@
  "metadata": {
  "id": "rebogpoyOG8k"
  },
- "execution_count": 29,
  "outputs": []
  },
  {
@@ -1453,11 +1042,11 @@
  "#--------#\n",
  "\n",
  "# User input\n",
- "target = home_directory + 'text-to-image-prompts/nouns/'\n",
  "root_output_folder = home_directory + 'output/'\n",
- "output_folder = root_output_folder + 'nouns/'\n",
- "root_filename = 'tripple_nouns_'\n",
- "NUM_FILES = 20\n",
  "#--------#\n",
  "\n",
  "\n",
@@ -1610,12 +1199,349 @@
  " #----#"
  ],
  "metadata": {
- "id": "9ZiTsF9jV0TV",
- "cellView": "form"
  },
  "execution_count": null,
  "outputs": []
  },
  {
  "cell_type": "code",
  "source": [
@@ -1699,6 +1625,24 @@
  "execution_count": null,
  "outputs": []
  },
  {
  "cell_type": "code",
  "source": [
@@ -1771,22 +1715,10 @@
  ],
  "metadata": {
  "cellView": "form",
- "id": "CWlWk0KpuX55",
- "outputId": "418a74c3-f83c-4cfd-8514-437974a84601",
- "colab": {
- "base_uri": "https://localhost:8080/"
- }
  },
  "execution_count": null,
- "outputs": [
- {
- "output_type": "stream",
- "name": "stdout",
- "text": [
- "/content/outputs\n"
- ]
- }
- ]
  },
  {
  "cell_type": "markdown",
@@ -134,14 +134,33 @@
  "#--------#\n",
  "\n",
  "#default NEG values\n",
+ "try: name_NEG\n",
+ "except: name_NEG = ''\n",
+ "try: image_NEG\n",
+ "except: image_NEG = ''\n",
+ "try: strength_image_NEG\n",
+ "except: strength_image_NEG = 1\n",
+ "try: strength_NEG\n",
+ "except: strength_NEG = 1\n",
+ "try: NUM_VOCAB_ITEMS\n",
+ "except: NUM_VOCAB_ITEMS = 0\n",
+ "try: using_NEG\n",
+ "except: using_NEG = False\n",
+ "try: using_image_NEG\n",
+ "except: using_image_NEG = False\n",
  "#------#\n",
+ "\n",
+ "def getJSON(path , filename):\n",
+ " %cd {path}\n",
+ " with open(f'{filename}', 'r') as f:\n",
+ " data = json.load(f)\n",
+ " #------#\n",
+ " print(f'reading {filename}....')\n",
+ " _df = pd.DataFrame({'count': data})['count']\n",
+ " _prompts = {\n",
+ " key : value for key, value in _df.items()\n",
+ " }\n",
+ " return _prompts\n",
  "\n"
  ],
  "metadata": {
 
@@ -149,15 +168,25 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
+ "outputId": "bb56eb39-319a-4981-b87e-03745b7d869b"
  },
+ "execution_count": 1,
  "outputs": [
  {
  "output_type": "stream",
  "name": "stdout",
  "text": [
+ "/content\n",
+ "/content\n",
+ "Cloning into 'text-to-image-prompts'...\n",
+ "remote: Enumerating objects: 2335, done.\u001b[K\n",
+ "remote: Counting objects: 100% (2332/2332), done.\u001b[K\n",
+ "remote: Compressing objects: 100% (1911/1911), done.\u001b[K\n",
+ "remote: Total 2335 (delta 408), reused 2245 (delta 369), pack-reused 3 (from 1)\u001b[K\n",
+ "Receiving objects: 100% (2335/2335), 18.25 MiB | 9.15 MiB/s, done.\n",
+ "Resolving deltas: 100% (408/408), done.\n",
+ "Updating files: 100% (1289/1289), done.\n",
+ "Filtering content: 100% (572/572), 2.20 GiB | 42.90 MiB/s, done.\n"
  ]
  }
  ]
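The stored output now kept above comes from the notebook's clone-once guard: `try: loaded / except: !git clone ...; loaded = True` (visible in the processing cell added further down), so the roughly 2.20 GiB dataset repo is fetched only on the first run of a session. A sketch of the same idea outside IPython, with `subprocess` standing in for the `!git clone` cell magic and a directory check standing in for the `loaded` sentinel:

```python
import os
import subprocess

# Clone the prompts dataset once per session; later runs skip the fetch.
home_directory = '/content/'
repo_url = 'https://huggingface.co/datasets/codeShare/text-to-image-prompts'
repo_dir = os.path.join(home_directory, 'text-to-image-prompts')

if not os.path.exists(repo_dir):
    subprocess.run(['git', 'clone', repo_url], cwd=home_directory, check=True)
loaded = True  # the sentinel the notebook's try/except checks
```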
 
@@ -286,450 +315,10 @@
  "NUM_VOCAB_ITEMS = nA\n"
  ],
  "metadata": {
+ "id": "ZMG4CThUAmwW"
  },
+ "execution_count": null,
+ "outputs": []
  },
  {
  "cell_type": "code",
 
@@ -764,7 +353,7 @@
  "metadata": {
  "id": "sX2JGqOH5B8g"
  },
+ "execution_count": null,
  "outputs": []
  },
  {
 
@@ -833,7 +422,7 @@
  "height": 1000
  }
  },
+ "execution_count": null,
  "outputs": [
  {
  "output_type": "display_data",
 
@@ -894,7 +483,7 @@
  "metadata": {
  "id": "xc-PbIYF428y"
  },
+ "execution_count": null,
  "outputs": []
  },
  {
 
@@ -970,7 +559,7 @@
  "base_uri": "https://localhost:8080/"
  }
  },
+ "execution_count": null,
  "outputs": [
  {
  "output_type": "stream",
 
@@ -1102,7 +691,7 @@
  "height": 1000
  }
  },
+ "execution_count": null,
  "outputs": [
  {
  "output_type": "display_data",
 
@@ -1155,7 +744,7 @@
  "metadata": {
  "id": "rebogpoyOG8k"
  },
+ "execution_count": null,
  "outputs": []
  },
  {
 
@@ -1453,11 +1042,11 @@
  "#--------#\n",
  "\n",
  "# User input\n",
+ "target = home_directory + 'text-to-image-prompts/danbooru/'\n",
  "root_output_folder = home_directory + 'output/'\n",
+ "output_folder = root_output_folder + 'danbooru/'\n",
+ "root_filename = '🎀 fusion-t2i-danbooru-tags'\n",
+ "NUM_FILES = 1\n",
  "#--------#\n",
  "\n",
  "\n",
 
@@ -1610,12 +1199,349 @@
  " #----#"
  ],
  "metadata": {
+ "id": "9ZiTsF9jV0TV"
  },
  "execution_count": null,
  "outputs": []
  },
+ {
+ "cell_type": "code",
+ "source": [
+ " from PIL import Image\n",
+ " import requests\n",
+ "\n",
+ " image_url = \"https://generated-images.perchance.org/image/4a16af4ca096845767941e1a1cf7e787be444305f825baa2fe0a6e32268d4538.jpeg\"\n",
+ " image_A = Image.open(requests.get(image_url, stream=True).raw)\n",
+ " #------#\n",
+ " image_A"
+ ],
+ "metadata": {
+ "id": "A5LP8Lfa1gM-"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ " inputs = tokenizer(text = 'fmfjfjfj', padding=True, return_tensors=\"pt\").to(device)\n",
+ "\n",
+ " inputs"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "8ScBPNT55dUr",
+ "outputId": "a55e757a-64da-409a-ab4f-ae335a5261a1"
+ },
+ "execution_count": 14,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "{'input_ids': tensor([[49406, 13715, 69, 31130, 31130, 329, 49407]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1]])}"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 14
+ }
+ ]
+ },
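The stored result shows CLIP's byte-pair encoding at work: the nonsense string `'fmfjfjfj'` has no single vocabulary entry, so it is broken into subword pieces, and the sequence is framed by the special ids 49406 (`<|startoftext|>`) and 49407 (`<|endoftext|>`). A standalone sketch that reproduces the cell on CPU (same checkpoint the surrounding cells load):

```python
from transformers import AutoTokenizer

# Same CLIP tokenizer the notebook uses.
tokenizer = AutoTokenizer.from_pretrained('openai/clip-vit-large-patch14')

inputs = tokenizer(text='fmfjfjfj', padding=True, return_tensors='pt')
print(inputs['input_ids'])
# tensor([[49406, 13715, 69, 31130, 31130, 329, 49407]]) per the stored output:
# start token, five BPE pieces covering 'fmfjfjfj', end token.
print(tokenizer.convert_ids_to_tokens(inputs['input_ids'][0]))
```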
+ {
+ "cell_type": "code",
+ "source": [
+ "# @title Process text+image pairings into encodings\n",
+ "import json\n",
+ "import pandas as pd\n",
+ "import os\n",
+ "import shelve\n",
+ "import torch\n",
+ "from safetensors.torch import save_file\n",
+ "import json\n",
+ "from PIL import Image\n",
+ "import requests\n",
+ "\n",
+ "# Determine if this notebook is running on Colab or Kaggle\n",
+ "#Use https://www.kaggle.com/ if Google Colab GPU is busy\n",
+ "home_directory = '/content/'\n",
+ "using_Kaggle = os.environ.get('KAGGLE_URL_BASE','')\n",
+ "if using_Kaggle : home_directory = '/kaggle/working/'\n",
+ "%cd {home_directory}\n",
+ "#-------#\n",
+ "\n",
+ "# Load the data if not already loaded\n",
+ "try:\n",
+ " loaded\n",
+ "except:\n",
+ " %cd {home_directory}\n",
+ " !git clone https://huggingface.co/datasets/codeShare/text-to-image-prompts\n",
+ " loaded = True\n",
+ "#--------#\n",
+ "\n",
+ "# User input\n",
+ "target = home_directory + 'text-to-image-prompts/fusion/'\n",
+ "root_output_folder = home_directory + 'output/'\n",
+ "output_folder = root_output_folder + 'fusion/'\n",
+ "root_filename = 'prompts'\n",
+ "NUM_FILES = 1\n",
+ "#--------#\n",
+ "\n",
+ "# Setup environment\n",
+ "def my_mkdirs(folder):\n",
+ " if os.path.exists(folder)==False:\n",
+ " os.makedirs(folder)\n",
+ "#--------#\n",
+ "output_folder_text = output_folder + 'text/'\n",
+ "output_folder_text = output_folder + 'text/'\n",
+ "output_folder_text_encodings = output_folder + 'text_encodings/'\n",
+ "output_folder_image_encodings = output_folder + 'image_encodings/'\n",
+ "target_raw_text = target + 'raw/text/'\n",
+ "target_raw_images = target + 'raw/images/'\n",
+ "%cd {home_directory}\n",
+ "my_mkdirs(output_folder)\n",
+ "my_mkdirs(output_folder_text)\n",
+ "my_mkdirs(output_folder_text_encodings)\n",
+ "my_mkdirs(output_folder_image_encodings)\n",
+ "#-------#\n",
+ "\n",
+ "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
+ "from transformers import AutoTokenizer\n",
+ "tokenizer = AutoTokenizer.from_pretrained(\"openai/clip-vit-large-patch14\", clean_up_tokenization_spaces = False)\n",
+ "\n",
+ "max_length = tokenizer.model_max_length\n",
+ "\n",
+ "from transformers import CLIPProcessor, CLIPModel\n",
+ "processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\" , clean_up_tokenization_spaces = True)\n",
+ "model = CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\").to(device)\n",
+ "#---------#\n",
+ "for file_index in range(NUM_FILES + 1):\n",
+ " if (file_index < 1): continue\n",
+ "\n",
+ " # Assign name of JSON file to read\n",
+ " filename = f'{root_filename}{file_index}'\n",
+ " if NUM_FILES == 1 : filename = f'{root_filename}'\n",
+ " #--------#\n",
+ "\n",
+ " # Read {filename}.json for text prompts\n",
+ " %cd {target_raw_text}\n",
+ " with open(filename + '.json', 'r') as f:\n",
+ " data = json.load(f)\n",
+ " _df = pd.DataFrame({'count': data})['count']\n",
+ " prompts = {\n",
+ " key : value.replace(\"</w>\",\" \") for key, value in _df.items()\n",
+ " }\n",
+ " index = 0\n",
+ " for key in prompts:\n",
+ " index = index + 1\n",
+ " #----------#\n",
+ " NUM_ITEMS = index\n",
+ " #------#\n",
+ "\n",
+ " # Read {filename}.json for image urls\n",
+ " %cd {target_raw_images}\n",
+ " with open('links.json', 'r') as f:\n",
+ " data = json.load(f)\n",
+ " _df = pd.DataFrame({'count': data})['count']\n",
+ " urls = {\n",
+ " key : value.replace(\"</w>\",\" \") for key, value in _df.items()\n",
+ " }\n",
+ " #-------#\n",
+ "\n",
+ " # Calculate text_encoding for .json file contents and results as .db file\n",
+ " names_dict = {}\n",
+ " text_encoding_dict = {}\n",
+ " image_encoding_dict = {}\n",
+ " segments = {}\n",
+ " index = 0;\n",
+ " subby = 1;\n",
+ " NUM_HEADERS = 2\n",
+ " CHUNKS_SIZE = 2000\n",
+ " _filename = ''\n",
+ " #from google.colab.patches import cv2_imshow\n",
+ "\n",
+ " for _index in range(NUM_ITEMS):\n",
+ " if not (f'{_index}' in prompts) : continue\n",
+ " if (prompts[f'{_index}']==\"SKIP\") : continue\n",
+ " if (index % 100 == 0) : print(index)\n",
+ " if (index == 0 and _index>0) : index = index + 2 #make space for headers\n",
+ " if (_index % (CHUNKS_SIZE-NUM_HEADERS) == 0 and _index > 0) :\n",
+ "\n",
+ " # Write headers in the .json\n",
+ " names_dict[f'{0}'] = f'{_index}'\n",
+ " names_dict[f'{1}'] = f'{filename}-{subby}'\n",
+ "\n",
+ " # Encode the headers into text_encoding and image_encoding\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{0}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
+ " image_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{1}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
+ " image_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
+ " #-------#\n",
+ "\n",
+ " # Write .json\n",
+ " _filename = f'{filename}-{subby}.json'\n",
+ " %cd {output_folder_text}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text}...')\n",
+ " with open(_filename, 'w') as f:\n",
+ " json.dump(names_dict, f)\n",
+ " #-------#\n",
+ "\n",
+ " # Write .safetensors for text\n",
+ " _filename = f'{filename}-{subby}.safetensors'\n",
+ " %cd {output_folder_text_encodings}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text_encodings}...')\n",
+ " save_file(text_encoding_dict, _filename)\n",
+ " #--------#\n",
+ "\n",
+ " # Write .safetensors for images\n",
+ " _filename = f'{filename}-{subby}.safetensors'\n",
+ " %cd {output_folder_image_encodings}\n",
+ " print(f'Saving segment {_filename} to {output_folder_image_encodings}...')\n",
+ " save_file(image_encoding_dict, _filename)\n",
+ " #--------#\n",
+ "\n",
+ " #Iterate\n",
+ " subby = subby + 1\n",
+ " segments[f'{subby}'] = _filename\n",
+ " text_encoding_dict = {}\n",
+ " image_encoding_dict = {}\n",
+ " names_dict = {}\n",
+ " index = 0\n",
+ " #------#\n",
+ " else:\n",
+ " index = index + 1\n",
+ " #--------#\n",
+ "\n",
+ " #----text-encodings----#\n",
+ " inputs = tokenizer(text = '' + prompts[f'{_index}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{index}'] = text_features.to(torch.device('cpu'))\n",
+ " names_dict[f'{index}'] = prompts[f'{_index}']\n",
+ " #-----#\n",
+ "\n",
+ " #fetch image from url\n",
+ " image_url = urls[f'{_index}']\n",
+ " image_A = Image.open(requests.get(image_url, stream=True).raw)\n",
+ " #------#\n",
+ " #image_A #Display it\n",
+ " #-----#\n",
+ "\n",
+ " #---image-encodings---#\n",
+ " inputs = processor(images=image_A, return_tensors=\"pt\")\n",
+ " image_features = model.get_image_features(**inputs)\n",
+ " image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " image_encoding_dict[f'{index}'] = image_features.to(torch.device('cpu'))\n",
+ " #-----#\n",
+ " continue\n",
+ " #-----#\n",
+ " #-----#\n",
+ " # Write headers in the .json\n",
+ " names_dict[f'{0}'] = f'{_index}'\n",
+ " names_dict[f'{1}'] = f'{filename}-{subby}'\n",
+ "\n",
+ " # Encode the headers into text_encoding and image_encoding\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{0}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
+ " image_encoding_dict[f'{0}'] = text_features.to(torch.device('cpu'))\n",
+ " inputs = tokenizer(text = '' + names_dict[f'{1}'], padding=True,truncation=True, return_tensors=\"pt\").to(device)\n",
+ " text_features = model.get_text_features(**inputs).to(device)\n",
+ " text_features = text_features/text_features.norm(p=2, dim=-1, keepdim=True)\n",
+ " text_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
+ " image_encoding_dict[f'{1}'] = text_features.to(torch.device('cpu'))\n",
+ " #-------#\n",
+ "\n",
+ " # Write .json\n",
+ " _filename = f'{filename}-{subby}.json'\n",
+ " %cd {output_folder_text}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text}...')\n",
+ " with open(_filename, 'w') as f:\n",
+ " json.dump(names_dict, f)\n",
+ " #-------#\n",
+ "\n",
+ " # Write .safetensors for text\n",
+ " _filename = f'{filename}-{subby}.safetensors'\n",
+ " %cd {output_folder_text_encodings}\n",
+ " print(f'Saving segment {_filename} to {output_folder_text_encodings}...')\n",
+ " save_file(text_encoding_dict, _filename)\n",
+ " #--------#\n",
+ "\n",
+ " # Write .safetensors for images\n",
+ " _filename = f'{filename}-{subby}.safetensors'\n",
+ " %cd {output_folder_image_encodings}\n",
+ " print(f'Saving segment {_filename} to {output_folder_image_encodings}...')\n",
+ " save_file(image_encoding_dict, _filename)\n",
+ " #--------#\n",
+ "\n",
+ " #Iterate\n",
+ " subby = subby + 1\n",
+ " segments[f'{subby}'] = _filename\n",
+ " text_encoding_dict = {}\n",
+ " image_encoding_dict = {}\n",
+ " names_dict = {}\n",
+ " index = 0\n",
+ " #------#\n",
+ " #----#"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 443
+ },
+ "id": "SDKl21yzsyuo",
+ "outputId": "1b056910-5151-4425-b4f2-24e4df842301"
+ },
+ "execution_count": 15,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "/content\n",
+ "/content\n",
+ "/content/text-to-image-prompts/fusion/raw/text\n",
+ "/content/text-to-image-prompts/fusion/raw/images\n",
+ "0\n"
+ ]
+ },
+ {
+ "output_type": "error",
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "traceback": [
+ "KeyboardInterrupt                         Traceback (most recent call last)",
+ "<ipython-input-15-26b9624c4626> in <cell line: 65>(), line 185: image_features = model.get_image_features(**inputs)",
+ […ANSI-escaped frames through transformers/models/clip/modeling_clip.py and torch/nn/modules/module.py (get_image_features → vision_model → encoder → encoder_layer → self_attn → k_proj); the stored traceback is cut off here…]
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/transformers/models/clip/modeling_clip.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, attention_mask, causal_attention_mask, output_attentions)\u001b[0m\n\u001b[1;32m 463\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 464\u001b[0m \u001b[0mquery_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mq_proj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhidden_states\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 465\u001b[0;31m \u001b[0mkey_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mk_proj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhidden_states\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 466\u001b[0m \u001b[0mvalue_states\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mv_proj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhidden_states\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 467\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
1537
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1551\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1552\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1553\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1554\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1555\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1538
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1560\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1561\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1563\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1564\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1539
+ "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/torch/nn/modules/linear.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 116\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 117\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlinear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 118\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mextra_repr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
1540
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
1541
+ ]
1542
+ }
1543
+ ]
1544
+ },
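The saved output above ends in a KeyboardInterrupt raised inside the CLIP text encoder: the stack passes from the encoder loop (`layer_outputs = encoder_layer(`) through self-attention into `k_proj`, a plain `nn.Linear`, at the moment of the manual stop. A minimal sketch of an interrupt-tolerant encoding loop in the same spirit follows; the model name, prompt list, and variable names are assumptions for illustration, not the notebook's actual code.

# Sketch, not the notebook's implementation: keep partial CLIP text
# encodings when a long encoding pass is stopped with a KeyboardInterrupt.
import torch
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14')  # assumed model
model = CLIPTextModel.from_pretrained('openai/clip-vit-large-patch14')

prompts = ['a photo of a cat', 'a watercolor landscape']  # stand-in prompt list
encodings = {}
try:
    for prompt in prompts:
        inputs = tokenizer(prompt, return_tensors='pt', truncation=True)
        with torch.no_grad():
            # pooler_output is the per-prompt text embedding
            encodings[prompt] = model(**inputs).pooler_output
except KeyboardInterrupt:
    print(f'Interrupted after {len(encodings)} prompts; partial encodings kept.')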
1545
  {
1546
  "cell_type": "code",
1547
  "source": [
 
1625
  "execution_count": null,
1626
  "outputs": []
1627
  },
1628
+ {
1629
+ "cell_type": "code",
1630
+ "source": [
1631
+ "#Remove URL Encoding from the fetched Danbooru tags\n",
1632
+ "danboorus = getJSON('/content/text-to-image-prompts/danbooru/raw/','🎀 fusion-t2i-danbooru-tags.json')\n",
1633
+ "from urllib.parse import unquote\n",
1634
+ "for key in danboorus:\n",
1635
+ " danboorus[key] = unquote(danboorus[key])\n",
1636
+ "%cd /content/\n",
1637
+ "with open(f'🎀 fusion-t2i-danbooru-tags', 'w') as f:\n",
1638
+ " json.dump(danboorus, f)"
1639
+ ],
1640
+ "metadata": {
1641
+ "id": "AjSf585hWWMB"
1642
+ },
1643
+ "execution_count": null,
1644
+ "outputs": []
1645
+ },
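As a quick sanity check, the decoded file written by the cell above can be reloaded and scanned for surviving percent-escapes. A minimal sketch follows; the example escaped tag string is an assumption, and the output path matches the `%cd /content/` plus filename used in the cell.

# Sketch: verify that the URL-decoding pass left no percent-escapes behind.
import json
from urllib.parse import unquote

# Example of the kind of escape the loop above removes
assert unquote('long%20hair%20%28female%29') == 'long hair (female)'

with open('/content/🎀 fusion-t2i-danbooru-tags', 'r') as f:
    tags = json.load(f)

# Any value that unquote() would still change has leftover URL encoding
leftovers = [v for v in tags.values() if unquote(v) != v]
print(f'{len(leftovers)} tags still contain URL escapes')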
1646
  {
1647
  "cell_type": "code",
1648
  "source": [
 
1715
  ],
1716
  "metadata": {
1717
  "cellView": "form",
1718
+ "id": "CWlWk0KpuX55"
1719
  },
1720
  "execution_count": null,
1721
+ "outputs": []
1722
  },
1723
  {
1724
  "cell_type": "markdown",