New_file.txt
ADDED
@@ -0,0 +1,50 @@
import torch
from transformers import SwinModel
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

# Load the pre-trained Swin Transformer backbone.
# Swin is a vision model: it consumes pixel values directly, so there is no tokenizer.
model_name = "microsoft/swin-base-patch4-window7-224-in22k"
model = SwinModel.from_pretrained(model_name)
model.eval()

# Define a function to preprocess images into the 224x224, ImageNet-normalized
# tensors the model expects
def preprocess_image(image_path):
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    image = Image.open(image_path).convert("RGB")
    return transform(image).unsqueeze(0)  # Add a batch dimension

# Load your ideal subset of images
ideal_image_paths = ["ideal_image1.jpg", "ideal_image2.jpg", "ideal_image3.jpg"]  # Replace with your ideal image file paths
ideal_embeddings = []

for image_path in ideal_image_paths:
    pixel_values = preprocess_image(image_path)
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values)
    # Use the pooled output as a single embedding vector per image
    ideal_embeddings.append(outputs.pooler_output.squeeze(0).numpy())

# Load a set of candidate images
candidate_image_paths = ["candidate_image1.jpg", "candidate_image2.jpg", "candidate_image3.jpg"]  # Replace with your candidate image file paths
candidate_embeddings = []

for image_path in candidate_image_paths:
    pixel_values = preprocess_image(image_path)
    with torch.no_grad():
        outputs = model(pixel_values=pixel_values)
    candidate_embeddings.append(outputs.pooler_output.squeeze(0).numpy())

# Calculate similarities between ideal and candidate images using cosine similarity
# (rows = ideal images, columns = candidate images)
similarities = cosine_similarity(np.stack(ideal_embeddings), np.stack(candidate_embeddings))

# Print the similarity matrix
print(similarities)
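
A minimal follow-up sketch, not part of the original script: one way to use the similarity matrix is to rank the candidates by their mean similarity to the ideal set. The averaging rule is an assumption for illustration (taking the max over ideal images would be an equally valid choice); the snippet continues the script above and reuses its variables.

# Rank candidates by their mean similarity to the ideal images (assumed reduction rule)
mean_scores = similarities.mean(axis=0)    # one score per candidate column
ranking = np.argsort(mean_scores)[::-1]    # highest score first
for rank, idx in enumerate(ranking, start=1):
    print(f"{rank}. {candidate_image_paths[idx]} (score={mean_scores[idx]:.3f})")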