import os

import cv2
import numpy as np
import torch


class MergeImagesToTemplate:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "image1": ("IMAGE",),
                "image2": ("IMAGE",),
                "image3": ("IMAGE",),
                "image4": ("IMAGE",),
            }
        }

    RETURN_TYPES = ("IMAGE",)
    FUNCTION = "merge_images"
    CATEGORY = "image"
    OUTPUT_NODE = True

    def merge_images(self, image1, image2, image3, image4):
        # Load the overlay template (with alpha channel) from the node's directory.
        current_directory = os.path.dirname(os.path.abspath(__file__))
        template = cv2.imread(os.path.join(current_directory, "template_cross2.png"), cv2.IMREAD_UNCHANGED)

        # Scale factors and (x, y) paste positions for the two top and two bottom slots.
        scale_top = 0.85
        scale_bottom = 1.434
        top_1 = (271, 9)
        top_2 = (831, 9)
        bottom_1 = (15, 474)
        bottom_2 = (786, 474)

        def prepare_image(img):
            # Convert a ComfyUI image tensor (B, H, W, C in 0..1 RGB) to a uint8 BGR array for OpenCV.
            img = np.clip(255.0 * img.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
            return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        image1 = prepare_image(image1)
        image2 = prepare_image(image2)
        image3 = prepare_image(image3)
        image4 = prepare_image(image4)

        # Resize each input to fit its slot in the template.
        img1_resized = cv2.resize(image1, (int(512 * scale_top), int(512 * scale_top)))
        img2_resized = cv2.resize(image2, (int(512 * scale_top), int(512 * scale_top)))
        img3_resized = cv2.resize(image3, (int(512 * scale_bottom), int(512 * scale_bottom)))
        img4_resized = cv2.resize(image4, (int(512 * scale_bottom), int(512 * scale_bottom)))

        # Start from a white canvas the same size as the template.
        background = np.zeros((template.shape[0], template.shape[1], 3), dtype=np.uint8)
        background[:] = (255, 255, 255)

        # Paste the four resized images at their slot positions (indexing is [y, x]).
        background[top_1[1]:top_1[1] + img1_resized.shape[0], top_1[0]:top_1[0] + img1_resized.shape[1]] = img1_resized
        background[top_2[1]:top_2[1] + img2_resized.shape[0], top_2[0]:top_2[0] + img2_resized.shape[1]] = img2_resized
        background[bottom_1[1]:bottom_1[1] + img3_resized.shape[0], bottom_1[0]:bottom_1[0] + img3_resized.shape[1]] = img3_resized
        background[bottom_2[1]:bottom_2[1] + img4_resized.shape[0], bottom_2[0]:bottom_2[0] + img4_resized.shape[1]] = img4_resized

        # Alpha-blend the template over the composited background, channel by channel.
        alpha_channel = template[:, :, 3] / 255.0
        for c in range(3):
            background[:, :, c] = background[:, :, c] * (1 - alpha_channel) + template[:, :, c] * alpha_channel

        # Save a copy next to the node and return the result as a ComfyUI image tensor (RGBA, 0..1).
        cv2.imwrite(os.path.join(current_directory, "result3.png"), background)
        background_rgba = cv2.cvtColor(background, cv2.COLOR_BGR2RGBA)
        result = torch.from_numpy(background_rgba.astype(np.float32) / 255.0).unsqueeze(0)
        return (result,)


NODE_CLASS_MAPPINGS = {"MergeImagesToTemplate": MergeImagesToTemplate}