import os

import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image


class CarShadowDataset(Dataset):
    """Pairs each car image with its corresponding shadow image.

    `root_dir` should point at a single split (e.g. 'dataset/train')
    containing 'car' and 'shadow' subfolders, matching how the
    DataLoaders below are constructed.
    """

    def __init__(self, root_dir, transform=None):
        self.root_dir = root_dir
        self.transform = transform
        self.image_paths = []

        car_folder = os.path.join(root_dir, 'car')
        shadow_folder = os.path.join(root_dir, 'shadow')

        # Each car image <name>.<ext> is paired with <name>_shadow.jpg.
        for filename in sorted(os.listdir(car_folder)):
            car_path = os.path.join(car_folder, filename)
            shadow_name = os.path.splitext(filename)[0] + '_shadow.jpg'
            shadow_path = os.path.join(shadow_folder, shadow_name)
            self.image_paths.append((car_path, shadow_path))

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        car_path, shadow_path = self.image_paths[idx]
        car_image = load_image(car_path)
        shadow_image = load_image(shadow_path)

        if self.transform:
            car_image = self.transform(car_image)
            shadow_image = self.transform(shadow_image)

        return car_image, shadow_image

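A quick check that the dataset pairs up files as expected; the path here is illustrative and assumes the folder layout described above:

dataset = CarShadowDataset('dataset/train')
print(len(dataset))             # number of (car, shadow) pairs found
print(dataset.image_paths[0])   # first pair of file paths, assuming the folder is non-empty
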

def load_image(path):
    # The original left this helper unimplemented; one minimal option,
    # assuming Pillow is available, is to open the file as 3-channel RGB.
    return Image.open(path).convert('RGB')

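The DataLoaders below pass a `your_transform` callable that is never defined here; a minimal sketch, assuming torchvision is available, might look like this:

from torchvision import transforms

your_transform = transforms.Compose([
    transforms.Resize((256, 256)),               # fixed spatial size for car and shadow images
    transforms.ToTensor(),                       # PIL image -> float tensor in [0, 1]
    transforms.Normalize([0.5] * 3, [0.5] * 3),  # rescale to [-1, 1]
])
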

train_data = DataLoader(CarShadowDataset(root_dir='dataset/train', transform=your_transform), batch_size=32, shuffle=True)
val_data = DataLoader(CarShadowDataset(root_dir='dataset/val', transform=your_transform), batch_size=32)

# One pass over a single batch as a sanity check of the pipeline.
for car_image, shadow_image in train_data:
    print(car_image.shape, shadow_image.shape)
    break

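The training helpers below rely on `generator`, `discriminator`, `criterion`, `d_optimizer`, `g_optimizer`, and `epochs`, none of which are defined in this snippet. A minimal sketch of that setup, with placeholder architectures rather than the actual models, might be:

import torch.nn as nn

class CondDiscriminator(nn.Module):
    """Placeholder conditional discriminator: scores a (car, shadow) pair."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(6, 64, 4, stride=2, padding=1), nn.LeakyReLU(0.2),
            nn.Conv2d(64, 1, 4, stride=2, padding=1),
        )

    def forward(self, condition, image):
        # Concatenate the car image (condition) with the shadow along channels.
        return self.net(torch.cat([condition, image], dim=1))

generator = nn.Sequential(           # placeholder image-to-image generator
    nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(inplace=True),
    nn.Conv2d(64, 3, 3, padding=1), nn.Tanh(),
)
discriminator = CondDiscriminator()
criterion = nn.BCEWithLogitsLoss()   # expects raw logits from the discriminator
g_optimizer = torch.optim.Adam(generator.parameters(), lr=2e-4, betas=(0.5, 0.999))
d_optimizer = torch.optim.Adam(discriminator.parameters(), lr=2e-4, betas=(0.5, 0.999))
epochs = 50                          # assumption; the original does not specify a value
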
def train_discriminator(d_optimizer, real_images, real_shadows, fake_shadows):
    d_optimizer.zero_grad()

    # Real pair: car image conditioned on its ground-truth shadow.
    d_real_output = discriminator(real_images, real_shadows)
    # Fake pair: car image with the generated shadow; detach so this step
    # does not backpropagate into the generator.
    d_fake_output = discriminator(real_images, fake_shadows.detach())

    d_real_loss = criterion(d_real_output, torch.ones_like(d_real_output))
    d_fake_loss = criterion(d_fake_output, torch.zeros_like(d_fake_output))
    d_loss = (d_real_loss + d_fake_loss) / 2

    d_loss.backward()
    d_optimizer.step()

    return d_loss.item()

def train_generator(g_optimizer, real_images, fake_images):
    g_optimizer.zero_grad()

    # The generator is rewarded when the discriminator scores its fake
    # shadows as real for the given car images.
    g_fake_output = discriminator(real_images, fake_images)
    g_loss = criterion(g_fake_output, torch.ones_like(g_fake_output))

    g_loss.backward()
    g_optimizer.step()

    return g_loss.item()

for epoch in range(epochs):
    for i, (real_images, real_shadows) in enumerate(train_data):
        # Generate shadow candidates for the current batch of car images.
        fake_shadows = generator(real_images)

        # Update the discriminator on (car, real shadow) vs. (car, fake shadow) pairs.
        d_loss = train_discriminator(d_optimizer, real_images, real_shadows, fake_shadows)

        # Update the generator to fool the discriminator.
        g_loss = train_generator(g_optimizer, real_images, fake_shadows)

        if i % 100 == 0:
            print(f'Epoch [{epoch+1}/{epochs}], Step [{i+1}/{len(train_data)}], '
                  f'D_loss: {d_loss:.4f}, G_loss: {g_loss:.4f}')

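After training, a sketch of how the trained generator might be used on a single car image, assuming the transform and loader defined above (the file path is hypothetical):

generator.eval()
with torch.no_grad():
    car = your_transform(load_image('dataset/test/car/example.jpg'))  # hypothetical test image path
    fake_shadow = generator(car.unsqueeze(0)).squeeze(0)              # add, then remove, the batch dimension
print(fake_shadow.shape)
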
I'd like to commend my colleague, Debashish, for his outstanding performance and invaluable contributions to our team. Debashish's deep understanding and expertise in machine learning concepts have been instrumental in driving our projects forward. He is always enthusiastic about experimenting with new ideas and technologies, which has led to innovative solutions and advancements in our work. Notably, Debashish developed a highly efficient tiny image classifier tailored for mobile applications and successfully integrated this ML model into our mobile app. Additionally, his efforts were crucial in creating a proof of concept (POC) for an ML-integrated mobile app using React-Native. Debashish's dedication and technical prowess have significantly enhanced our capabilities and project outcomes.