Commit: Use 1 caption per image
Browse files — run_hybrid_clip.py (+2, −2)
run_hybrid_clip.py
CHANGED
@@ -211,7 +211,7 @@ class ImageTextDataset(VisionDataset):
 211          self,
 212          root: str,
 213          file_path: str,
 214 -        captions_per_image=[removed value truncated in this extraction — not recoverable from this page],
 215          transform: Optional[Callable] = None,
 216          target_transform: Optional[Callable] = None,
 217          transforms: Optional[Callable] = None,

@@ -352,7 +352,7 @@ def main():
 352      train_dataset = ImageTextDataset(
 353          data_args.data_dir,
 354          data_args.train_file,
 355 -        captions_per_image=[removed value truncated in this extraction — not recoverable from this page],
 356          transform=preprocess,
 357      )
 358
 211          self,
 212          root: str,
 213          file_path: str,
 214 +        captions_per_image=1,
 215          transform: Optional[Callable] = None,
 216          target_transform: Optional[Callable] = None,
 217          transforms: Optional[Callable] = None,

 352      train_dataset = ImageTextDataset(
 353          data_args.data_dir,
 354          data_args.train_file,
 355 +        captions_per_image=1,
 356          transform=preprocess,
 357      )
 358