Tasks: Visual Question Answering
Formats: parquet
Languages: English
Size: 10K - 100K
Tags: medical

"""This script de-duplicates the data provided by the PathVQA authors,
creates an "imagefolder" dataset and pushes it to the Hugging Face Hub.
"""
import re
import os
import shutil
import pickle
import datasets
import pandas as pd
for split in ["train", "val", "test"]:
    os.makedirs(f"data/{split}/", exist_ok=True)

    # load the image-question-answer triplets
    with open(f"pvqa/qas/{split}/{split}_qa.pkl", "rb") as fh:
        data = pd.DataFrame(pickle.load(fh))
    # drop the duplicate image-question-answer triplets
    data = data.drop_duplicates(ignore_index=True)

    # perform some basic data cleaning/normalization
    f = lambda x: re.sub(" +", " ", str(x).lower()).replace(" ?", "?").strip()
    data["question"] = data["question"].apply(f)
    data["answer"] = data["answer"].apply(f)
    # copy the images using unique file names
    data.insert(0, "file_name", "")
    for i, row in data.iterrows():
        file_name = f"img_{i}.jpg"
        # write by label with .at; the chained form
        # data["file_name"].iloc[i] = ... may silently modify a copy
        data.at[i, "file_name"] = file_name
        shutil.copyfile(src=f"pvqa/images/{split}/{row['image']}.jpg",
                        dst=f"data/{split}/{file_name}")
    _ = data.pop("image")
    # save the metadata
    data.to_csv(f"data/{split}/metadata.csv", index=False)
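
# each data/<split>/ folder now holds the images plus a metadata.csv with
# columns file_name, question, answer -- the layout the "imagefolder"
# builder uses to attach annotations to images via the file_name column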
# push the dataset to the hub
dataset = datasets.load_dataset("imagefolder", data_dir="data/")
dataset.push_to_hub("flaviagiammarino/path-vqa")
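
Once pushed, the dataset can be loaded straight back from the Hub. A minimal usage sketch (assuming the train/val/test directories are mapped to train/validation/test splits, which is how the "imagefolder" builder normally infers them):

import datasets

dataset = datasets.load_dataset("flaviagiammarino/path-vqa")

# each record pairs a decoded PIL image with one question-answer annotation
sample = dataset["train"][0]
print(sample["question"])
print(sample["answer"])
sample["image"]  # PIL.Image.Image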