{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import os\n", "os.environ['HF_TOKEN'] = 'hf_………………………'\n", "train_file = 'source_dataset/train_captions.csv'\n", "validation_file = 'source_dataset/valid_captions.csv'\n", "train_concepts_file = 'source_dataset/train_concepts_manual.csv'\n", "validation_concepts_file = 'source_dataset/valid_concepts_manual.csv'\n", "test_file = 'source_dataset/test_captions.csv'\n", "test_concepts_file = 'source_dataset/test_concepts_manual.csv'\n", "dataset_name = 'eltorio/ROCOv2'" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Login to Hugging Face" ] },
{ "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Hugging Face token found in environment variable\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured.\n" ] } ], "source": [ "from huggingface_hub import login\n", "import os\n", "\n", "HF_TOKEN = \"\"\n", "\n", "# Prefer a token passed through the environment\n", "if os.environ.get('HF_TOKEN') is not None:\n", "    HF_TOKEN = os.environ.get('HF_TOKEN')\n", "    print(\"Hugging Face token found in environment variable\")\n", "try:\n", "    import google.colab\n", "    from google.colab import userdata\n", "    # On Colab, fall back to the user data panel if no token was found yet\n", "    if (userdata.get('HF_TOKEN') is not None) and (HF_TOKEN == \"\"):\n", "        HF_TOKEN = userdata.get('HF_TOKEN')\n", "    elif HF_TOKEN == \"\":\n", "        raise ValueError(\"Please set your Hugging Face token in the user data panel, or pass it as an environment variable\")\n", "except ModuleNotFoundError:\n", "    if HF_TOKEN == \"\":\n", "        raise ValueError(\"Please set your Hugging Face token in the user data panel, or pass it as an environment variable\")\n", "\n", "login(\n", "    token=HF_TOKEN,\n", "    add_to_git_credential=True\n", ")" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from datasets import load_dataset, Dataset, Image as HFImage, concatenate_datasets\n", "import datasets\n", "from PIL import Image\n", "import pandas as pd\n", "import io\n", "\n", "# Load an image from disk and return it as PNG-encoded bytes\n", "def load_image(image_id, image_path='train'):\n", "    image_path = os.path.join(f\"source_dataset/{image_path}\", f\"{image_id}.jpg\")\n", "    image_jpg = Image.open(image_path)\n", "    image_bytes = io.BytesIO()\n", "    image_jpg.save(image_bytes, format='PNG')  # re-encode as PNG\n", "    # Return the raw PNG bytes; casting the column to HFImage() later rebuilds the picture from them\n", "    return image_bytes.getvalue()\n", "\n", "# Apply func to every value of a DataFrame column, printing progress every nb rows\n", "def apply_with_progress(df, func, column, nb, image_path='train'):\n", "    result = []\n", "    for i, value in enumerate(df[column]):\n", "        result.append(func(value, image_path))\n", "        if (i + 1) % nb == 0:\n", "            print(f\"Processed {i + 1} rows\")\n", "    return result\n" ] },
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Create the train split" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'C1306645'" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Load the CUI CSV file into a pandas DataFrame\n", "train_concept_unique_identifier_df = pd.read_csv(train_concepts_file)\n", "\n", "# Look up the CUIs for an image ID in train_concept_unique_identifier_df,\n", "# which maps each image ID to its semicolon-separated list of CUIs\n", "def load_train_cui(image_id, image_path='train'):\n", "    cuis = train_concept_unique_identifier_df[train_concept_unique_identifier_df['ID'] == image_id]['CUIs']\n", "    split = str(cuis.values[0]).split(';')\n", "    return split\n", "\n", "# Load the captions CSV file into a pandas DataFrame\n", "train_df = pd.read_csv(train_file)\n", "train_df.rename(columns={'ID': 'image_id', 'Caption': 'caption'}, inplace=True)\n", "train_df['image'] = apply_with_progress(train_df, load_image, 'image_id', 100)\n", "train_df = train_df[['image', 'image_id', 'caption']]\n", "train_df['cui'] = apply_with_progress(train_df, load_train_cui, 'image_id', 1000)\n", "train_dataset = Dataset.from_pandas(train_df).cast_column(\"image\", HFImage())\n", "train_dataset.save_to_disk('train_dataset')\n", "# train_dataset.push_to_hub(dataset_name)\n" ] },
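{ "cell_type": "markdown", "metadata": {}, "source": [ "### Optional: verify the saved train split\n", "\n", "A minimal sanity check, not part of the original pipeline: assuming the cell above completed and wrote the `train_dataset` directory, it reloads the split with `datasets.load_from_disk` and inspects one record." ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Illustrative check only: reload the split written by save_to_disk and look at one record\n", "from datasets import load_from_disk\n", "\n", "reloaded = load_from_disk('train_dataset')\n", "print(reloaded)  # expected features: image, image_id, caption, cui\n", "example = reloaded[0]\n", "print(example['image_id'], example['cui'])\n", "print(example['image'].size)  # PIL image decoded from the stored PNG bytes" ] },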
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Create the validation split" ] },
{ "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Processed 100 rows\n", "Processed 200 rows\n", "Processed 300 rows\n", "Processed 400 rows\n", "Processed 500 rows\n", "Processed 600 rows\n", "Processed 700 rows\n", "Processed 800 rows\n", "Processed 900 rows\n", "Processed 1000 rows\n", "Processed 1100 rows\n", "Processed 1200 rows\n", "Processed 1300 rows\n", "Processed 1400 rows\n", "Processed 1500 rows\n", "Processed 1600 rows\n", "Processed 1700 rows\n", "Processed 1800 rows\n", "Processed 1900 rows\n", "Processed 2000 rows\n", "Processed 2100 rows\n", "Processed 2200 rows\n", "Processed 2300 rows\n", "Processed 2400 rows\n", "Processed 2500 rows\n", "Processed 2600 rows\n", "Processed 2700 rows\n", "Processed 2800 rows\n", "Processed 2900 rows\n", "Processed 3000 rows\n", "Processed 3100 rows\n", "Processed 3200 rows\n", "Processed 3300 rows\n", "Processed 3400 rows\n", "Processed 3500 rows\n", "Processed 3600 rows\n", "Processed 3700 rows\n", "Processed 3800 rows\n", "Processed 3900 rows\n", "Processed 4000 rows\n", "Processed 4100 rows\n", "Processed 4200 rows\n", "Processed 4300 rows\n", "Processed 4400 rows\n", "Processed 4500 rows\n", "Processed 4600 rows\n", "Processed 4700 rows\n", "Processed 4800 rows\n", "Processed 4900 rows\n", "Processed 5000 rows\n", "Processed 5100 rows\n", "Processed 5200 rows\n", "Processed 5300 rows\n", "Processed 5400 rows\n", "Processed 5500 rows\n", "Processed 5600 rows\n", "Processed 5700 rows\n", "Processed 5800 rows\n", "Processed 5900 rows\n", "Processed 6000 rows\n", "Processed 6100 rows\n", "Processed 6200 rows\n", "Processed 6300 rows\n", "Processed 6400 rows\n", "Processed 6500 rows\n", "Processed 6600 rows\n", "Processed 6700 rows\n", "Processed 6800 rows\n", "Processed 6900 rows\n", "Processed 7000 rows\n", "Processed 7100 rows\n", "Processed 7200 rows\n", "Processed 7300 rows\n", "Processed 7400 rows\n", "Processed 7500 rows\n", "Processed 7600 rows\n", "Processed 7700 rows\n", "Processed 7800 rows\n", "Processed 7900 rows\n", "Processed 8000 rows\n", "Processed 8100 rows\n", "Processed 8200 rows\n", "Processed 8300 rows\n", "Processed 8400 rows\n", "Processed 8500 rows\n", "Processed 8600 rows\n", "Processed 8700 rows\n", "Processed 8800 rows\n", "Processed 8900 rows\n", "Processed 9000 rows\n", "Processed 9100 rows\n", "Processed 9200 rows\n", "Processed 9300 rows\n", "Processed 9400 rows\n", "Processed 9500 rows\n", "Processed 9600 rows\n", "Processed 9700 rows\n", "Processed 9800 rows\n", "Processed 9900 rows\n", "Processed 1000 rows\n",
"Processed 2000 rows\n", "Processed 3000 rows\n", "Processed 4000 rows\n", "Processed 5000 rows\n", "Processed 6000 rows\n", "Processed 7000 rows\n", "Processed 8000 rows\n", "Processed 9000 rows\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "51d6b43247154559ab65ddb1cfe9fd95", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Saving the dataset (0/6 shards): 0%| | 0/9904 [00:00