"""
Copyright (c) 2024, Idiap Research Institute.
All rights reserved.
SPDX-License-Identifier: MIT License
For full license text, see the LICENSE file in the repo root
"""
#!/usr/bin/env python3
import os
import json
import datasets
from datasets import (GeneratorBasedBuilder,
                      BuilderConfig,
                      SplitGenerator,
                      DatasetInfo,
                      Features,
                      Value,
                      Version)

logger = datasets.logging.get_logger(__name__)
datasets.logging.disable_progress_bar()
_VERSION = Version("1.0.0")
_CITATION = """
@inproceedings{burdisso-etal-2024-dialog2flow,
title = "Dialog2Flow: Pre-training Soft-Contrastive Action-Driven Sentence Embeddings for Automatic Dialog Flow Extraction",
author = "Burdisso, Sergio and
Madikeri, Srikanth and
Motlicek, Petr",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami",
publisher = "Association for Computational Linguistics",
}
"""
DATASETS_PRETRAIN = ["dialog-acts", "slots", "dialog-actions"]
DATASETS_DS = {
    'ABCD': ['test', 'train', 'val'],
    'BiTOD': ['test', 'train', 'val'],
    'DSTC2-Clean': ['test', 'train', 'val'],
    'Disambiguation': ['test', 'train', 'val'],
    'FRAMES': ['test', 'train'],
    'HDSA-Dialog': ['test', 'train', 'val'],
    'GECOR': ['train'],
    'KETOD': ['test', 'train', 'val'],
    'MS-DC': ['train'],
    'MULTIWOZ2_2': ['test', 'train', 'val'],
    'MulDoGO': ['test', 'train', 'val'],
    'MultiWOZ_2.1': ['test', 'train', 'val'],
    'SGD': ['test', 'train', 'val'],
    'SimJointMovie': ['test', 'train', 'val'],
    'SimJointRestaurant': ['test', 'train', 'val'],
    'Taskmaster1': ['test', 'train', 'val'],
    'Taskmaster2': ['train'],
    'Taskmaster3': ['test', 'train', 'val'],
    'WOZ2_0': ['test', 'train', 'val'],
    # 'SimJointGEN': ['test', 'train', 'val'],
}
DATASETS = list(DATASETS_DS.keys()) + DATASETS_PRETRAIN
SPLIT2NAME = {
    "train": datasets.Split.TRAIN,
    "val": datasets.Split.VALIDATION,
    "test": datasets.Split.TEST,
}
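
# Example usage (illustrative only; the Hub repository id below is assumed to
# be the one hosting this script, and recent `datasets` releases may require
# `trust_remote_code=True` or no longer support loading scripts at all):
#
#   from datasets import load_dataset
#   # Full dialogue corpora (one config per key in DATASETS_DS):
#   abcd = load_dataset("sergioburdisso/dialog2flow-dataset", "ABCD")
#   # Pre-training utterance/label pairs (configs in DATASETS_PRETRAIN):
#   acts = load_dataset("sergioburdisso/dialog2flow-dataset", "dialog-acts")
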
class Dialog2FlowConfig(BuilderConfig):
    """BuilderConfig for Dialog2Flow."""

    def __init__(self, name, citation, url, **kwargs):
        """BuilderConfig for Dialog2Flow.

        Args:
            name: `string`, name of the configuration (one of the dataset
                names listed in DATASETS).
            citation: `string`, citation for the dataset.
            url: `string`, url for information about the dataset.
            **kwargs: keyword arguments forwarded to super.
        """
        super(Dialog2FlowConfig, self).__init__(version=_VERSION, **kwargs)
        self.name = name
        self.citation = citation
        self.url = url


class Dialog2FlowBuilder(GeneratorBasedBuilder):
    """Dataset builder exposing one configuration per supported corpus."""

    BUILDER_CONFIG_CLASS = Dialog2FlowConfig

    BUILDER_CONFIGS = []
    for dataset in DATASETS:
        BUILDER_CONFIGS.append(
            Dialog2FlowConfig(
                name=dataset,
                description="",
                citation=_CITATION,
                url="https://github.com/idiap/dialog2flow",
            ))

    DEFAULT_CONFIG_NAME = "dialog-actions"

    def _info(self):
        if self.config.name in DATASETS_PRETRAIN:
            # Pre-training configs: flat (utterance, label) pairs.
            features = {"utterance": Value("string"), "label": Value("string")}
        else:
            # Dialogue datasets: each example is a full dialog, i.e. a list of
            # annotated turns.
            features = {"dialog": [
                {
                    "speaker": Value("string"),
                    "text": Value("string"),
                    "domains": [
                        Value("string")
                    ],
                    "labels": {
                        "dialog_acts": {
                            "acts": [Value("string")],
                            "main_acts": [Value("string")],
                            "original_acts": [Value("string")],
                        },
                        "slots": [Value("string")],
                        "intents": [Value("string")]
                    }
                }
            ]}
        return DatasetInfo(
            description="",
            features=Features(features),
            homepage=self.config.url,
            citation=_CITATION,
        )
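
    # Illustrative shape of a single yielded example for a dialogue config
    # (placeholder values only, derived from the features declared above; the
    # actual speaker names and label values come from each corpus' data.json):
    #
    #   {"dialog": [
    #       {"speaker": "<user|system>",
    #        "text": "<utterance text>",
    #        "domains": ["<domain>"],
    #        "labels": {"dialog_acts": {"acts": ["<act>"],
    #                                   "main_acts": ["<main act>"],
    #                                   "original_acts": ["<original act>"]},
    #                   "slots": ["<slot>"],
    #                   "intents": ["<intent>"]}},
    #       ...
    #   ]}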

    def _split_generators(self, dl_manager):
        if self.config.name in DATASETS_PRETRAIN:
            # TODO
            file_path = dl_manager.download({
                "train": "train.csv",  # full
                "val": "eval.csv",  # few shot subset
                "test": "test.csv",  # SpokenWOZ
            })
            splits = [
                SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "file_path": file_path["train"],
                        "split": datasets.Split.TRAIN,
                    },
                ),
                SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "file_path": file_path["val"],
                        "split": datasets.Split.VALIDATION,
                    },
                ),
                SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "file_path": file_path["test"],
                        "split": datasets.Split.TEST,
                    },
                )
            ]
        else:
            # Dialogue datasets: all splits live in a single data.json file and
            # are filtered by dialog id in _generate_examples().
            splits = []
            file_path = dl_manager.download({
                "train": os.path.join(self.config.name, "data.json")
            })
            split_names = DATASETS_DS[self.config.name]
            for split_name in split_names:
                splits.append(
                    SplitGenerator(
                        name=SPLIT2NAME[split_name],
                        gen_kwargs={
                            "file_path": file_path["train"],
                            "split": SPLIT2NAME[split_name],
                            "split_name": split_name
                        },
                    )
                )
        return splits

    def _load_json(self, file_path):
        with open(file_path, encoding="utf-8") as f:
            data = json.load(f)
        return data

    def _generate_examples(self, file_path, split, split_name=None):
        if split_name is not None:
            # Dialogue datasets: keep only the dialogs whose id contains the
            # split name (e.g. "train", "val", "test").
            data = self._load_json(file_path)
            data = [(dial_id, dial) for dial_id, dial in data["dialogs"].items() if split_name in dial_id]
            logger.info(f"generating {len(data)} examples from split = {split}")
            for dial_id, dial in data:
                yield dial_id, {"dialog": dial}
        else:
            # Pre-training configs (utterance/label pairs). NOTE: sketch only;
            # it assumes each CSV has "utterance" and "label" columns matching
            # the features declared in _info(). Adjust if the layout differs.
            import csv
            with open(file_path, encoding="utf-8") as reader:
                for ix, row in enumerate(csv.DictReader(reader)):
                    yield ix, {"utterance": row["utterance"], "label": row["label"]}
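

# Minimal local smoke test (illustrative sketch): it assumes a `datasets`
# version that still supports dataset loading scripts and accepts
# `trust_remote_code`, and that the per-corpus data files (e.g. "ABCD/data.json")
# sit next to this script.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, "ABCD", trust_remote_code=True)
    print(dataset)
    print(dataset["train"][0])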