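"""Preprocess the original DailyDialog release into a unified JSON format.

Reads ijcnlp_dailydialog (dialogue text plus topic, dialogue-act, and emotion
labels), normalizes each utterance, and writes dialogues.json and
ontology.json (zipped into data.zip) together with a small dummy_data.json
sample. Requires NLTK's 'punkt' tokenizer data.
"""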
from zipfile import ZipFile, ZIP_DEFLATED
from shutil import rmtree
import json
import os
from tqdm import tqdm
from collections import Counter
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
import re
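
# mappings from DailyDialog's integer labels to human-readable names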

topic_map = {
    1: "Ordinary Life", 
    2: "School Life", 
    3: "Culture & Education",
    4: "Attitude & Emotion", 
    5: "Relationship", 
    6: "Tourism", 
    7: "Health", 
    8: "Work", 
    9: "Politics", 
    10: "Finance"
}

act_map = {
    1: "inform", 
    2: "question", 
    3: "directive", 
    4: "commissive"
}

emotion_map = {
    0: "no emotion", 
    1: "anger", 
    2: "disgust", 
    3: "fear", 
    4: "happiness", 
    5: "sadness", 
    6: "surprise"
}

def preprocess():
    original_data_dir = 'ijcnlp_dailydialog'
    new_data_dir = 'data'

    if not os.path.exists(original_data_dir):
        original_data_zip = 'ijcnlp_dailydialog.zip'
        if not os.path.exists(original_data_zip):
            raise FileNotFoundError(
                f'Cannot find the original data archive {original_data_zip} in dailydialog/. '
                f'Please download it manually from http://yanran.li/files/ijcnlp_dailydialog.zip')
        with ZipFile(original_data_zip) as archive:
            archive.extractall()

    os.makedirs(new_data_dir, exist_ok=True)

    dataset = 'dailydialog'
    splits = ['train', 'validation', 'test']
    dialogues_by_split = {split:[] for split in splits}
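    # the split zips carry act and emotion labels but no topics, so read the
    # topics from the full-corpus files and key them by dialogue text;
    # duplicate dialogues are resolved by majority vote below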
    dial2topics = {}
    with open(os.path.join(original_data_dir, 'dialogues_text.txt')) as dialog_file, \
        open(os.path.join(original_data_dir, 'dialogues_topic.txt')) as topic_file:
        for dialog, topic in zip(dialog_file, topic_file):
            topic = int(topic.strip())
            dialog = dialog.replace(' __eou__ ', ' ')
            if dialog in dial2topics:
                dial2topics[dialog].append(topic)
            else:
                dial2topics[dialog] = [topic]

    ontology = {'domains': {x:{'description': '', 'slots': {}} for x in topic_map.values()},
                'intents': {x:{'description': ''} for x in act_map.values()},
                'state': {},
                'dialogue_acts': {
                    "categorical": [],
                    "non-categorical": [],
                    "binary": {}
                }}

    detokenizer = TreebankWordDetokenizer()

    for data_split in splits:
        # each split zip holds three parallel files: one line of dialogue text,
        # one of act labels, and one of emotion labels per dialogue
        with ZipFile(os.path.join(original_data_dir, f'{data_split}.zip')) as archive, \
            archive.open(f'{data_split}/dialogues_{data_split}.txt') as dialog_file, \
            archive.open(f'{data_split}/dialogues_act_{data_split}.txt') as act_file, \
            archive.open(f'{data_split}/dialogues_emotion_{data_split}.txt') as emotion_file:
            for dialog_line, act_line, emotion_line in tqdm(zip(dialog_file, act_file, emotion_file)):
                if not dialog_line.strip():
                    break
                utts = dialog_line.decode().split("__eou__")[:-1]
                acts = act_line.decode().split(" ")[:-1]
                emotions = emotion_line.decode().split(" ")[:-1]
                assert len(utts) == len(acts) == len(emotions), 'utterance, act, and emotion counts differ'

                topics = dial2topics[dialog_line.decode().replace(' __eou__ ', ' ')]
                topic = Counter(topics).most_common(1)[0][0]
                domain = topic_map[topic]
                
                dialogue_id = f'{dataset}-{data_split}-{len(dialogues_by_split[data_split])}'
                dialogue = {
                    'dataset': dataset,
                    'data_split': data_split,
                    'dialogue_id': dialogue_id,
                    'original_id': f'{data_split}-{len(dialogues_by_split[data_split])}',
                    'domains': [domain],
                    'turns': []
                }

                for utt, act, emotion in zip(utts, acts, emotions):
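                    # DailyDialog has no speaker annotation; assume strict
                    # user/system alternation starting with the user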
                    speaker = 'user' if len(dialogue['turns']) % 2 == 0 else 'system'
                    intent = act_map[int(act)]
                    emotion = emotion_map[int(emotion)]
                    # re-tokenize and detokenize each sentence to normalize spacing
                    utt = ' '.join([detokenizer.detokenize(word_tokenize(s)) for s in sent_tokenize(utt)])
                    # re-attach curly apostrophes and normalize them to ASCII
                    utt = utt.replace(' ’ ', "'")
                    # insert a missing space after a full stop (but not inside ".com")
                    utt = re.sub(r'\.(?!com)(\w)', lambda x: '. ' + x.group(1), utt)

                    dialogue['turns'].append({
                        'speaker': speaker,
                        'utterance': utt.strip(),
                        'utt_idx': len(dialogue['turns']),
                        'dialogue_acts': {
                            'binary': [{
                                'intent': intent, 
                                'domain': '', 
                                'slot': ''
                            }],
                            'categorical': [],
                            'non-categorical': [],
                        },
                        'emotion': emotion,
                    })

                    ontology["dialogue_acts"]['binary'].setdefault((intent, '', ''), {})
                    ontology["dialogue_acts"]['binary'][(intent, '', '')][speaker] = True

                dialogues_by_split[data_split].append(dialogue)

    ontology["dialogue_acts"]['binary'] = sorted([str({'user': speakers.get('user', False), 'system': speakers.get('system', False), 'intent':da[0],'domain':da[1], 'slot':da[2]}) for da, speakers in ontology["dialogue_acts"]['binary'].items()])
    dialogues = dialogues_by_split['train']+dialogues_by_split['validation']+dialogues_by_split['test']
    with open('dummy_data.json', 'w', encoding='utf-8') as f:
        json.dump(dialogues[:10], f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/ontology.json', 'w', encoding='utf-8') as f:
        json.dump(ontology, f, indent=2, ensure_ascii=False)
    with open(f'{new_data_dir}/dialogues.json', 'w', encoding='utf-8') as f:
        json.dump(dialogues, f, indent=2, ensure_ascii=False)
    with ZipFile('data.zip', 'w', ZIP_DEFLATED) as zf:
        for filename in os.listdir(new_data_dir):
            zf.write(f'{new_data_dir}/{filename}')
    rmtree(original_data_dir)
    rmtree(new_data_dir)
    return dialogues, ontology


if __name__ == '__main__':
    preprocess()
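
# Minimal usage sketch (assumes ijcnlp_dailydialog.zip is available as noted
# in preprocess() and that NLTK's 'punkt' data is installed):
#
#     dialogues, ontology = preprocess()
#     print(dialogues[0]['turns'][0]['utterance'])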