zhuqi committed
Commit 86b7615
1 Parent(s): 8418ac0

Upload preprocess.py

Files changed (1)
  1. preprocess.py +156 -0
preprocess.py ADDED
@@ -0,0 +1,156 @@
+ from zipfile import ZipFile, ZIP_DEFLATED
+ from shutil import rmtree
+ import json
+ import os
+ from tqdm import tqdm
+ from collections import Counter
+ from nltk.tokenize import sent_tokenize, word_tokenize
+ from nltk.tokenize.treebank import TreebankWordDetokenizer
+ import re
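+ # NOTE: sent_tokenize/word_tokenize below need NLTK's 'punkt' tokenizer
+ # models; run nltk.download('punkt') once if they are not installed.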
+
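+ # integer label -> name mappings from the DailyDialog annotation files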
+ topic_map = {
+     1: "Ordinary Life",
+     2: "School Life",
+     3: "Culture & Education",
+     4: "Attitude & Emotion",
+     5: "Relationship",
+     6: "Tourism",
+     7: "Health",
+     8: "Work",
+     9: "Politics",
+     10: "Finance"
+ }
+
+ act_map = {
+     1: "inform",
+     2: "question",
+     3: "directive",
+     4: "commissive"
+ }
+
+ emotion_map = {
+     0: "no emotion",
+     1: "anger",
+     2: "disgust",
+     3: "fear",
+     4: "happiness",
+     5: "sadness",
+     6: "surprise"
+ }
+
+ def preprocess():
+     original_data_dir = 'ijcnlp_dailydialog'
+     new_data_dir = 'data'
+
+     if not os.path.exists(original_data_dir):
+         original_data_zip = 'ijcnlp_dailydialog.zip'
+         if not os.path.exists(original_data_zip):
+             raise FileNotFoundError(
+                 f'Cannot find the original data {original_data_zip} in dailydialog/; please download '
+                 'ijcnlp_dailydialog.zip manually from http://yanran.li/files/ijcnlp_dailydialog.zip')
+         else:
+             archive = ZipFile(original_data_zip)
+             archive.extractall()
+
+     os.makedirs(new_data_dir, exist_ok=True)
+
+     dataset = 'dailydialog'
+     splits = ['train', 'validation', 'test']
+     dialogues_by_split = {split: [] for split in splits}
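+     # map each dialogue's text to its topic label(s); dialogues that occur
+     # more than once collect several votes, resolved by majority vote below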
+     dial2topics = {}
+     with open(os.path.join(original_data_dir, 'dialogues_text.txt')) as dialog_file, \
+             open(os.path.join(original_data_dir, 'dialogues_topic.txt')) as topic_file:
+         for dialog, topic in zip(dialog_file, topic_file):
+             topic = int(topic.strip())
+             dialog = dialog.replace(' __eou__ ', ' ')
+             if dialog in dial2topics:
+                 dial2topics[dialog].append(topic)
+             else:
+                 dial2topics[dialog] = [topic]
+
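+     # ontology skeleton: topics become domains and dialogue acts become
+     # intents; binary acts are collected as (intent, domain, slot) keys below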
+     ontology = {'domains': {x: {'description': '', 'slots': {}} for x in topic_map.values()},
+                 'intents': {x: {'description': ''} for x in act_map.values()},
+                 'state': {},
+                 'dialogue_acts': {
+                     "categorical": [],
+                     "non-categorical": [],
+                     "binary": {}
+                 }}
+
+     detokenizer = TreebankWordDetokenizer()
+
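+     # each split zip holds parallel files (one dialogue per line) for the
+     # dialogue text, dialogue acts, and emotions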
+     for data_split in splits:
+         archive = ZipFile(os.path.join(original_data_dir, f'{data_split}.zip'))
+         with archive.open(f'{data_split}/dialogues_{data_split}.txt') as dialog_file, \
+                 archive.open(f'{data_split}/dialogues_act_{data_split}.txt') as act_file, \
+                 archive.open(f'{data_split}/dialogues_emotion_{data_split}.txt') as emotion_file:
+             for dialog_line, act_line, emotion_line in tqdm(zip(dialog_file, act_file, emotion_file)):
+                 if not dialog_line.strip():
+                     break
+                 utts = dialog_line.decode().split("__eou__")[:-1]
+                 acts = act_line.decode().split(" ")[:-1]
+                 emotions = emotion_line.decode().split(" ")[:-1]
+                 assert len(utts) == len(acts) == len(emotions), "mismatched turn counts between dialogue, act, and emotion files"
+
+                 # majority vote over the topic labels collected for this dialogue
+                 topics = dial2topics[dialog_line.decode().replace(' __eou__ ', ' ')]
+                 topic = Counter(topics).most_common(1)[0][0]
+                 domain = topic_map[topic]
+
+                 dialogue_id = f'{dataset}-{data_split}-{len(dialogues_by_split[data_split])}'
+                 dialogue = {
+                     'dataset': dataset,
+                     'data_split': data_split,
+                     'dialogue_id': dialogue_id,
+                     'original_id': f'{data_split}-{len(dialogues_by_split[data_split])}',
+                     'domains': [domain],
+                     'turns': []
+                 }
+
+                 for utt, act, emotion in zip(utts, acts, emotions):
+                     # speakers strictly alternate, starting with the user
+                     speaker = 'user' if len(dialogue['turns']) % 2 == 0 else 'system'
+                     intent = act_map[int(act)]
+                     emotion = emotion_map[int(emotion)]
+                     # re-tokenize
+                     utt = ' '.join([detokenizer.detokenize(word_tokenize(s)) for s in sent_tokenize(utt)])
+                     # replace with common apostrophe
+                     utt = utt.replace(' ’ ', "'")
+                     # add space after full-stop
+                     utt = re.sub(r'\.(?!com)(\w)', lambda x: '. ' + x.group(1), utt)
+
+                     dialogue['turns'].append({
+                         'speaker': speaker,
+                         'utterance': utt.strip(),
+                         'utt_idx': len(dialogue['turns']),
+                         'dialogue_acts': {
+                             'binary': [{
+                                 'intent': intent,
+                                 'domain': '',
+                                 'slot': ''
+                             }],
+                             'categorical': [],
+                             'non-categorical': [],
+                         },
+                         'emotion': emotion,
+                     })
+
+                     # track which speakers use each binary act, for the ontology
+                     ontology["dialogue_acts"]['binary'].setdefault((intent, '', ''), {})
+                     ontology["dialogue_acts"]['binary'][(intent, '', '')][speaker] = True
+
+                 dialogues_by_split[data_split].append(dialogue)
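+
+     # tuple keys are not JSON-serializable, so the binary acts are flattened
+     # to strings and sorted before writing the ontology and dialogue files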
+     ontology["dialogue_acts"]['binary'] = sorted([str({'user': speakers.get('user', False), 'system': speakers.get('system', False), 'intent': da[0], 'domain': da[1], 'slot': da[2]}) for da, speakers in ontology["dialogue_acts"]['binary'].items()])
+     dialogues = dialogues_by_split['train'] + dialogues_by_split['validation'] + dialogues_by_split['test']
+     json.dump(dialogues[:10], open('dummy_data.json', 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
+     json.dump(ontology, open(f'{new_data_dir}/ontology.json', 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
+     json.dump(dialogues, open(f'{new_data_dir}/dialogues.json', 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
+     with ZipFile('data.zip', 'w', ZIP_DEFLATED) as zf:
+         for filename in os.listdir(new_data_dir):
+             zf.write(f'{new_data_dir}/{filename}')
+     rmtree(original_data_dir)
+     rmtree(new_data_dir)
+     return dialogues, ontology
+
+
+ if __name__ == '__main__':
+     preprocess()
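
For context: this script expects ijcnlp_dailydialog.zip next to it (downloaded manually from http://yanran.li/files/ijcnlp_dailydialog.zip) and produces data.zip (containing dialogues.json and ontology.json) plus a ten-dialogue dummy_data.json, removing the intermediate directories afterwards. A minimal invocation sketch, assuming a fresh environment where the NLTK models are not yet installed:

    import nltk
    nltk.download('punkt')  # tokenizer models used for re-tokenization

    from preprocess import preprocess
    dialogues, ontology = preprocess()
    print(f'{len(dialogues)} dialogues written to data.zip')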