zhuqi committed: Upload preprocess.py
Commit 2754782, 1 Parent(s): fe68fed

Files changed (1):
  1. preprocess.py +315 -0

preprocess.py ADDED
@@ -0,0 +1,315 @@
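"""Preprocess Taskmaster-1 (TM-1-2019) into the unified dialogue format used by ConvLab.

Expects either the extracted `Taskmaster-master/` directory or the downloaded
`master.zip` in the working directory. Builds the unified ontology and dialogue
list, writes `dummy_data.json`, and packages `ontology.json` and
`dialogues.json` into `data.zip`.
"""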
from zipfile import ZipFile, ZIP_DEFLATED
import json
import os
import copy
from tqdm import tqdm
from shutil import rmtree
import random

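# Natural-language descriptions of each domain and its slots, copied into the
# unified ontology built in preprocess() below.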
descriptions = {
    "uber_lyft": {
        "uber_lyft": "order a car for a ride inside a city",
        "location.from": "pickup location",
        "location.to": "destination of the ride",
        "type.ride": "type of ride",
        "num.people": "number of people",
        "price.estimate": "estimated cost of the ride",
        "duration.estimate": "estimated duration of the ride",
        "time.pickup": "time of pickup",
        "time.dropoff": "time of dropoff",
    },
    "movie_ticket": {
        "movie_ticket": "book movie tickets for a film",
        "name.movie": "name of the movie",
        "name.theater": "name of the theater",
        "num.tickets": "number of tickets",
        "time.start": "start time of the movie",
        "location.theater": "location of the theater",
        "price.ticket": "price of the ticket",
        "type.screening": "type of the screening",
        "time.end": "end time of the movie",
        "time.duration": "duration of the movie",
    },
    "restaurant_reservation": {
        "restaurant_reservation": "search for a restaurant and make a reservation",
        "name.restaurant": "name of the restaurant",
        "name.reservation": "name of the person who makes the reservation",
        "num.guests": "number of guests",
        "time.reservation": "time of the reservation",
        "type.seating": "type of the seating",
        "location.restaurant": "location of the restaurant",
    },
    "coffee_ordering": {
        "coffee_ordering": "order a coffee drink from either Starbucks or Peets for pick up",
        "location.store": "location of the coffee store",
        "name.drink": "name of the drink",
        "size.drink": "size of the drink",
        "num.drink": "number of drinks",
        "type.milk": "type of the milk",
        "preference": "user preference of the drink",
    },
    "pizza_ordering": {
        "pizza_ordering": "order a pizza",
        "name.store": "name of the pizza store",
        "name.pizza": "name of the pizza",
        "size.pizza": "size of the pizza",
        "type.topping": "type of the topping",
        "type.crust": "type of the crust",
        "preference": "user preference of the pizza",
        "location.store": "location of the pizza store",
    },
    "auto_repair": {
        "auto_repair": "set up an auto repair appointment with a repair shop",
        "name.store": "name of the repair store",
        "name.customer": "name of the customer",
        "date.appt": "date of the appointment",
        "time.appt": "time of the appointment",
        "reason.appt": "reason for the appointment",
        "name.vehicle": "name of the vehicle",
        "year.vehicle": "year of the vehicle",
        "location.store": "location of the repair store",
    }
}


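# Map the domain prefix of an original `instruction_id` (e.g. 'uber') to the
# full domain name used in the ontology.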
def normalize_domain_name(domain):
    if domain == 'auto':
        return 'auto_repair'
    elif domain == 'pizza':
        return 'pizza_ordering'
    elif domain == 'coffee':
        return 'coffee_ordering'
    elif domain == 'uber':
        return 'uber_lyft'
    elif domain == 'restaurant':
        return 'restaurant_reservation'
    elif domain == 'movie':
        return 'movie_ticket'
    raise ValueError(f'unknown domain: {domain}')


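# TM-1 logs may contain deleted utterances and several consecutive utterances
# from the same speaker; merged turns get their span annotations shifted so
# that start/end indices still point into the merged text.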
def format_turns(ori_turns):
    # delete invalid turns and merge continuous turns
    new_turns = []
    previous_speaker = None
    utt_idx = 0
    for i, turn in enumerate(ori_turns):
        speaker = 'system' if turn['speaker'] == 'ASSISTANT' else 'user'
        turn['speaker'] = speaker
        if turn['text'] == '(deleted)':
            continue
        if not previous_speaker:
            # first turn
            assert speaker != previous_speaker
        if speaker != previous_speaker:
            # switch speaker
            previous_speaker = speaker
            new_turns.append(copy.deepcopy(turn))
            utt_idx += 1
        else:
            # continuous speaking of the same speaker
            last_turn = new_turns[-1]
            # skip repeated turn
            if turn['text'] in ori_turns[i-1]['text']:
                continue
            # merge continuous turns
            index_shift = len(last_turn['text']) + 1
            last_turn['text'] += ' ' + turn['text']
            if 'segments' in turn:
                last_turn.setdefault('segments', [])
                for segment in turn['segments']:
                    segment['start_index'] += index_shift
                    segment['end_index'] += index_shift
                last_turn['segments'] += turn['segments']
    return new_turns


def preprocess():
    original_data_dir = 'Taskmaster-master'
    new_data_dir = 'data'

    if not os.path.exists(original_data_dir):
        original_data_zip = 'master.zip'
        if not os.path.exists(original_data_zip):
            raise FileNotFoundError(
                f'cannot find original data {original_data_zip} in tm1/; please manually download master.zip '
                f'from https://github.com/google-research-datasets/Taskmaster/archive/refs/heads/master.zip')
        else:
            archive = ZipFile(original_data_zip)
            archive.extractall()

    os.makedirs(new_data_dir, exist_ok=True)

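    # skeleton of the unified ontology; domain and slot entries are filled in
    # from the original TM-1 ontology file below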
    ontology = {'domains': {},
                'intents': {
                    'inform': {'description': 'inform the value of a slot or general information.'},
                    'accept': {'description': 'accept the value of a slot or a transaction'},
                    'reject': {'description': 'reject the value of a slot or a transaction'}
                },
                'state': {},
                'dialogue_acts': {
                    "categorical": {},
                    "non-categorical": {},
                    "binary": {}
                }}
    ori_ontology = {}
    for _, item in json.load(open(os.path.join(original_data_dir, "TM-1-2019/ontology.json"))).items():
        ori_ontology[item["id"]] = item

    for domain, item in ori_ontology.items():
        ontology['domains'][domain] = {'description': descriptions[domain][domain], 'slots': {}}
        ontology['state'][domain] = {}
        for slot in item['required'] + item['optional']:
            ontology['domains'][domain]['slots'][slot] = {
                'description': descriptions[domain][slot],
                'is_categorical': False,
                'possible_values': [],
            }
            ontology['state'][domain][slot] = ''

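    # process both corpora: self-dialogs keep their official split, while the
    # WOZ dialogs are split randomly with a fixed seed (8:1:1)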
    dataset = 'tm1'
    splits = ['train', 'validation', 'test']
    dialogues_by_split = {split: [] for split in splits}
    dialog_files = ["TM-1-2019/self-dialogs.json", "TM-1-2019/woz-dialogs.json"]
    for file_idx, filename in enumerate(dialog_files):
        data = json.load(open(os.path.join(original_data_dir, filename)))
        if file_idx == 0:
            # original split for self dialogs
            dial_id2split = {}
            for data_split in ['train', 'dev', 'test']:
                with open(os.path.join(original_data_dir, f"TM-1-2019/train-dev-test/{data_split}.csv")) as f:
                    for line in f:
                        dial_id = line.split(',')[0]
                        dial_id2split[dial_id] = data_split if data_split != 'dev' else 'validation'
        else:
            # random split for woz dialogs 8:1:1
            random.seed(42)
            dial_ids = [d['conversation_id'] for d in data]
            random.shuffle(dial_ids)
            dial_id2split = {}
            for dial_id in dial_ids[:int(0.8*len(dial_ids))]:
                dial_id2split[dial_id] = 'train'
            for dial_id in dial_ids[int(0.8*len(dial_ids)):int(0.9*len(dial_ids))]:
                dial_id2split[dial_id] = 'validation'
            for dial_id in dial_ids[int(0.9*len(dial_ids)):]:
                dial_id2split[dial_id] = 'test'

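        # convert each dialog into the unified format: one dict per dialogue
        # with per-turn utterances, dialogue acts, and (for user turns) state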
        for d in tqdm(data, desc='processing taskmaster-{}'.format(filename)):
            # delete empty dialogs and invalid dialogs
            if len(d['utterances']) == 0:
                continue
            if len(set([t['speaker'] for t in d['utterances']])) == 1:
                continue
            data_split = dial_id2split[d["conversation_id"]]
            dialogue_id = f'{dataset}-{data_split}-{len(dialogues_by_split[data_split])}'
            cur_domains = [normalize_domain_name(d["instruction_id"].split('-', 1)[0])]
            assert len(cur_domains) == 1 and cur_domains[0] in ontology['domains']
            domain = cur_domains[0]
            dialogue = {
                'dataset': dataset,
                'data_split': data_split,
                'dialogue_id': dialogue_id,
                'original_id': d["conversation_id"],
                'domains': cur_domains,
                'turns': []
            }
            turns = format_turns(d['utterances'])
            prev_state = {}
            prev_state.setdefault(domain, copy.deepcopy(ontology['state'][domain]))

            for utt_idx, uttr in enumerate(turns):
                speaker = uttr['speaker']
                turn = {
                    'speaker': speaker,
                    'utterance': uttr['text'],
                    'utt_idx': utt_idx,
                    'dialogue_acts': {
                        'binary': [],
                        'categorical': [],
                        'non-categorical': [],
                    },
                }
                in_span = [0] * len(turn['utterance'])

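                # 'segments' holds the span annotations of this utterance;
                # convert each span into a binary or non-categorical dialogue act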
                if 'segments' in uttr:
                    # sort the spans by length so that, where spans overlap, the shortest one is kept
                    segments = sorted(uttr['segments'], key=lambda x: len(x['text']))
                    for segment in segments:
                        # Each conversation was annotated by two workers.
                        # only keep the first annotation for the span
                        item = segment['annotations'][0]
                        intent = 'inform'  # default intent
                        slot = item['name'].split('.', 1)[-1]
                        if slot.endswith('.accept') or slot.endswith('.reject'):
                            # intent=accept/reject
                            intent = slot[-6:]
                            slot = slot[:-7]
                        if slot not in ontology['domains'][domain]['slots']:
                            # no slot, only general reference to a transaction, binary dialog act
                            turn['dialogue_acts']['binary'].append({
                                'intent': intent,
                                'domain': domain,
                                'slot': '',
                            })
                        else:
                            assert turn['utterance'][segment['start_index']:segment['end_index']] == segment['text']
                            # skip overlapped spans, keep the shortest one
                            if sum(in_span[segment['start_index']:segment['end_index']]) > 0:
                                continue
                            in_span[segment['start_index']:segment['end_index']] = [1] * (segment['end_index'] - segment['start_index'])
                            turn['dialogue_acts']['non-categorical'].append({
                                'intent': intent,
                                'domain': domain,
                                'slot': slot,
                                'value': segment['text'],
                                'start': segment['start_index'],
                                'end': segment['end_index']
                            })

                turn['dialogue_acts']['non-categorical'] = sorted(turn['dialogue_acts']['non-categorical'], key=lambda x: x['start'])

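                # deduplicate and sort the binary dialogue acts of this turn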
                bdas = set()
                for da in turn['dialogue_acts']['binary']:
                    da_tuple = (da['intent'], da['domain'], da['slot'],)
                    bdas.add(da_tuple)
                turn['dialogue_acts']['binary'] = [{'intent': bda[0], 'domain': bda[1], 'slot': bda[2]} for bda in sorted(bdas)]
                # add to dialogue_acts dictionary in the ontology
                for da_type in turn['dialogue_acts']:
                    das = turn['dialogue_acts'][da_type]
                    for da in das:
                        ontology["dialogue_acts"][da_type].setdefault((da['intent'], da['domain'], da['slot']), {})
                        ontology["dialogue_acts"][da_type][(da['intent'], da['domain'], da['slot'])][speaker] = True

                for da in turn['dialogue_acts']['non-categorical']:
                    slot, value = da['slot'], da['value']
                    assert slot in prev_state[domain]
                    # do not add rejected slot values to the state
                    if da['intent'] != 'reject':
                        prev_state[domain][slot] = value

                if speaker == 'user':
                    turn['state'] = copy.deepcopy(prev_state)

                dialogue['turns'].append(turn)
            dialogues_by_split[data_split].append(dialogue)

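    # the dialogue-act keys are tuples, which JSON cannot serialize, so convert
    # each entry to a string before dumping the ontology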
    for da_type in ontology['dialogue_acts']:
        ontology["dialogue_acts"][da_type] = sorted([str({'user': speakers.get('user', False), 'system': speakers.get('system', False), 'intent': da[0], 'domain': da[1], 'slot': da[2]}) for da, speakers in ontology["dialogue_acts"][da_type].items()])
    dialogues = dialogues_by_split['train'] + dialogues_by_split['validation'] + dialogues_by_split['test']
    json.dump(dialogues[:10], open('dummy_data.json', 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
    json.dump(ontology, open(f'{new_data_dir}/ontology.json', 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
    json.dump(dialogues, open(f'{new_data_dir}/dialogues.json', 'w', encoding='utf-8'), indent=2, ensure_ascii=False)
    with ZipFile('data.zip', 'w', ZIP_DEFLATED) as zf:
        for filename in os.listdir(new_data_dir):
            zf.write(f'{new_data_dir}/{filename}')
    rmtree(original_data_dir)
    rmtree(new_data_dir)
    return dialogues, ontology


if __name__ == '__main__':
    preprocess()