{
    "paper_id": "W10-0403",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T05:07:56.091993Z"
    },
    "title": "Grammaticality Judgement in a Word Completion Task",
    "authors": [
        {
            "first": "Alfred",
            "middle": [],
            "last": "Renaud",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Quillsoft Ltd",
                "location": {
                    "addrLine": "150 Kilgour Road 2416 Queen Street East Toronto",
                    "postCode": "M4G 1R8, M2A 1N1",
                    "settlement": "Toronto",
                    "region": "ON, ON",
                    "country": "Canada, Canada"
                }
            },
            "email": "[email protected]"
        },
        {
            "first": "Fraser",
            "middle": [],
            "last": "Shein",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Quillsoft Ltd",
                "location": {
                    "addrLine": "150 Kilgour Road 2416 Queen Street East Toronto",
                    "postCode": "M4G 1R8, M2A 1N1",
                    "settlement": "Toronto",
                    "region": "ON, ON",
                    "country": "Canada, Canada"
                }
            },
            "email": "[email protected]"
        },
        {
            "first": "Vivian",
            "middle": [],
            "last": "Tsang",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Quillsoft Ltd",
                "location": {
                    "addrLine": "150 Kilgour Road 2416 Queen Street East Toronto",
                    "postCode": "M4G 1R8, M2A 1N1",
                    "settlement": "Toronto",
                    "region": "ON, ON",
                    "country": "Canada, Canada"
                }
            },
            "email": "[email protected]"
        },
        {
            "first": "Bloorview",
            "middle": [
                "Kids"
            ],
            "last": "Rehab",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Quillsoft Ltd",
                "location": {
                    "addrLine": "150 Kilgour Road 2416 Queen Street East Toronto",
                    "postCode": "M4G 1R8, M2A 1N1",
                    "settlement": "Toronto",
                    "region": "ON, ON",
                    "country": "Canada, Canada"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "In this paper, we present findings from a human judgement task we conducted on the effectiveness of syntax filtering in a word completion task. Human participants were asked to review a series of incomplete sentences and identify which words from accompanying lists extend the expressions in a grammatically appropriate way. The accompanying word lists were generated by two word completion systems (our own plus a third-party commercial system) where the ungrammatical items were filtered out. Overall, participants agreed more, to a statistically significant degree, with the syntax-filtered systems than with baseline. However, further analysis suggests that syntax filtering alone does not necessarily improve the overall acceptability and usability of the word completion output. Given that word completion is typically employed in applications to aid writing, unlike other NLP tasks, accounting for the role of writer vs. reader becomes critical. Evaluating word completion and, more generally, applications for alternative and augmentative communication (AAC) will be discussed.",
    "pdf_parse": {
        "paper_id": "W10-0403",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "In this paper, we present findings from a human judgement task we conducted on the effectiveness of syntax filtering in a word completion task. Human participants were asked to review a series of incomplete sentences and identify which words from accompanying lists extend the expressions in a grammatically appropriate way. The accompanying word lists were generated by two word completion systems (our own plus a third-party commercial system) where the ungrammatical items were filtered out. Overall, participants agreed more, to a statistically significant degree, with the syntax-filtered systems than with baseline. However, further analysis suggests that syntax filtering alone does not necessarily improve the overall acceptability and usability of the word completion output. Given that word completion is typically employed in applications to aid writing, unlike other NLP tasks, accounting for the role of writer vs. reader becomes critical. Evaluating word completion and, more generally, applications for alternative and augmentative communication (AAC) will be discussed.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Writers often need help from others to help with spelling and grammar. For persons with physical or learning disabilities, writing can be very stressful because of a greater reliance on the assistance of others. Software tools such as word completion are now commonly used to reduce the physical and cognitive load of completing a word or a sentence and thereby reducing a writer's dependence on others. But can such tools be as effective as a human with adequate linguistic knowledge? While it is hardly possible to completely emulate a human tutor or a communication partner, the purpose of this research is to investigate how much linguistic knowledge is necessary to ensure the usability of word completion. Here, we will focus on the grammaticality of word completion.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Word completion facilitates text entry by suggesting a list of words that can follow a given linguistic context. If the desired word is in the list, the user can select that word with a mouse click or a keystroke, thereby saving the effort of typing the remaining letters of the word. Otherwise, the user can continue typing while the software continues to display new lists of words based on that input.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Completion",
                "sec_num": "1.1"
            },
            {
                "text": "For example, consider a user wants to type \"That girl by the benches\u2026\" After each letter the user manually enters, a system would return a list of potential next words. Say, the next letter the user enters is \"w.\" A system may offer the following choices: a) was, b) were, c) with, d) where, e) wrapped. By suggesting words in any given context, word completion can assist in the composition of well-formed text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Completion",
                "sec_num": "1.1"
            },
            {
                "text": "Typical word completion systems suggest words by exploiting n-gram Markov statistical models (Bentrup, 1987) . These systems probabilistically determine the current word in a sentence given the previous n-1 words as context, based on a pre-generated n-gram language model derived from a corpus. With n typically being of low or-der (two or three, due to sparse data and computational issues), one consequence is that the applicability of suggested words beyond a certain word distance may become somewhat arbitrary. Further design improvements for word completion depend on the user population and the intended use. For example, the demand on the system to have a sophisticated language model may depend on whether the intent is to primarily reduce the physical or cognitive load of entering text. Evaluation approaches can elucidate on design and implementation issues for providing meaningful word choices.",
                "cite_spans": [
                    {
                        "start": 93,
                        "end": 108,
                        "text": "(Bentrup, 1987)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Word Completion",
                "sec_num": "1.1"
            },
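To make the n-gram completion mechanism described above concrete, here is a minimal sketch of prefix-filtered bigram completion. It is illustrative only, not the implementation of WordQ or any other system discussed in this paper; the toy corpus and function names are ours.

```python
from collections import Counter, defaultdict

def train_bigrams(tokens):
    """Count how often each word follows each other word in a corpus."""
    model = defaultdict(Counter)
    for prev, curr in zip(tokens, tokens[1:]):
        model[prev.lower()][curr.lower()] += 1
    return model

def complete(model, prev_word, prefix, n=5):
    """Return up to n candidate next words that follow prev_word and start
    with the typed prefix, ranked by bigram frequency."""
    candidates = model.get(prev_word.lower(), Counter())
    ranked = [w for w, _ in candidates.most_common() if w.startswith(prefix.lower())]
    return ranked[:n]

# Toy usage; with a realistic corpus, ("benches", "w") might yield frequent
# but syntactically mixed candidates such as was/were/with/where.
tokens = "that girl by the benches was in my high school".split()
print(complete(train_bigrams(tokens), "benches", "w"))
```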
            {
                "text": "A number of studies have been carried out to evaluate the efficacy of word completion systems. Koester (1994) measured time savings, which is the reduction in time that the user takes to generate a particular text with the aid of a word completion system compared to the time taken without it. The rationale for this measure is that any word completion system imposes a cognitive load on its users, whereby they now need to 1) change their focus between the target document and the word list display, and possibly between the screen and keyboard; 2) visually scan the word list to decide whether their intended word is present; and 3) select the intended word with the keyboard or mouse. Others have also examined similar visualcognitive issues of using word completion (e.g., Tam and Wells, 2009) . The overall approach implicitly defines a user-centred approach to evaluation by having human subjects simulate the actual writing process (usually in a copying, not writing task). Thus, results depend on the abilities and preferences of individual subjects.",
                "cite_spans": [
                    {
                        "start": 95,
                        "end": 109,
                        "text": "Koester (1994)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 777,
                        "end": 797,
                        "text": "Tam and Wells, 2009)",
                        "ref_id": "BIBREF19"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approaches",
                "sec_num": "1.2"
            },
            {
                "text": "System-based evaluation measures exist, the most common of which is keystroke savings. This measures the reduction in the number of keystrokes needed to produce a given text with the aid of a word completion system. Keystroke savings is an important factor for users with physical disabilities who have difficulty working with a keyboard for which it is desirable to keep the number of keystrokes to a minimum. A complementary measure, completion keystrokes, determines how quickly a given word is predicted by counting the number of characters required to reach completion. Completion keystrokes differs from keystroke savings in that the latter counts the letters remaining in the word.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approaches",
                "sec_num": "1.2"
            },
            {
                "text": "In contrast to the previous two measures, both of which measure at the character level, hit rate measures at the word level by calculating the ratio of the number of words correctly predicted to the total number of words predicted. Given a sufficiently large lexicon, hit rate can be as high as 100% if every letter of every word is manually entered to its completion. As this can be misleading, hit rate is more typically measured with reference to the number of characters already typed in order to assess the system's demand on the user.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approaches",
                "sec_num": "1.2"
            },
            {
                "text": "These objective measures address motor load independent of cognitive load. With the exception of time savings, these measures can be benchmarked automatically by simulating the writing process by using existing texts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approaches",
                "sec_num": "1.2"
            },
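The character- and word-level measures above can be benchmarked automatically by replaying an existing text through a predictor. The sketch below is a simplified illustration under one possible set of scoring conventions (the literature varies); `predict(prev_words, prefix)` is a placeholder for any completion system that returns a ranked word list.

```python
def simulate(text_words, predict, list_size=5):
    """Simulate typing an existing text and tally keystroke savings,
    completion keystrokes, and hit rate under simplified conventions."""
    saved = completion_ks = hits = 0
    for i, word in enumerate(text_words):
        prefix = ""
        for ch in word:
            if word in predict(text_words[:i], prefix)[:list_size]:
                hits += 1                         # word offered before being fully typed
                completion_ks += len(prefix)      # characters needed to reach completion
                saved += len(word) - len(prefix)  # remaining letters the user need not type
                break
            prefix += ch                          # keep typing; ask again with a longer prefix
        else:
            completion_ks += len(word)            # never predicted: user typed the whole word
    total_chars = sum(len(w) for w in text_words)
    return {"keystroke_savings": saved / total_chars,
            "avg_completion_keystrokes": completion_ks / len(text_words),
            "hit_rate": hits / len(text_words)}
```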
            {
                "text": "A shortcoming of these objective measures is that they focus on the reduction on the user's physical demand by simulating the entering of an already written text, and effectively ignore consideration of word choices other than the unique intended word. In reality, the actual writing process depends also on the quality of the entire group of suggested word choices with respect to the intended content. Renaud (2002) addressed this shortcoming by arguing that the syntactic and semantic relations between words can impact on choice-making at the target word. He introduced two measures, validity and appropriateness, measuring grammatical consistency and semantic relevance of all system output, respectively. The former measure calculates the proportion of a system's suggested words that is syntactically acceptable. The latter focuses on the proportion of relevant output based on lexical and domain semantics. Renaud compared a number of commercial systems and found a positive correlation between the new and existing measures. This finding also lends additional support to Wood's (1996) finding that offering syntactically and semantically appropriate choices improves performance. (Note that the converse may not hold true.)",
                "cite_spans": [
                    {
                        "start": 404,
                        "end": 417,
                        "text": "Renaud (2002)",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 1080,
                        "end": 1093,
                        "text": "Wood's (1996)",
                        "ref_id": "BIBREF21"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approaches",
                "sec_num": "1.2"
            },
            {
                "text": "For the remainder of this paper, we will put our emphasis on the impact of linguistic content (here, grammaticality) on the quality of word completion. The paper is organized as follows. In the next section, we will describe the need to incorporate syntactic information in word completion. In sections 3 and 4, we will describe our human judgement task evaluating the grammaticality of word completion. Based on our analysis, we will return to the evaluation issue in section 5 and discuss how grammaticality alone does not address the larger usability issue of word completion. Here, we propose that the word completion task, unlike traditional NLP tasks, requires both the reader's and writer's perspectives, which impacts the interpretation of our evaluation, and in turn impacts design decisions. In section 6, we will conclude by offering a more inclusive perspective on AAC.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approaches",
                "sec_num": "1.2"
            },
            {
                "text": "As shown earlier, many evaluation methods have focused on 1) the proportion of key-presses normally required during a typing session that the user need not to manually enter and 2) the proportion of text words in a typing session that the system is able correctly to predict. For a user with a learning disability or language difficulties, a greater concern is that all presented words be valid, logical, and free of grammatical errors. Current state-of-the-art systems suffer by suggesting words that are often syntactically implausible while excluding more justifiable but less probable suggestions (cf. our example in section 1). A user may be confused by inappropriate suggestions, even if correct suggestions are also present.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Demand for Syntactic Filtering",
                "sec_num": "2"
            },
            {
                "text": "To quantify the importance of syntax in word completion, we compare the average hit rate scores (over all words) with the hit rate scores at points in sentences we consider as syntactically critical (see section 3 for their selection). Nantais et al. (2001) reported an overall hit rate of approximately 56% using bigram word completion after entering the first letter of a word across a large document. However, at the word location where it is crucial to maintain correct syntactic relation with the existing sentence fragment, hit rates are often much lower. In our study situation, the hit rate is at best 39%-these syntactic challenges tend to be semantically contentful and thus present difficulties to human subjects. Likewise, the systems are expected to struggle with them. Without a clear understanding of content specific issues during writing, examining time and keystroke savings alone does not reveal the increased difficulty a user faces at these word positions. We will return to these issues in section 5.",
                "cite_spans": [
                    {
                        "start": 236,
                        "end": 257,
                        "text": "Nantais et al. (2001)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The Demand for Syntactic Filtering",
                "sec_num": "2"
            },
            {
                "text": "Knowledge of syntax can be obtained by first tagging each dictionary word with its part of speech, such as noun or adjective. This information may then be used in either a probabilistic or a symbolic manner. Systems may reason probabilistically by combining tag n-gram models, where the part-ofspeech tags for the previous n-1 words in a sentence are used to predict the tag for the current word, with word n-gram models that cue the resulting part(s) of speech to find words proper (Hunnicutt and Carlberger, 2001 ). Fazly and Hirst (2003) introduced two algorithms for combining tag trigrams with word bigrams. The first algorithm involved conditional independence assumptions between word and tag models, and the second algorithm involved a weighted linear combination of the two models.",
                "cite_spans": [
                    {
                        "start": 483,
                        "end": 514,
                        "text": "(Hunnicutt and Carlberger, 2001",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 518,
                        "end": 540,
                        "text": "Fazly and Hirst (2003)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building Syntactic Knowledge",
                "sec_num": "2.1"
            },
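As a rough illustration of the second combination strategy (a weighted linear interpolation of a word model and a part-of-speech tag model), the sketch below scores a single candidate word. The probability tables, the `tags_of` lookup, and the weight `lam` are assumed placeholders, and Fazly and Hirst's actual algorithms differ in their details.

```python
def combined_score(word, prev_word, prev_tags, word_bigram_p, tag_trigram_p,
                   tags_of, lam=0.5):
    """Score a candidate word by interpolating a word-bigram probability with
    a part-of-speech tag-trigram probability (illustrative sketch only).
    word_bigram_p[(w1, w2)] and tag_trigram_p[(t1, t2, t3)] are pre-estimated
    probability tables; tags_of(word) returns the word's possible POS tags."""
    p_word = word_bigram_p.get((prev_word, word), 1e-9)
    p_tag = max((tag_trigram_p.get((prev_tags[-2], prev_tags[-1], t), 1e-9)
                 for t in tags_of(word)), default=1e-9)
    return lam * p_word + (1 - lam) * p_tag

# Candidates shown to the user would then be the highest-scoring words
# that also match the typed prefix.
```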
            {
                "text": "A fundamental limitation to this approach is that low-order probabilistic language models can only account for relationships between closely colocated words. Symbolic syntactic prediction guided by a grammar, on the other hand, can deal with long-distance word relationships of arbitrary depth by applying rules that govern how words from syntactic categories can be joined, to assign all sentence words to a category. This approach uses knowledge of English grammar to analyze the structure of the sentence in progress and determine the applicable syntactic categories (e.g., noun, verb), along with other features (e.g., singular, past participle), to which the currently typed/predicted word must belong. In this way a word completion system is able to suggest words that are grammatically consistent with the active sentence fragment.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building Syntactic Knowledge",
                "sec_num": "2.1"
            },
            {
                "text": "As such, research closer in nature to our work involves parsers that process the input sentence incrementally as each word is entered. Wood's (1996) augmented phrase-structure grammar showed that symbolic syntactic prediction can improve overall performance when combined with statistical orthographic prediction. McCoy (1998) used the augmented transition network or ATN (Woods, 1970) formalism to find candidate word categories from which to generate word lists. Gustavii and Pettersson (2003) used a chart parser to re-rank, or filter, word lists by grammatical value. These parsing algorithms manipulate some data structure that represents, and im-poses ordering on, syntactic constituents of sentences. Recently, we have been developing a syntax module (Renaud et al., 2010) based on an ATN-style parser, which can facilitate both increasing the level of correctness in parses through grammar correction, and modifying the information collected during parsing for a particular application (Newman, 2007) . Specifically, this system filters words provided by n-gram completion such that the word list only shows words that fit an acceptable grammatical structure. It operates on a longer list of the same frequencyranked words our core predictor generates. Under this setup, our syntax module can influence the final list shown to the user by demoting implausible words that otherwise would have been displayed and replacing them with plausible words that otherwise would not. Our rationale for using a symbolic vs. a probabilistic parser in word completion is beyond the scope of the current paper.",
                "cite_spans": [
                    {
                        "start": 135,
                        "end": 148,
                        "text": "Wood's (1996)",
                        "ref_id": "BIBREF21"
                    },
                    {
                        "start": 372,
                        "end": 385,
                        "text": "(Woods, 1970)",
                        "ref_id": "BIBREF22"
                    },
                    {
                        "start": 465,
                        "end": 495,
                        "text": "Gustavii and Pettersson (2003)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 758,
                        "end": 779,
                        "text": "(Renaud et al., 2010)",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 994,
                        "end": 1008,
                        "text": "(Newman, 2007)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building Syntactic Knowledge",
                "sec_num": "2.1"
            },
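The filtering step just described can be pictured as walking a longer frequency-ranked candidate list and keeping only words the parser accepts, so that demoted implausible words are replaced by lower-ranked but grammatical ones. The sketch below is schematic; `parses(fragment, word)` stands in for the ATN-style parser, and the stub in the usage example is purely hypothetical.

```python
def syntax_filter(fragment, ranked_candidates, parses, list_size=5):
    """Keep only candidates that the parser accepts as grammatical extensions
    of the sentence fragment, preserving the original frequency ranking."""
    shown = []
    for word in ranked_candidates:
        if parses(fragment, word):      # parser accepts fragment + word
            shown.append(word)
            if len(shown) == list_size:
                break
    return shown

# Hypothetical usage with a stub grammaticality check (an arbitrary fixed
# accept set, standing in for a real parser):
accepts = {"was", "wrapped", "wants", "went"}
print(syntax_filter("That girl by the benches",
                    ["was", "with", "were", "where", "wrapped", "wants", "went"],
                    lambda frag, w: w in accepts))
```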
            {
                "text": "To evaluate the impact of syntactic filtering on word completion, we devised a human judgment task where human subjects were asked to judge the grammatical acceptability of a word offered by word completion software, with or without syntactic filtering. Given a partial sentence and a leading prefix for the next word, word completion software presents a number of choices for the potential next word. Although the goal is to assess the grammaticality of predicted words with or without syntactic filtering, the intent is to assess whether the inclusion of syntactic heuristics in the word completion algorithm improves the quality of word choices.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammaticality Judgement Experiment",
                "sec_num": "3"
            },
            {
                "text": "In our experiment, we compared three different word completion systems: our baseline completion system (WordQ\u00ae * , henceforth \"baseline\"), our word completion system with syntax filtering (\"System B\"). We also included a third-party commercial word completion system with syntax filtering built-in (Co:Writer\u00ae \u2020 , \"System C\"). In * http://www.wordq.com; our baseline system uses a bigram language model trained on a corpus of well-edited text. \u2020 http://www.donjohnston.com/products/cowriter/index.html each system, we inputted a partial sentence plus the leading character for the next word. Each system returned a list of five choices for the potential next word. Our subjects were asked to judge the grammatical acceptability of each word (binary decision: yes or no).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "3.1"
            },
            {
                "text": "It is worth noting that the more letters are manually inserted, the narrower the search space becomes for the next word. Nantais et al. (2001) suggested that after inserting two characters, the hit rate via automatic means can be as high as 72%; the hit rate for humans is likely much higher. Given that our goal is to examine the grammaticality of word choices and not hit rate, providing only one leading letter allows sufficient ambiguity on what the potential next word is, which in turn allows for a range of grammatical choices for our judgement task.",
                "cite_spans": [
                    {
                        "start": 121,
                        "end": 142,
                        "text": "Nantais et al. (2001)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Experimental Setup",
                "sec_num": "3.1"
            },
            {
                "text": "We selected our test sentences from Canadian news sources (Toronto Star and the Globe and Mail), which are considered reliably grammatical. We chose a total of 138 sentences. \u2021 Each sentence was truncated into a fragment containing the first x-1 words and the first character of the x th word, where x ranges from three to ten inclusive. The truncation position x was deliberately selected to include a variety of grammatical challenges.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "3.2"
            },
            {
                "text": "We divided the sentence fragments into nine types of grammatical challenges: 1) subject-verb agreement; 2) subject-verb agreement in questionasking; 3) subject-verb agreement within a relative clause; 4) appositives; 5) verb sequence (auxiliary verb-main verb agreement); 6) case agreement; 7) non-finite clauses; 8) number agreement; and 9) others.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "3.2"
            },
            {
                "text": "For example, the sentence \"That girl by the benches was in my high school\" from section 1.1 can be used to test the system's ability to recognize subject-verb agreement if we truncate the sentence to produce the fragment \"That girl by the benches w___.\" Here, subject-verb agreement should be decided against the subject \"girl\" and not the (tempting) subject \"benches.\" \u2021 We did not pick a larger number of sentences due to the time constraint in our experimental setup. The rationale is to avoid over-fatiguing our human subjects (approximately an hour per session). Based on our pilot study, we were able to fit 140 sentences over three one-hour sessions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "3.2"
            },
            {
                "text": "After the initial selection process, we reduced our collection to 123 partial sentences. Because the sentences were not evenly distributed across the nine categories, we divided the sentences into three sets such that the frequency distribution of the sentence types was the same for all three sets (41 sentences per set). The three word completion systems were each assigned a different set. \u00a7",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Sentence Selection",
                "sec_num": "3.2"
            },
            {
                "text": "We fed each partial sentence into the corresponding system to produce a word list for grammatical judgement. Recall our example earlier, given five word choices per partial sentence, for each word choice, our subjects were asked to judge its grammatical acceptability (yes or no).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammaticality Judgements",
                "sec_num": "3.3"
            },
            {
                "text": "We recruited 14 human subjects, all native speakers of English with a university education. Each subject was presented all 123 sentences covering the three systems, in a paper-based task. The sentence order was randomized and the subjects were unaware of which system produced what list. Given that each system produced a list of five options for each partial sentence, each subject produced 5\u00d741=205 judgements for each system. There were 14 sets of such judgements in total.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Grammaticality Judgements",
                "sec_num": "3.3"
            },
            {
                "text": "Our primary objective is to examine the subjects' agreement with the system, and whether the subjects generally agree among themselves. Our rationale is this. If the subjects generally agree with one another, then there is an overall agreement on the perception of grammaticality in word completion. If this is indeed the case, we then need to examine how and why our subjects agree or disagree with the systems. Otherwise, if there is low inter-subject agreement, aside from issues related to the experimental setup, we need to reconsider whether offering grammatical word completion choices is indeed practical and possible.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Analysis",
                "sec_num": "4"
            },
            {
                "text": "We first calculated individual participant agreement with the output of each system (i.e., \u00a7 We initially to used three different sets, i.e., one set per system, to avoid a sampling \"fluke\" of different grammatical difficulties/categories. However, for exactly the same reason, we also tested our system using the two sets for the other two systems for ease of comparison. See section 4.1 for details. averaged over all participants). The baseline scored 68%. System B scored 72% and System C scored 74%. Thus, an early important result was that syntax assistance in general, independent of particular approach or algorithm, does appear to improve subject agreement in a word completion task. (Note that we treat system C as a black box as we are not privy to its algorithms, which are not published.)",
                "cite_spans": [
                    {
                        "start": 91,
                        "end": 92,
                        "text": "\u00a7",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Analysis",
                "sec_num": "4"
            },
            {
                "text": "Overall, the grammaticality of a given test word (i.e., averaged over all test words) had an average agreement of 85%, or by 12 of the 14 participants. The percentage agreement for each system was 84% for the baseline, 87% for system B, and 86% for system C. If at least two-thirds of the participants (10 of 14) agreed on the grammaticality of a particular test word, we considered the collective opinion to be consistent for that word and declared a consensus. Participants reached consensus on 77% of the test words for the baseline, 82% of the test words for system B, and 80% of the test words for system C.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Analysis",
                "sec_num": "4"
            },
            {
                "text": "Next, we calculated consensus participant agreement for each system. This measure was different from the previous in that we considered only those cases where 10 or more of the 14 participants agreed with one another on the grammaticality of a system's test word and discarded all other cases. In 75% of the consensus cases for the baseline, the subjects agreed with the system (by approving on the grammaticality); in the other 25% of the consensus cases the subjects disagreed with the system. System B scored 78% on the consensus agreement and system C scored 81%.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Analysis",
                "sec_num": "4"
            },
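For concreteness, the sketch below computes the three quantities reported above from a matrix of binary judgements, under our reading of the definitions: individual agreement is the overall proportion of "yes" votes (every listed word was offered by the system, so a "yes" agrees with it), a word reaches consensus when at least 10 of the 14 judges fall on the same side, and consensus agreement is the share of consensus words where that side was "yes". The data layout is assumed.

```python
def agreement_measures(judgements, threshold=10):
    """judgements: one list per test word, each a list of binary votes
    (1 = judged grammatical, i.e., agreeing with the system that offered it)."""
    n_judges = len(judgements[0])
    n_words = len(judgements)
    # Individual participant agreement: overall proportion of "yes" votes.
    individual = sum(sum(row) for row in judgements) / (n_words * n_judges)
    # Consensus: at least `threshold` judges on the same side for a word.
    consensus_rows = [row for row in judgements
                      if max(sum(row), n_judges - sum(row)) >= threshold]
    consensus_rate = len(consensus_rows) / n_words
    # Consensus agreement: consensus words whose majority side was "yes".
    consensus_agreement = (sum(1 for row in consensus_rows if sum(row) >= threshold)
                           / len(consensus_rows)) if consensus_rows else 0.0
    return individual, consensus_rate, consensus_agreement
```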
            {
                "text": "A repeated-measures analysis of variance (ANOVA) was performed on the data. For both individual and consensus participant agreement, each of Systems B and C outperformed the baseline system (statistically significant, p<.05), while the difference between the two systems with syntax awareness was not statistically significant.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Analysis",
                "sec_num": "4"
            },
            {
                "text": "To summarize our findings, our subjects generally found the output grammatically more acceptable if syntactic assistance was built in (72% and 74% over 68% in raw participant agreement; 78% and 81% over 75% in consensus agreement). The behaviour of our System B generally was in line with the behaviour of the third-party System C. Finally, the agreement among subjects for all systems was quite high (~85%) and is considered reliable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results and Analysis",
                "sec_num": "4"
            },
            {
                "text": "To further understand the behaviour of our own system (in contrast to our subjects' judgements), we create two new systems, A' and C' based on the output of the baseline system and the thirdparty System C. Recall that the sentence set used in each system is mutually exclusive from the set used in another system. Therefore, this setup introduces an additional set of 41 sentences \u00d7 5 predicted words \u00d7 2 systems = 410 judgements.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
            {
                "text": "Our setup is simple: we feed into our parser each of the sentence fragments for the corresponding system, along with each predicted word originally produced. If our parser accepts the word, the analysis remains unchanged. Otherwise, we count it as a \"negative\" result, which we explain below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
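The re-analysis can be summarized as follows: each originally predicted word is re-checked by our parser; if the parser accepts it, the original agreement counts stand, and if it rejects it, the polarity flips, so "no" votes now count as agreements with the filtered system. A minimal sketch, with `parses(fragment, word)` again an assumed placeholder:

```python
def rescore_with_parser(fragment, predicted_words, human_votes, parses):
    """Recompute per-word agreement after applying the parser as a post-filter.
    human_votes[word] is a list of binary judgements (1 = judged grammatical)."""
    agreements = {}
    for word in predicted_words:
        votes = human_votes[word]
        if parses(fragment, word):
            agreements[word] = sum(votes)                # "yes" votes agree with the system
        else:
            agreements[word] = len(votes) - sum(votes)   # "no" votes agree after filtering
    return agreements
```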
            {
                "text": "Consider again our earlier example, \"The girl by the benches w___.\" Say system C' produces the following options: a) was, b) were, c) with, d) where, e) wrapped. We then attempt to generate a partial parse using the partial sentence with each predicted word, i.e., \"The girl by the benches was,\" \"The girl by the benches were,\" and so on. If, for instance, our parser could not generate a parse for \"The girl by the benches where,\" then we would treat the word choice \"where\" as not approved for the purpose of recalculating subject agreement. So if any subjects had approved its grammaticality (i.e., considered it a grammatical next word), then we counted it as a disagreement (between the parser and the human judge), otherwise, we considered it an agreement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
            {
                "text": "Consider the following example. One partial sentence for System C was \"Japanese farmers immediately pick the shoots which a[m]\u2026\" Only 1 of 14 judges agreed with it. System C' also flagged \"am\" as ungrammatical. Now 13 judges agreed with it.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
            {
                "text": "On the other hand, consider this partial sentence originally from the baseline system, \"The reason we are doing these i[nclude]\u2026\" where 10 judges said yes but our parser could not generate a parse. In this case, A' scores 4 on agreement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
            {
                "text": "Overall, A' overrode 10 decisions and scored 71% agreement as a result. That is a 3% improvement over the baseline 68% score. Nine of the 10 reversed consensus in a positive direction and 1 (example above) reversed consensus in a negative direction. In comparison, C' overrode 6 decisions, and scored 76% (2.0% improvement over the original 74%). Five of 6 cases reversed consensus, all in a positive direction. (The other case reversed a non-consensus in a positive direction.) Given that the theoretical maximum agreements for the two systems are 84% and 86% (i.e., regardless of polarity), there is considerable increase in the subject agreement.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
            {
                "text": "It is worth noting that many subjects made the number agreement mistake due to proximity. In the previous example, \"The reason we are doing these i[nclude]\u2026\", the subjects made the incorrect agreement linking \"include\" to \"these\" instead of linking to \"the reason.\" While these cases are not prevalent, this is one reason (among many) that the theoretical maximum agreement is not 100%.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Subject Agreement with Other Systems",
                "sec_num": "4.1"
            },
            {
                "text": "Although the agreement between the systems and the subjects were high, no system achieved perfect agreement-many words were considered ungrammatical extensions of the partial sentences. We see two possible explanations: 1) the disagreeable output was erroneous; or 2) the disagreeable output was grammatical but judged as ungrammatical under certain conditions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System's vs. Subjects' Perspective",
                "sec_num": "4.2"
            },
            {
                "text": "We manually examined the parse trees of the \"disagreeable\" cases from our system. Interestingly, in most cases, we found there exists a reasonable parse tree leading to a grammatical sentence. We thus conclude that grammaticality judgements of partial sentences might not completely reflect the underlying improvement of the word completion quality. That is, discrepancies between human and computer judgement need not point to a poor quality syntax filter; instead, it may indicate that the system is exhibiting correct behaviour but simply disagrees with subjects on the particular grammatical cases in question. In such cases, subjects' disagreement with the system does not provide sufficient grounds for making modifications to the system's behaviour. Rather, it is worth examining the factors leading to the subjects' perception of a word as an ungrammatical extension of a partial sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System's vs. Subjects' Perspective",
                "sec_num": "4.2"
            },
            {
                "text": "Overall, our results indicate that our subjects agree with the grammaticality of word completion more when syntactic filtering is used than not.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "That said, in light of the disagreeable cases, we believe that the quality of word completion may not be so straightforwardly evaluated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "5"
            },
            {
                "text": "Take this example, \"The plane carrying the soldiers a___.\" The next word \"are\" was unanimously considered ungrammatical by our human judges. Consider the following full sentence version of it: \"The plane carrying the soldiers are contemplating is too difficult a task.\" In this case, the subject is \"the plane carrying\" (as an activity), the relative clause is \"the soldiers are contemplating\", and finally, the verb phrase is \"is too difficult a task.\" This sentence may be difficult to interpret but a meaningful interpretation is possible syntactically and semantically.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Selectional Restriction",
                "sec_num": "5.1"
            },
            {
                "text": "Consider the following variation, \"The political situation the soldiers a___.\" In this case, it is not difficult to conceive that \"are\" is a possible next word, as in \"The political situation the soldiers are discussing is getting worse.\" The syntactic construction is [noun phrase] [relative clause] [verb phrase]. Both partial sentences have a potential grammatical parse. Why then is one considered grammatical and the other not?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Selectional Restriction",
                "sec_num": "5.1"
            },
            {
                "text": "Sentences that induce midpoint reading difficulties in humans are well known in psycholinguistics and are referred to as garden-path sentences (Frazier, 1978) . Reading \"the plane carrying the soldiers\" induces an expectation in the reader's mind that the sentence is about the plane doing the carrying, and not about the carrying of the plane by the soldiers, leading to a \"short circuit\" at the word \"are.\"",
                "cite_spans": [
                    {
                        "start": 143,
                        "end": 158,
                        "text": "(Frazier, 1978)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Selectional Restriction",
                "sec_num": "5.1"
            },
            {
                "text": "In linguistics and CL, one aspect of this phenomenon, selectional restriction, has been explored previously (most notably Levin, 1993 and Resnik, 1995) . Selectional restriction is defined as the semantics of a verb restricting the type of words and phrases that can occur as its arguments. Essentially, the meaning of the verb makes an impact on what is possible syntactically and semantically. What we observe here is a generalized case where it is no longer only about a verb placing syntactic and semantic restrictions on its surrounding words. Instead, we observe how a word or a number of words influencing the semantic interpretation, and in turn impacting on the per-ception of grammaticality of the next word (cf. hit rate issues in section 2).",
                "cite_spans": [
                    {
                        "start": 122,
                        "end": 137,
                        "text": "Levin, 1993 and",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 138,
                        "end": 151,
                        "text": "Resnik, 1995)",
                        "ref_id": "BIBREF18"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Selectional Restriction",
                "sec_num": "5.1"
            },
            {
                "text": "Although our original intent was to study the grammaticality of word completion, ultimately the question is what impacts on the quality of word completion. It is without a doubt that the grammaticality of the next word suggestions impacts on the perception of the quality of word completion. However, we believe the key hinges on whose perspective of quality is considered, which then becomes a usability issue.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approach",
                "sec_num": "5.2"
            },
            {
                "text": "Recall that word completion is designed to aid the writing process. The curious part of our evaluation was that we devised it as a grammaticality judgement task via reading. Is grammaticality different when one is reading vs. writing? We consider this issue in two ways.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Approach",
                "sec_num": "5.2"
            },
            {
                "text": "Let us revisit our garden-path example:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Partial Sentences vs. Full Sentences",
                "sec_num": null
            },
            {
                "text": "1a. The plane carrying the soldiers a[re]\u2026 1b. The plane carrying the soldiers are contemplating is not that difficult a task. 2a. The political situation the soldiers a[re]\u2026 2b. The political situation the soldiers are losing sleep over is getting worse. In sentences 1a and 2a, readers have no choice but to judge the grammaticality of \"are\" based on the existing partial sentence. Depending on the reader's creativity, one may or may not anticipate potential full sentences such as 1b and 2b. In contrast, consider an alternative experimental setup where the readers were offered full sentences such as 1b and 2b and were asked to judge the grammaticality of \"are.\" Given the complexity of the sentences (selectional restriction aside), the readers would have no choice but to consider the existence of a relative clause, which should increase the likelihood of evaluating \"are\" as a grammatical component of the sentence.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Partial Sentences vs. Full Sentences",
                "sec_num": null
            },
            {
                "text": "We have now observed how grammaticality judgements of a potential next word can differ when reading a partial sentence vs. a full sentence. That said, it bears emphasizing that the key issue is to evaluate the quality of a suggested next word given a partial sentence, not grammaticality in complete isolation. When a user uses word completion, he/she is actively engaged in the writing process. No software can truly predict the intent of the writer; the full sentence is yet to be written and cannot be known a priori.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reading vs. Writing",
                "sec_num": null
            },
            {
                "text": "Consider someone who is in the process of writing the sentence \"The plane carrying the soldiers\u2026\" Is this writer likely to be debating in his/her head whether the sentence is about the plane that does the carrying or about \"plane carrying\" as an activity? The writer's intent is, of course, clear to the writer him/herself. In contrast, a sentence may be perfectly grammatical and semantically reasonable, yet a reader may still find it ambiguous and/or difficult to read. In other words, the perceived grammaticality of a next word depends on the task (reading vs. writing). This is not to say that our evaluation task is compromised as a result. Although the general grammar rules do not change, the fact that our reading judgements depend on the context (e.g., partial vs. full sentence) suggests that the reading perspective provides only a partial picture of the quality of output that is intended for a writing task. In our case, higher-quality syntactic filtering (e.g., our parser here) may not lead to greater usability.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Reading vs. Writing",
                "sec_num": null
            },
            {
                "text": "In this paper, we have shown that the quality of word completions depends on the perspective one takes. Considering that AAC is to aid someone in producing content for communication, i.e., for third-party consumption, the reading-writing dichotomy is too serious an issue to ignore. This issue has received some CL attention (Morris, 2004, 2010; Hirst, 2008, 2009) but has not been discussed in the AAC literature. The question remains: how do we then evaluate, and more generally, design and use an AAC application?",
                "cite_spans": [
                    {
                        "start": 325,
                        "end": 338,
                        "text": "(Morris, 2004",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 340,
                        "end": 344,
                        "text": "2010",
                        "ref_id": "BIBREF13"
                    },
                    {
                        "start": 346,
                        "end": 357,
                        "text": "Hirst, 2008",
                        "ref_id": "BIBREF4"
                    },
                    {
                        "start": 359,
                        "end": 363,
                        "text": "2009",
                        "ref_id": "BIBREF5"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Concluding Remarks",
                "sec_num": "6"
            },
            {
                "text": "We believe the issue is far from clear. Take our current focus: the grammaticality of word completion. If the form of the content produced is ungrammatical or difficult to read from the perspective of a reader, there is a risk that the reader will misunderstand the writer's intent. However, from the writer's perspective, unless he/she is aware of the interpretation problems his/her potential readers may face, there is no incentive to produce content any differently; the writer can only produce content based on his/her previous linguistic experience.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Concluding Remarks",
                "sec_num": "6"
            },
            {
                "text": "One may argue that corpus statistics best capture human linguistic behaviour. For example, hit-rate statistics computed over existing corpora are one way of assessing the quality of word completion. However, corpora tell only half of the story: only the writing half is captured; the interpretation issues from the reading side are rarely captured, if at all.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Concluding Remarks",
                "sec_num": "6"
            },
            {
                "text": "More important, the design of word completion is set up in a way that the task consists of both a reading component and a writing one: the appropriateness of suggested words is assessed by the writer via reading during the writing task. In fact, this is not merely a case of reading vs. writing, but rather, an issue of relevance depending on the linguistic context as well as the user's perception of it. Traditionally, researchers in CL and psycholinguistics have attempted to deal with human processing of linguistic content at various levels (cf. the CUNY Conference on Human Sentence Processing, e.g., Merlo and Stevenson, 2002). However, no computational means is truly privy to the content behind the linguistic form. Content, ultimately, resides in the reader's or the writer's head, i.e., in intent. The question remains how best to design AAC to aid someone in communicating this content.",
                "cite_spans": [
                    {
                        "start": 607,
                        "end": 633,
                        "text": "Merlo and Stevenson, 2002)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Concluding Remarks",
                "sec_num": "6"
            },
            {
                "text": "In summary, in our grammaticality judgement task, incorporating syntax in word completion improves the perceived quality of word choices. That said, it is unclear how quality relates to usability. Indeed, the evaluation is far from conclusive in that it only captures the reader's perspective and not the writer's. Currently, we are not aware of the existence of a purely writer-based evaluation for grammaticality of word completion (see Lesher et al., 2002 for one curious attempt). More generally, the reader-writer (or speaker-listener) dichotomy is unexplored in AAC research and should be considered more seriously, because communication (as text, speech, or otherwise) involves multiple people producing and consuming content, and their perceptions of that content can differ considerably. The challenge of AAC may lie in bridging the gap between production and consumption, where communication is neither solely about communicating intent nor solely about making interpretations.",
                "cite_spans": [
                    {
                        "start": 439,
                        "end": 458,
                        "text": "Lesher et al., 2002",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Concluding Remarks",
                "sec_num": "6"
            }
        ],
        "back_matter": [
            {
                "text": "This project is funded by Quillsoft Ltd. We also wish to thank Jiafei Niu (University of Toronto) for conducting our usability study and Frank Rudzicz (University of Toronto) for providing helpful comments.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Exploiting Word Frequencies and their Sequential Dependencies",
                "authors": [
                    {
                        "first": "John",
                        "middle": [],
                        "last": "Bentrup",
                        "suffix": ""
                    }
                ],
                "year": 1987,
                "venue": "Proceedings of RESNA 10 th Annual Conference",
                "volume": "",
                "issue": "",
                "pages": "121--122",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "John Bentrup. 1987. Exploiting Word Frequencies and their Sequential Dependencies. Proceedings of RESNA 10 th Annual Conference, 121-122.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Testing the Efficacy of Part-of-Speech Information in Word Completion",
                "authors": [
                    {
                        "first": "Afsaneh",
                        "middle": [],
                        "last": "Fazly",
                        "suffix": ""
                    },
                    {
                        "first": "Graeme",
                        "middle": [],
                        "last": "Hirst",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the 2003 EACL Workshop on Language Modeling for Text Entry Methods",
                "volume": "",
                "issue": "",
                "pages": "9--16",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Afsaneh Fazly and Graeme Hirst. 2003. Testing the Efficacy of Part-of-Speech Information in Word Completion. Proceedings of the 2003 EACL Work- shop on Language Modeling for Text Entry Meth- ods, 9-16.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "On Comprehending Sentences: Syntactic Parsing Strategies",
                "authors": [
                    {
                        "first": "Lyn",
                        "middle": [],
                        "last": "Frazier",
                        "suffix": ""
                    }
                ],
                "year": 1978,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lyn Frazier. 1978. On Comprehending Sentences: Syn- tactic Parsing Strategies. Ph.D. Thesis, University of Connecticut.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "A Swedish Grammar for Word Prediction",
                "authors": [
                    {
                        "first": "Ebba",
                        "middle": [],
                        "last": "Gustavii",
                        "suffix": ""
                    },
                    {
                        "first": "Eva",
                        "middle": [],
                        "last": "Pettersson",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Ebba Gustavii and Eva Pettersson. 2003. A Swedish Grammar for Word Prediction. Master's Thesis, Department of Linguistics, Uppsala University.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "The Future of Text-Meaning in Computational Linguistics",
                "authors": [
                    {
                        "first": "Graeme",
                        "middle": [],
                        "last": "Hirst",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the 11th International Conference on Text, Speech and Dialogue",
                "volume": "",
                "issue": "",
                "pages": "1--9",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Graeme Hirst. 2008. The Future of Text-Meaning in Computational Linguistics. In Proceedings of the 11th International Conference on Text, Speech and Dialogue, 1-9.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Limitations of the Philosophy of Language Understanding Implicit in Computational Linguistics",
                "authors": [
                    {
                        "first": "Graeme",
                        "middle": [],
                        "last": "Hirst",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proceedings of the Seventh European Conference on Computing and Philosophy",
                "volume": "",
                "issue": "",
                "pages": "108--109",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Graeme Hirst. 2009. Limitations of the Philosophy of Language Understanding Implicit in Computational Linguistics. In Proceedings of the Seventh European Conference on Computing and Philosophy, 108- 109.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Improving Word Prediction Using Markov Models and Heuristic Methods",
                "authors": [
                    {
                        "first": "Sheri",
                        "middle": [],
                        "last": "Hunnicutt",
                        "suffix": ""
                    },
                    {
                        "first": "Johan",
                        "middle": [],
                        "last": "Carlberger",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Augmentative and Alternative Communication",
                "volume": "17",
                "issue": "4",
                "pages": "255--264",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Sheri Hunnicutt and Johan Carlberger. 2001. Improv- ing Word Prediction Using Markov Models and Heuristic Methods. Augmentative and Alternative Communication, 17(4):255-264.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "User Performance with Augmentative Communication Systems: Measurements and Models",
                "authors": [
                    {
                        "first": "Heidi",
                        "middle": [],
                        "last": "Koester",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Heidi Koester. 1994. User Performance with Augmen- tative Communication Systems: Measurements and Models. Ph.D. thesis, University of Michigan.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Limits of Human Word Prediction Performance",
                "authors": [
                    {
                        "first": "Gregory",
                        "middle": [
                            "W"
                        ],
                        "last": "Lesher",
                        "suffix": ""
                    },
                    {
                        "first": "Bryan",
                        "middle": [
                            "J"
                        ],
                        "last": "Moulton",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [
                            "Jeffery"
                        ],
                        "last": "Higginbotham",
                        "suffix": ""
                    },
                    {
                        "first": "Brenna",
                        "middle": [],
                        "last": "Alsofrom",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of 2002 CSUN Conference",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gregory W. Lesher, Bryan J. Moulton, D. Jeffery Higginbotham, and Brenna Alsofrom. 2002. Limits of Human Word Prediction Performance. In Pro- ceedings of 2002 CSUN Conference.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "English Verb Classes and Alternations: A Preliminary Investigation",
                "authors": [
                    {
                        "first": "Beth",
                        "middle": [],
                        "last": "Levin",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Beth Levin. 1993. English Verb Classes and Alterna- tions: A Preliminary Investigation. University of Chicago Press.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "The Intelligent Word Prediction Project",
                "authors": [
                    {
                        "first": "Kathleen",
                        "middle": [
                            "F"
                        ],
                        "last": "McCoy",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kathleen F. McCoy. 1998. The Intelligent Word Pre- diction Project. University of Delaware.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "The Lexical Basis of Sentence Processing: Formal, Computational and Experimental Issues",
                "authors": [
                    {
                        "first": "Paola",
                        "middle": [],
                        "last": "Merlo",
                        "suffix": ""
                    },
                    {
                        "first": "Suzanne",
                        "middle": [],
                        "last": "Stevenson",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Paola Merlo and Suzanne Stevenson, Eds. 2002. The Lexical Basis of Sentence Processing: Formal, Computational and Experimental Issues. John Ben- jamins Publishing Company.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Readers' Interpretations of Lexical Cohesion in Text. Conference of the Canadian Association for Information Science",
                "authors": [
                    {
                        "first": "Jane",
                        "middle": [],
                        "last": "Morris",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jane Morris. 2004. Readers' Interpretations of Lexical Cohesion in Text. Conference of the Canadian As- sociation for Information Science, Winnipeg, Mani- toba.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Individual Differences in the Interpretation of Text: Implications for Information Science",
                "authors": [
                    {
                        "first": "Jane",
                        "middle": [],
                        "last": "Morris",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Journal of the American Society for Information Science and Technology",
                "volume": "61",
                "issue": "1",
                "pages": "141--149",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jane Morris. 2010. Individual Differences in the Inter- pretation of Text: Implications for Information Sci- ence. Journal of the American Society for Informa- tion Science and Technology, 61(1):141-149.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Efficacy of the word prediction algorithm in WordQ",
                "authors": [
                    {
                        "first": "Tom",
                        "middle": [],
                        "last": "Nantais",
                        "suffix": ""
                    },
                    {
                        "first": "Fraser",
                        "middle": [],
                        "last": "Shein",
                        "suffix": ""
                    },
                    {
                        "first": "Mattias",
                        "middle": [],
                        "last": "Johansson",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of the 2001 RESNA Annual Conference",
                "volume": "",
                "issue": "",
                "pages": "77--79",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Tom Nantais, Fraser Shein, and Mattias Johansson. 2001. Efficacy of the word prediction algorithm in WordQ. In Proceedings of the 2001 RESNA Annual Conference, 77-79.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "RH: A Retro Hybrid Parser",
                "authors": [
                    {
                        "first": "Paula",
                        "middle": [
                            "S"
                        ],
                        "last": "Newman",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 2007 NAACL Conference, Companion",
                "volume": "",
                "issue": "",
                "pages": "121--124",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Paula S. Newman. 2007. RH: A Retro Hybrid Parser. In Proceedings of the 2007 NAACL Conference, Companion, 121-124.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Diagnostic Evaluation Measures for Improving Performance of Word Prediction Systems",
                "authors": [
                    {
                        "first": "Alfred",
                        "middle": [],
                        "last": "Renaud",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Alfred Renaud. 2002. Diagnostic Evaluation Measures for Improving Performance of Word Prediction Sys- tems. Master's Thesis, School of Computer Science, University of Waterloo.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "A Symbolic Approach to Parsing in the Context of Word Completion",
                "authors": [
                    {
                        "first": "Alfred",
                        "middle": [],
                        "last": "Renaud",
                        "suffix": ""
                    },
                    {
                        "first": "Fraser",
                        "middle": [],
                        "last": "Shein",
                        "suffix": ""
                    },
                    {
                        "first": "Vivian",
                        "middle": [],
                        "last": "Tsang",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Preparation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Alfred Renaud, Fraser Shein, and Vivian Tsang. 2010. A Symbolic Approach to Parsing in the Context of Word Completion. In Preparation.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "Selectional Constraints: An Information-Theoretic Model and its Computational Realization",
                "authors": [
                    {
                        "first": "Philip",
                        "middle": [],
                        "last": "Resnik",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Cognition",
                "volume": "61",
                "issue": "",
                "pages": "127--159",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Philip Resnik. 1995. Selectional Constraints: An In- formation-Theoretic Model and its Computational Realization. Cognition, 61:127-125.",
                "links": null
            },
            "BIBREF19": {
                "ref_id": "b19",
                "title": "Evaluating the Benefits of Displaying Word Prediction Lists on a Personal Digital Assistant at the Keyboard Level",
                "authors": [
                    {
                        "first": "Cynthia",
                        "middle": [],
                        "last": "Tam",
                        "suffix": ""
                    },
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Wells",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Assistive Technology",
                "volume": "21",
                "issue": "",
                "pages": "105--114",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Cynthia Tam and David Wells. 2009. Evaluating the Benefits of Displaying Word Prediction Lists on a Personal Digital Assistant at the Keyboard Level. Assistive Technology, 21:105-114.",
                "links": null
            },
            "BIBREF20": {
                "ref_id": "b20",
                "title": "An Ecological Perspective of Communication With or Without AAC Use",
                "authors": [
                    {
                        "first": "Vivian",
                        "middle": [],
                        "last": "Tsang",
                        "suffix": ""
                    },
                    {
                        "first": "Kelvin",
                        "middle": [],
                        "last": "Leung",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Vivian Tsang and Kelvin Leung. 2010. An Ecological Perspective of Communication With or Without AAC Use. In Preparation.",
                "links": null
            },
            "BIBREF21": {
                "ref_id": "b21",
                "title": "Syntactic Pre-Processing in Single-Word Prediction for Disabled People",
                "authors": [
                    {
                        "first": "Matthew",
                        "middle": [],
                        "last": "Wood",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Matthew Wood. 1996. Syntactic Pre-Processing in Single-Word Prediction for Disabled People. Ph.D. Thesis, Department of Computer Science, Univer- sity of Bristol.",
                "links": null
            },
            "BIBREF22": {
                "ref_id": "b22",
                "title": "Transition Network Grammars for Natural Language Analysis",
                "authors": [
                    {
                        "first": "William",
                        "middle": [],
                        "last": "Woods",
                        "suffix": ""
                    }
                ],
                "year": 1970,
                "venue": "Communications of the ACM",
                "volume": "13",
                "issue": "10",
                "pages": "591--606",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "William Woods. 1970. Transition Network Grammars for Natural Language Analysis. Communications of the ACM, 13(10):591-606.",
                "links": null
            }
        },
        "ref_entries": {}
    }
}