poccio committed on
Commit
d1f912d
1 Parent(s): cf31151

initial commit

Browse files
Files changed (1) hide show
  1. app.py +121 -123
app.py CHANGED
@@ -54,136 +54,134 @@ def main(
54
  unsafe_allow_html=True,
55
  )
56
 
57
- def model_demo():
58
- @st.cache(allow_output_mutation=True)
59
- def load_resources(inventory_path):
60
-
61
- # load nlp
62
- nlp = spacy.load("en_core_web_sm")
63
- extend_config = dict(
64
- checkpoint_path=model_checkpoint_path,
65
- mentions_inventory_path=inventory_path,
66
- device=cuda_device,
67
- tokens_per_batch=10_000,
68
- )
69
- nlp.add_pipe("extend", after="ner", config=extend_config)
70
-
71
- # mock call to load resources
72
- nlp(examples[0])
73
-
74
- # return
75
- return nlp
76
-
77
- # read input
78
- placeholder = st.selectbox(
79
- "Examples",
80
- options=examples,
81
- index=0,
82
- )
83
- input_text = st.text_area("Input text to entity-disambiguate", placeholder)
84
-
85
- # custom inventory
86
- uploaded_inventory_path = st.file_uploader(
87
- "[Optional] Upload custom inventory (tsv file, mention \\t desc1 \\t desc2 \\t)",
88
- accept_multiple_files=False,
89
- type=["tsv"],
90
- )
91
- if uploaded_inventory_path is not None:
92
- inventory_path = f"data/inventories/{uploaded_inventory_path.name}"
93
- with open(inventory_path, "wb") as f:
94
- f.write(uploaded_inventory_path.getbuffer())
95
- else:
96
- inventory_path = default_inventory_path
97
-
98
- # load model and color generator
99
- nlp = load_resources(inventory_path)
100
- color_generator = get_md_200_random_color_generator()
101
-
102
- if st.button("Disambiguate", key="classify"):
103
-
104
- # tag sentence
105
- time_start = time.perf_counter()
106
- doc = nlp(input_text)
107
- time_end = time.perf_counter()
108
-
109
- # extract entities
110
- entities = {}
111
- for ent in doc.ents:
112
- if ent._.disambiguated_entity is not None:
113
- entities[ent.start_char] = (
114
- ent.start_char,
115
- ent.end_char,
116
- ent.text,
117
- ent._.disambiguated_entity,
118
- )
119
-
120
- # create annotated html components
121
-
122
- annotated_html_components = []
123
-
124
- assert all(any(t.idx == _s for t in doc) for _s in entities)
125
- it = iter(list(doc))
126
- while True:
127
- try:
128
- t = next(it)
129
- except StopIteration:
130
- break
131
- if t.idx in entities:
132
- _start, _end, _text, _entity = entities[t.idx]
133
- while t.idx + len(t) != _end:
134
- t = next(it)
135
- annotated_html_components.append(
136
- str(annotation(*(_text, _entity, color_generator())))
137
- )
138
- else:
139
- annotated_html_components.append(str(html.escape(t.text)))
140
-
141
- st.markdown(
142
- "\n".join(
143
- [
144
- "<div>",
145
- *annotated_html_components,
146
- "<p></p>"
147
- f'<div style="text-align: right"><p style="color: gray">Time: {(time_end - time_start):.2f}s</p></div>'
148
- "</div>",
149
- ]
150
- ),
151
- unsafe_allow_html=True,
152
- )
153
-
154
- def hiw():
155
- st.markdown("ExtEnD frames Entity Disambiguation as a text extraction problem:")
156
- st.image(
157
- "data/repo-assets/extend_formulation.png", caption="ExtEnD Formulation"
158
- )
159
- st.markdown(
160
- """
161
- Given the sentence *After a long fight Superman saved Metropolis*, where *Superman* is the mention
162
- to disambiguate, ExtEnD first concatenates the descriptions of all the possible candidates of *Superman* in the
163
- inventory and then selects the span whose description best suits the mention in its context.
164
-
165
- To convert this task to end2end entity linking, as we do in *Model demo*, we leverage spaCy
166
- (more specifically, its NER) and run ExtEnD on each named entity spaCy identifies
167
- (if the corresponding mention is contained in the inventory).
168
  """
169
- )
170
 
171
- def abstract():
172
- st.write(
173
- """
174
- Local models for Entity Disambiguation (ED) have today become extremely powerful, in most part thanks to the advent of large pre-trained language models. However, despite their significant performance achievements, most of these approaches frame ED through classification formulations that have intrinsic limitations, both computationally and from a modeling perspective. In contrast with this trend, here we propose EXTEND, a novel local formulation for ED where we frame this task as a text extraction problem, and present two Transformer-based architectures that implement it. Based on experiments in and out of domain, and training over two different data regimes, we find our approach surpasses all its competitors in terms of both data efficiency and raw performance. EXTEND outperforms its alternatives by as few as 6 F 1 points on the more constrained of the two data regimes and, when moving to the other higher-resourced regime, sets a new state of the art on 4 out of 6 benchmarks under consideration, with average improvements of 0.7 F 1 points overall and 1.1 F 1 points out of domain. In addition, to gain better insights from our results, we also perform a fine-grained evaluation of our performances on different classes of label frequency, along with an ablation study of our architectural choices and an error analysis. We release our code and models for research purposes at https:// github.com/SapienzaNLP/extend.
 
 
175
 
176
- Link to full paper: https://www.researchgate.net/publication/359392427_ExtEnD_Extractive_Entity_Disambiguation
177
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
  )
 
 
 
 
 
 
 
179
 
180
- tabs = dict(
181
- model=("Model demo", model_demo),
182
- hiw=("How it works", hiw),
183
- abstract=("Abstract", abstract),
 
184
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
 
186
- tabbed_navigation(tabs, "model")
187
 
188
 
189
  if __name__ == "__main__":
 
54
  unsafe_allow_html=True,
55
  )
56
 
57
+ # description
58
+ st.write(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
  """
60
+ Local models for Entity Disambiguation (ED) have today become extremely powerful, in most part thanks to the advent of large pre-trained language models. However, despite their significant performance achievements, most of these approaches frame ED through classification formulations that have intrinsic limitations, both computationally and from a modeling perspective. In contrast with this trend, here we propose EXTEND, a novel local formulation for ED where we frame this task as a text extraction problem, and present two Transformer-based architectures that implement it. Based on experiments in and out of domain, and training over two different data regimes, we find our approach surpasses all its competitors in terms of both data efficiency and raw performance. EXTEND outperforms its alternatives by as few as 6 F 1 points on the more constrained of the two data regimes and, when moving to the other higher-resourced regime, sets a new state of the art on 4 out of 6 benchmarks under consideration, with average improvements of 0.7 F 1 points overall and 1.1 F 1 points out of domain. In addition, to gain better insights from our results, we also perform a fine-grained evaluation of our performances on different classes of label frequency, along with an ablation study of our architectural choices and an error analysis. We release our code and models for research purposes at https:// github.com/SapienzaNLP/extend.
61
 
62
+ Link to full paper: https://www.researchgate.net/publication/359392427_ExtEnD_Extractive_Entity_Disambiguation
63
+ Link to GitHub paper: https://github.com/SapienzaNLP/extend
64
+ """
65
+ )
66
+ st.markdown("""
67
+ ## How it works
68
 
69
+ ExtEnD frames Entity Disambiguation as a text extraction problem:
70
+ """)
71
+ st.image(
72
+ "data/repo-assets/extend_formulation.png", caption="ExtEnD Formulation"
73
+ )
74
+ st.markdown(
75
+ """
76
+ Given the sentence *After a long fight Superman saved Metropolis*, where *Superman* is the mention
77
+ to disambiguate, ExtEnD first concatenates the descriptions of all the possible candidates of *Superman* in the
78
+ inventory and then selects the span whose description best suits the mention in its context.
79
+
80
+ To convert this task to end2end entity linking, as we do in *Model demo*, we leverage spaCy
81
+ (more specifically, its NER) and run ExtEnD on each named entity spaCy identifies
82
+ (if the corresponding mention is contained in the inventory).
83
+ """
84
+ )
85
+
86
+ # demo
87
+ st.markdown("## Demo")
88
+
89
+ @st.cache(allow_output_mutation=True)
90
+ def load_resources(inventory_path):
91
+
92
+ # load nlp
93
+ nlp = spacy.load("en_core_web_sm")
94
+ extend_config = dict(
95
+ checkpoint_path=model_checkpoint_path,
96
+ mentions_inventory_path=inventory_path,
97
+ device=cuda_device,
98
+ tokens_per_batch=10_000,
99
  )
100
+ nlp.add_pipe("extend", after="ner", config=extend_config)
101
+
102
+ # mock call to load resources
103
+ nlp(examples[0])
104
+
105
+ # return
106
+ return nlp
107
 
108
+ # read input
109
+ placeholder = st.selectbox(
110
+ "Examples",
111
+ options=examples,
112
+ index=0,
113
  )
114
+ input_text = st.text_area("Input text to entity-disambiguate", placeholder)
115
+
116
+ # custom inventory
117
+ uploaded_inventory_path = st.file_uploader(
118
+ "[Optional] Upload custom inventory (tsv file, mention \\t desc1 \\t desc2 \\t)",
119
+ accept_multiple_files=False,
120
+ type=["tsv"],
121
+ )
122
+ if uploaded_inventory_path is not None:
123
+ inventory_path = f"data/inventories/{uploaded_inventory_path.name}"
124
+ with open(inventory_path, "wb") as f:
125
+ f.write(uploaded_inventory_path.getbuffer())
126
+ else:
127
+ inventory_path = default_inventory_path
128
+
129
+ # load model and color generator
130
+ nlp = load_resources(inventory_path)
131
+ color_generator = get_md_200_random_color_generator()
132
+
133
+ if st.button("Disambiguate", key="classify"):
134
+
135
+ # tag sentence
136
+ time_start = time.perf_counter()
137
+ doc = nlp(input_text)
138
+ time_end = time.perf_counter()
139
+
140
+ # extract entities
141
+ entities = {}
142
+ for ent in doc.ents:
143
+ if ent._.disambiguated_entity is not None:
144
+ entities[ent.start_char] = (
145
+ ent.start_char,
146
+ ent.end_char,
147
+ ent.text,
148
+ ent._.disambiguated_entity,
149
+ )
150
+
151
+ # create annotated html components
152
+
153
+ annotated_html_components = []
154
+
155
+ assert all(any(t.idx == _s for t in doc) for _s in entities)
156
+ it = iter(list(doc))
157
+ while True:
158
+ try:
159
+ t = next(it)
160
+ except StopIteration:
161
+ break
162
+ if t.idx in entities:
163
+ _start, _end, _text, _entity = entities[t.idx]
164
+ while t.idx + len(t) != _end:
165
+ t = next(it)
166
+ annotated_html_components.append(
167
+ str(annotation(*(_text, _entity, color_generator())))
168
+ )
169
+ else:
170
+ annotated_html_components.append(str(html.escape(t.text)))
171
+
172
+ st.markdown(
173
+ "\n".join(
174
+ [
175
+ "<div>",
176
+ *annotated_html_components,
177
+ "<p></p>"
178
+ f'<div style="text-align: right"><p style="color: gray">Time: {(time_end - time_start):.2f}s</p></div>'
179
+ "</div>",
180
+ ]
181
+ ),
182
+ unsafe_allow_html=True,
183
+ )
184
 
 
185
 
186
 
187
  if __name__ == "__main__":