Ankur Goyal committed

Commit: 1f7bede
Parent(s): 02daaaf

Remove model in anticipation of PR #18407

Files changed:
- config.json +0 -4
- configuration_layoutlm.py +0 -3
- modeling_layoutlm.py +0 -147
config.json CHANGED
@@ -3,10 +3,6 @@
   "architectures": [
     "LayoutLMForQuestionAnswering"
   ],
-  "auto_map": {
-    "AutoConfig": "configuration_layoutlm.LayoutLMConfig",
-    "AutoModelForQuestionAnswering": "modeling_layoutlm.LayoutLMForQuestionAnswering"
-  },
   "custom_pipelines": {
     "document-question-answering": {
       "impl": "pipeline_document_question_answering.DocumentQuestionAnsweringPipeline",
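With the `auto_map` entry gone, `AutoModelForQuestionAnswering` no longer resolves to the repo-local `modeling_layoutlm.py`. A minimal before/after sketch of what this means for loading (the repo id below is a placeholder, and the "after" branch assumes PR #18407 merges as proposed):

```python
# Before this commit: "auto_map" pointed AutoModelForQuestionAnswering at the
# repo-local modeling_layoutlm.py, so loading required trusting remote code.
from transformers import AutoModelForQuestionAnswering

model = AutoModelForQuestionAnswering.from_pretrained(
    "<namespace>/<this-repo>",  # placeholder for this model repo's id
    trust_remote_code=True,
)

# After PR #18407 merges (assumption), the class ships with transformers
# itself, so the checkpoint loads without any repo-local model code.
from transformers import LayoutLMForQuestionAnswering

model = LayoutLMForQuestionAnswering.from_pretrained("<namespace>/<this-repo>")
```

The `custom_pipelines` block is untouched, so `pipeline("document-question-answering", ..., trust_remote_code=True)` continues to resolve to the repo-local pipeline implementation.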
configuration_layoutlm.py DELETED
@@ -1,3 +0,0 @@
-# This model uses the existing LayoutLMConfig, which is re-exported here
-# as a thin wrapper
-from transformers.models.layoutlm.configuration_layoutlm import LayoutLMConfig
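This shim existed only because `auto_map` must point at a module inside the repo; re-exporting the library's own config class satisfied that requirement. With the shim deleted, the config resolves directly from the library (a trivial sketch against the upstream base checkpoint):

```python
# LayoutLMConfig has shipped with transformers for a long time; the deleted
# shim merely re-exported it so "auto_map" could name a repo-local module.
from transformers import LayoutLMConfig

config = LayoutLMConfig.from_pretrained("microsoft/layoutlm-base-uncased")
print(config.hidden_size)  # 768 for the base checkpoint
```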
modeling_layoutlm.py DELETED
@@ -1,147 +0,0 @@
-# NOTE: This code is currently under review for inclusion in the main
-# huggingface/transformers repository:
-# https://github.com/huggingface/transformers/pull/18407
-""" PyTorch LayoutLM model."""
-
-
-import math
-from typing import Optional, Tuple, Union
-
-import torch
-from torch import nn
-from torch.nn import CrossEntropyLoss
-
-from transformers.modeling_outputs import QuestionAnsweringModelOutput
-from transformers.models.layoutlm import LayoutLMModel, LayoutLMPreTrainedModel
-
-
-class LayoutLMForQuestionAnswering(LayoutLMPreTrainedModel):
-    def __init__(self, config, has_visual_segment_embedding=True):
-        super().__init__(config)
-        self.num_labels = config.num_labels
-
-        self.layoutlm = LayoutLMModel(config)
-        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
-
-        # Initialize weights and apply final processing
-        self.post_init()
-
-    def get_input_embeddings(self):
-        return self.layoutlm.embeddings.word_embeddings
-
-    def forward(
-        self,
-        input_ids: Optional[torch.LongTensor] = None,
-        bbox: Optional[torch.LongTensor] = None,
-        attention_mask: Optional[torch.FloatTensor] = None,
-        token_type_ids: Optional[torch.LongTensor] = None,
-        position_ids: Optional[torch.LongTensor] = None,
-        head_mask: Optional[torch.FloatTensor] = None,
-        inputs_embeds: Optional[torch.FloatTensor] = None,
-        start_positions: Optional[torch.LongTensor] = None,
-        end_positions: Optional[torch.LongTensor] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
-        return_dict: Optional[bool] = None,
-    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
-        r"""
-        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-            Labels for position (index) of the start of the labelled span for computing the token classification loss.
-            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
-            are not taken into account for computing the loss.
-        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-            Labels for position (index) of the end of the labelled span for computing the token classification loss.
-            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
-            are not taken into account for computing the loss.
-
-        Returns:
-
-        Example:
-
-        In the example below, we give the LayoutLM model an image (of texts) and ask it a question. It will give us
-        a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image).
-
-        ```python
-        >>> from transformers import AutoTokenizer, LayoutLMForQuestionAnswering
-        >>> from datasets import load_dataset
-        >>> import torch
-
-        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased", add_prefix_space=True)
-        >>> model = LayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
-
-        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
-        >>> example = dataset[0]
-        >>> question = "what's his name?"
-        >>> words = example["tokens"]
-        >>> boxes = example["bboxes"]
-
-        >>> encoding = tokenizer(
-        ...     question.split(), words, is_split_into_words=True, return_token_type_ids=True, return_tensors="pt"
-        ... )
-        >>> bbox = []
-        >>> for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)):
-        ...     if s == 1:
-        ...         bbox.append(boxes[w])
-        ...     elif i == tokenizer.sep_token_id:
-        ...         bbox.append([1000] * 4)
-        ...     else:
-        ...         bbox.append([0] * 4)
-        >>> encoding["bbox"] = torch.tensor([bbox])
-
-        >>> outputs = model(**encoding)
-        >>> loss = outputs.loss
-        >>> start_scores = outputs.start_logits
-        >>> end_scores = outputs.end_logits
-        ```
-        """
-
-        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-        outputs = self.layoutlm(
-            input_ids=input_ids,
-            bbox=bbox,
-            attention_mask=attention_mask,
-            token_type_ids=token_type_ids,
-            position_ids=position_ids,
-            head_mask=head_mask,
-            inputs_embeds=inputs_embeds,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
-            return_dict=return_dict,
-        )
-
-        sequence_output = outputs[0]
-
-        logits = self.qa_outputs(sequence_output)
-        start_logits, end_logits = logits.split(1, dim=-1)
-        start_logits = start_logits.squeeze(-1).contiguous()
-        end_logits = end_logits.squeeze(-1).contiguous()
-
-        total_loss = None
-        if start_positions is not None and end_positions is not None:
-            # If we are on multi-GPU, split adds a dimension
-            if len(start_positions.size()) > 1:
-                start_positions = start_positions.squeeze(-1)
-            if len(end_positions.size()) > 1:
-                end_positions = end_positions.squeeze(-1)
-            # sometimes the start/end positions are outside our model inputs; we ignore these terms
-            ignored_index = start_logits.size(1)
-            start_positions = start_positions.clamp(0, ignored_index)
-            end_positions = end_positions.clamp(0, ignored_index)
-
-            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
-            start_loss = loss_fct(start_logits, start_positions)
-            end_loss = loss_fct(end_logits, end_positions)
-            total_loss = (start_loss + end_loss) / 2
-
-        if not return_dict:
-            output = (start_logits, end_logits) + outputs[2:]
-            return ((total_loss,) + output) if total_loss is not None else output
-
-        return QuestionAnsweringModelOutput(
-            loss=total_loss,
-            start_logits=start_logits,
-            end_logits=end_logits,
-            hidden_states=outputs.hidden_states,
-            attentions=outputs.attentions,
-        )
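The deleted docstring example stops at raw start/end logits. For reference, a sketch of decoding an answer span from them, reusing the example's `encoding` and `tokenizer`; the greedy argmax decode is an illustration, not part of the deleted file:

```python
# Continuing the docstring example: take the best-scoring start and end
# positions and decode the tokens in between as the predicted answer.
# (A real decoder would also enforce end_idx >= start_idx and mask out
# question tokens before taking the argmax.)
start_idx = int(start_scores.argmax(-1))
end_idx = int(end_scores.argmax(-1))
answer_ids = encoding.input_ids[0, start_idx : end_idx + 1]
print(tokenizer.decode(answer_ids, skip_special_tokens=True))
```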