Tom Aarsen committed
Commit cc70901
1 Parent(s): 74d4380

Add return_dict to NomicBertModel; required for Sentence Transformers
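For context on the commit message: Sentence Transformers invokes the wrapped Hugging Face module with return_dict passed as a keyword argument, so a forward() signature lacking that parameter raises a TypeError before the model body even runs. Below is a minimal toy sketch of that failure mode; the two stub modules are hypothetical stand-ins for the pre- and post-patch signatures, not the real NomicBertModel.

import torch
import torch.nn as nn

class WithoutReturnDict(nn.Module):
    # Mirrors the old forward() signature: no return_dict parameter.
    def forward(self, input_ids=None, attention_mask=None):
        return (input_ids,)

class WithReturnDict(nn.Module):
    # Mirrors the patched signature: return_dict is accepted (and may be None).
    def forward(self, input_ids=None, attention_mask=None, return_dict=None):
        return (input_ids,)

features = {
    "input_ids": torch.ones(1, 4, dtype=torch.long),
    "attention_mask": torch.ones(1, 4, dtype=torch.long),
}

try:
    WithoutReturnDict()(**features, return_dict=False)  # the caller's convention
except TypeError as err:
    print(err)  # forward() got an unexpected keyword argument 'return_dict'

WithReturnDict()(**features, return_dict=False)  # accepted after the patch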
modeling_hf_nomic_bert.py
CHANGED
@@ -1069,6 +1069,7 @@ class NomicBertModel(NomicBertPreTrainedModel):
         position_ids=None,
         token_type_ids=None,
         attention_mask=None,
+        return_dict=None,
     ):
         if token_type_ids is None:
             token_type_ids = torch.zeros_like(input_ids)
@@ -1080,7 +1081,7 @@ class NomicBertModel(NomicBertPreTrainedModel):
 
         attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.shape)
         sequence_output = self.encoder(
-            hidden_states, attention_mask=attention_mask
+            hidden_states, attention_mask=attention_mask, return_dict=return_dict,
         )
 
         pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
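With return_dict threaded through to the encoder, the model loads cleanly under Sentence Transformers. A minimal usage sketch follows, assuming this modeling file backs the nomic-ai/nomic-embed-text-v1 checkpoint (which pulls it in via trust_remote_code) and a sentence-transformers version recent enough to forward that flag; the "search_document: " task prefix follows that model's documented convention.

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("nomic-ai/nomic-embed-text-v1", trust_remote_code=True)

# nomic-embed expects a task prefix such as "search_document: " on each input.
embeddings = model.encode(["search_document: The return_dict kwarg is now accepted."])
print(embeddings.shape)  # one embedding vector per input sentence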