import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist

import transformers
from transformers import RobertaTokenizer, AutoModel, PreTrainedModel
from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel, RobertaModel, RobertaLMHead
from transformers.models.bert.modeling_bert import BertPreTrainedModel, BertModel, BertLMPredictionHead
from transformers.activations import gelu
from transformers.file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from transformers.modeling_outputs import SequenceClassifierOutput, BaseModelOutputWithPoolingAndCrossAttentions

# Project-local helpers referenced in cl_forward below: precomputed teacher embeddings
# and the row/column softmax similarity matrices built from them.
import simcse.mse_loss
import simcse.readEmbeddings

# Frozen GLM teacher model, loaded once by init_glm() and shared module-wide.
glm_model = None


def init_glm(path):
    """Load the GLM teacher model and freeze its parameters."""
    global glm_model
    # Note: the teacher is placed on cuda:0 unconditionally.
    glm_model = AutoModel.from_pretrained(path, trust_remote_code=True).to("cuda:0")
    # The teacher is frozen: its parameters are never updated during training.
    for param in glm_model.parameters():
        param.requires_grad = False
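

# Usage sketch (hypothetical checkpoint path): call init_glm() once, before constructing
# a BertForCL / RobertaForCL instance, so that their `self.fc` projection can read
# `glm_model.config.hidden_size`:
#
#     init_glm("/path/to/glm-checkpoint")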


class MLPLayer(nn.Module):
    """
    Head for getting sentence representations over RoBERTa/BERT's [CLS] representation.
    """

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # Project to a fixed 1536-dimensional space, presumably the width of the
        # precomputed teacher embeddings consumed in cl_forward.
        self.fc = nn.Linear(config.hidden_size, 1536)
        self.activation = nn.Tanh()

    def forward(self, features, **kwargs):
        x = self.dense(features)
        x = self.fc(x)
        x = self.activation(x)
        return x
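

# Shape sketch: MLPLayer maps (..., hidden_size) -> (..., 1536); `dense` keeps the width,
# `fc` projects to the fixed 1536 output, and Tanh is applied last.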


class Similarity(nn.Module):
    """
    Cosine similarity scaled by a temperature.
    """

    def __init__(self, temp):
        super().__init__()
        self.temp = temp
        self.cos = nn.CosineSimilarity(dim=-1)

    def forward(self, x, y):
        return self.cos(x, y) / self.temp
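

# Usage sketch (mirroring cl_forward below): for z1, z2 of shape (N, D),
#
#     sim = Similarity(temp=0.05)                       # 0.05 is an illustrative temperature
#     cos_sim = sim(z1.unsqueeze(1), z2.unsqueeze(0))   # broadcasts to an (N, N) matrix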


class Pooler(nn.Module):
    """
    Parameter-free poolers to get the sentence embedding.
    'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
    'cls_before_pooler': [CLS] representation without the original MLP pooler.
    'avg': average of the last layer's hidden states over tokens.
    'avg_top2': average of the last two layers.
    'avg_first_last': average of the first and the last layers.
    """

    def __init__(self, pooler_type):
        super().__init__()
        self.pooler_type = pooler_type
        assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2",
                                    "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type

    def forward(self, attention_mask, outputs):
        last_hidden = outputs.last_hidden_state
        hidden_states = outputs.hidden_states

        if self.pooler_type in ['cls_before_pooler', 'cls']:
            return last_hidden[:, 0]
        elif self.pooler_type == "avg":
            return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
        elif self.pooler_type == "avg_first_last":
            first_hidden = hidden_states[1]
            last_hidden = hidden_states[-1]
            pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1) / attention_mask.sum(-1).unsqueeze(-1)
            return pooled_result
        elif self.pooler_type == "avg_top2":
            second_last_hidden = hidden_states[-2]
            last_hidden = hidden_states[-1]
            pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1) / attention_mask.sum(-1).unsqueeze(-1)
            return pooled_result
        else:
            raise NotImplementedError
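

# Pooling sketch ("avg"): with last_hidden of shape (B, T, H) and attention_mask of shape
# (B, T), the sentence embedding is the masked mean over tokens,
#     sum_t(h_t * m_t) / sum_t(m_t),
# giving a (B, H) tensor; the other "avg_*" modes apply the same mean to averaged layers.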


def cl_init(cls, config):
    """
    Contrastive-learning init function shared by BertForCL and RobertaForCL.
    """
    cls.pooler_type = cls.model_args.pooler_type
    cls.pooler = Pooler(cls.model_args.pooler_type)
    if cls.model_args.pooler_type == "cls":
        cls.mlp = MLPLayer(config)
    cls.sim = Similarity(temp=cls.model_args.temp)
    cls.init_weights()
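

# model_args fields read in this module: pooler_type, temp, hard_negative_weight,
# do_mlm, mlm_weight, mlp_only_train, and init_embeddings_model.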


def cl_forward(cls,
               encoder,
               input_ids=None,
               attention_mask=None,
               token_type_ids=None,
               position_ids=None,
               head_mask=None,
               inputs_embeds=None,
               labels=None,
               output_attentions=None,
               output_hidden_states=None,
               return_dict=None,
               mlm_input_ids=None,
               mlm_labels=None,
               ):
    return_dict = return_dict if return_dict is not None else cls.config.use_return_dict
    ori_input_ids = input_ids
    batch_size = input_ids.size(0)
    # Number of sentences per instance: 2 for a positive pair, 3 when a hard negative is attached.
    num_sent = input_ids.size(1)

    mlm_outputs = None

    # Flatten (bs, num_sent, len) -> (bs * num_sent, len) so the encoder sees a flat batch.
    input_ids = input_ids.view((-1, input_ids.size(-1)))
    attention_mask = attention_mask.view((-1, attention_mask.size(-1)))
    if token_type_ids is not None:
        token_type_ids = token_type_ids.view((-1, token_type_ids.size(-1)))

    # When GLM-produced inputs_embeds are supplied, drop input_ids so the encoder uses the embeddings.
    if inputs_embeds is not None:
        input_ids = None

    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False,
        return_dict=True,
    )

    # Optional second pass for the auxiliary MLM objective.
    if mlm_input_ids is not None:
        mlm_input_ids = mlm_input_ids.view((-1, mlm_input_ids.size(-1)))
        mlm_outputs = encoder(
            mlm_input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=True if cls.model_args.pooler_type in ['avg_top2', 'avg_first_last'] else False,
            return_dict=True,
        )

    # Pool, then restore the (bs, num_sent, hidden) layout.
    pooler_output = cls.pooler(attention_mask, outputs)
    pooler_output = pooler_output.view((batch_size, num_sent, pooler_output.size(-1)))

    # With the "cls" pooler, apply the extra MLP head (which also projects to the 1536-dim space).
    if cls.pooler_type == "cls":
        pooler_output = cls.mlp(pooler_output)

    # z1/z2: the two views of each sentence used as a positive pair.
    z1, z2 = pooler_output[:, 0], pooler_output[:, 1]

    # Fetch the next batch of precomputed teacher embeddings and advance the global cursor.
    tensor_left, tensor_right = simcse.mse_loss.giveMeBatchEmbeddings(simcse.mse_loss.global_num,
                                                                      simcse.readEmbeddings.data)
    simcse.mse_loss.global_num += 32
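    # Note: the cursor above advances by a hardcoded 32 per step, so this path appears to
    # assume a fixed training batch size of 32 and a dataloader order that matches the
    # order of the precomputed teacher embeddings.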

    # Hard negative, if the batch provides one.
    if num_sent == 3:
        z3 = pooler_output[:, 2]

    # Gather embeddings across processes so every rank sees the full batch of negatives.
    if dist.is_initialized() and cls.training:
        if num_sent >= 3:
            z3_list = [torch.zeros_like(z3) for _ in range(dist.get_world_size())]
            dist.all_gather(tensor_list=z3_list, tensor=z3.contiguous())
            z3_list[dist.get_rank()] = z3
            z3 = torch.cat(z3_list, 0)

        z1_list = [torch.zeros_like(z1) for _ in range(dist.get_world_size())]
        z2_list = [torch.zeros_like(z2) for _ in range(dist.get_world_size())]

        dist.all_gather(tensor_list=z1_list, tensor=z1.contiguous())
        dist.all_gather(tensor_list=z2_list, tensor=z2.contiguous())

        # Keep the local tensors in place so gradients flow through this rank's embeddings.
        z1_list[dist.get_rank()] = z1
        z2_list[dist.get_rank()] = z2

        z1 = torch.cat(z1_list, 0)
        z2 = torch.cat(z2_list, 0)

    # MSE between the model's sentence embeddings and the precomputed teacher embeddings.
    ziang_loss = F.mse_loss(z1, tensor_left) + F.mse_loss(z2, tensor_right)

    # Row/column softmax similarity matrices for the teacher and for the model.
    softmax_row, softmax_col = simcse.mse_loss.giveMeMatrix(tensor_left, tensor_right)
    softmax_row_model, softmax_col_model = simcse.mse_loss.giveMeMatrix(z1, z2)

    # Diagonal targets for the 32-example batch assumed above.
    ziang_labels = torch.arange(32, device=z1.device)

    # Cross-entropy over the teacher similarity matrices with diagonal targets.
    row_loss = F.cross_entropy(softmax_row, ziang_labels)
    col_loss = F.cross_entropy(softmax_col, ziang_labels)
    softmax_loss = (row_loss + col_loss) / 2

    # KL divergence from the teacher's distributions to the model's: KL(teacher || model).
    KL_row_loss = F.kl_div(softmax_row_model.log(), softmax_row, reduction='batchmean')
    KL_col_loss = F.kl_div(softmax_col_model.log(), softmax_col, reduction='batchmean')
    KL_loss = (KL_row_loss + KL_col_loss) / 2

    ziang_loss = KL_loss + ziang_loss + softmax_loss
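
    # Combined distillation objective computed above:
    #     ziang_loss = KL_loss + MSE(z1, teacher_left) + MSE(z2, teacher_right) + softmax_loss
    # i.e. an embedding-matching term plus two similarity-matrix terms that align the model's
    # in-batch similarity distribution with the teacher's.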

    # Standard SimCSE in-batch contrastive loss. It is still computed here, but the loss
    # returned below (and hence used for training) is ziang_loss.
    cos_sim = cls.sim(z1.unsqueeze(1), z2.unsqueeze(0))

    if num_sent >= 3:
        z1_z3_cos = cls.sim(z1.unsqueeze(1), z3.unsqueeze(0))
        cos_sim = torch.cat([cos_sim, z1_z3_cos], 1)

    labels = torch.arange(cos_sim.size(0)).long().to(cls.device)
    loss_fct = nn.CrossEntropyLoss()

    if num_sent == 3:
        # Up-weight the hard-negative column by hard_negative_weight.
        z3_weight = cls.model_args.hard_negative_weight
        weights = torch.tensor(
            [[0.0] * (cos_sim.size(-1) - z1_z3_cos.size(-1)) + [0.0] * i + [z3_weight] + [0.0] * (
                    z1_z3_cos.size(-1) - i - 1) for i in range(z1_z3_cos.size(-1))]
        ).to(cls.device)
        cos_sim = cos_sim + weights

    loss = loss_fct(cos_sim, labels)

    # Optional auxiliary MLM loss.
    if mlm_outputs is not None and mlm_labels is not None:
        mlm_labels = mlm_labels.view(-1, mlm_labels.size(-1))
        prediction_scores = cls.lm_head(mlm_outputs.last_hidden_state)
        masked_lm_loss = loss_fct(prediction_scores.view(-1, cls.config.vocab_size), mlm_labels.view(-1))
        loss = loss + cls.model_args.mlm_weight * masked_lm_loss

    if not return_dict:
        output = (cos_sim,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output

    # The dict path returns the distillation objective as the training loss.
    return SequenceClassifierOutput(
        loss=ziang_loss,
        logits=cos_sim,
        hidden_states=outputs.hidden_states,
    )


def sentemb_forward(
        cls,
        encoder,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
):
    return_dict = return_dict if return_dict is not None else cls.config.use_return_dict

    if inputs_embeds is not None:
        input_ids = None

    outputs = encoder(
        input_ids,
        attention_mask=attention_mask,
        token_type_ids=token_type_ids,
        position_ids=position_ids,
        head_mask=head_mask,
        inputs_embeds=inputs_embeds,
        output_attentions=output_attentions,
        output_hidden_states=True if cls.pooler_type in ['avg_top2', 'avg_first_last'] else False,
        return_dict=True,
    )

    pooler_output = cls.pooler(attention_mask, outputs)
    if cls.pooler_type == "cls" and not cls.model_args.mlp_only_train:
        pooler_output = cls.mlp(pooler_output)

    if not return_dict:
        return (outputs[0], pooler_output) + outputs[2:]

    return BaseModelOutputWithPoolingAndCrossAttentions(
        pooler_output=pooler_output,
        last_hidden_state=outputs.last_hidden_state,
        hidden_states=outputs.hidden_states,
    )


class BertForCL(BertPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.model_args = model_kargs["model_args"]
        self.bert = BertModel(config, add_pooling_layer=False)

        if self.model_args.do_mlm:
            self.lm_head = BertLMPredictionHead(config)

        if self.model_args.init_embeddings_model:
            if "glm" in self.model_args.init_embeddings_model:
                init_glm(self.model_args.init_embeddings_model)
                # Project the frozen GLM hidden states down to BERT's embedding size.
                self.fc = nn.Linear(glm_model.config.hidden_size, config.hidden_size)
            else:
                raise NotImplementedError

        cl_init(self, config)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,
                sent_emb=False,
                mlm_input_ids=None,
                mlm_labels=None,
                ):
        # Unlike RobertaForCL below, this variant also runs the GLM pass when sent_emb=True.
        if self.model_args.init_embeddings_model:
            input_ids_for_glm = input_ids.view((-1, input_ids.size(-1)))
            attention_mask_for_glm = attention_mask.view((-1, attention_mask.size(-1)))
            token_type_ids_for_glm = None
            if token_type_ids is not None:
                token_type_ids_for_glm = token_type_ids.view((-1, token_type_ids.size(-1)))

            outputs_from_glm = glm_model(input_ids_for_glm,
                                         attention_mask=attention_mask_for_glm,
                                         token_type_ids=token_type_ids_for_glm,
                                         position_ids=position_ids,
                                         head_mask=head_mask,
                                         inputs_embeds=inputs_embeds,
                                         labels=labels,
                                         output_attentions=output_attentions,
                                         output_hidden_states=output_hidden_states,
                                         return_dict=return_dict,
                                         )

            # Feed the projected GLM representations to BERT as inputs_embeds.
            inputs_embeds = self.fc(outputs_from_glm.last_hidden_state)

        if sent_emb:
            return sentemb_forward(self, self.bert,
                                   input_ids=input_ids,
                                   attention_mask=attention_mask,
                                   token_type_ids=token_type_ids,
                                   position_ids=position_ids,
                                   head_mask=head_mask,
                                   inputs_embeds=inputs_embeds,
                                   labels=labels,
                                   output_attentions=output_attentions,
                                   output_hidden_states=output_hidden_states,
                                   return_dict=return_dict,
                                   )
        else:
            return cl_forward(self, self.bert,
                              input_ids=input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              position_ids=position_ids,
                              head_mask=head_mask,
                              inputs_embeds=inputs_embeds,
                              labels=labels,
                              output_attentions=output_attentions,
                              output_hidden_states=output_hidden_states,
                              return_dict=return_dict,
                              mlm_input_ids=mlm_input_ids,
                              mlm_labels=mlm_labels,
                              )


class RobertaForCL(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config, *model_args, **model_kargs):
        super().__init__(config)
        self.model_args = model_kargs["model_args"]
        self.roberta = RobertaModel(config, add_pooling_layer=False)

        if self.model_args.do_mlm:
            self.lm_head = RobertaLMHead(config)

        if self.model_args.init_embeddings_model:
            if "glm" in self.model_args.init_embeddings_model:
                init_glm(self.model_args.init_embeddings_model)
                # Project the frozen GLM hidden states down to RoBERTa's embedding size.
                self.fc = nn.Linear(glm_model.config.hidden_size, config.hidden_size)
            else:
                raise NotImplementedError

        cl_init(self, config)

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None,
                labels=None,
                output_attentions=None,
                output_hidden_states=None,
                return_dict=None,
                sent_emb=False,
                mlm_input_ids=None,
                mlm_labels=None,
                ):
        # The GLM pass is skipped during sentence-embedding evaluation (sent_emb=True).
        if self.model_args.init_embeddings_model and not sent_emb:
            input_ids_for_glm = input_ids.view((-1, input_ids.size(-1)))
            attention_mask_for_glm = attention_mask.view((-1, attention_mask.size(-1)))
            token_type_ids_for_glm = None
            if token_type_ids is not None:
                token_type_ids_for_glm = token_type_ids.view((-1, token_type_ids.size(-1)))

            outputs_from_glm = glm_model(input_ids_for_glm,
                                         attention_mask=attention_mask_for_glm,
                                         token_type_ids=token_type_ids_for_glm,
                                         position_ids=position_ids,
                                         head_mask=head_mask,
                                         inputs_embeds=inputs_embeds,
                                         labels=labels,
                                         output_attentions=output_attentions,
                                         output_hidden_states=output_hidden_states,
                                         return_dict=return_dict,
                                         )

            # Feed the projected GLM representations to RoBERTa as inputs_embeds.
            inputs_embeds = self.fc(outputs_from_glm.last_hidden_state)

        if sent_emb:
            return sentemb_forward(self, self.roberta,
                                   input_ids=input_ids,
                                   attention_mask=attention_mask,
                                   token_type_ids=token_type_ids,
                                   position_ids=position_ids,
                                   head_mask=head_mask,
                                   inputs_embeds=inputs_embeds,
                                   labels=labels,
                                   output_attentions=output_attentions,
                                   output_hidden_states=output_hidden_states,
                                   return_dict=return_dict,
                                   )
        else:
            return cl_forward(self, self.roberta,
                              input_ids=input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              position_ids=position_ids,
                              head_mask=head_mask,
                              inputs_embeds=inputs_embeds,
                              labels=labels,
                              output_attentions=output_attentions,
                              output_hidden_states=output_hidden_states,
                              return_dict=return_dict,
                              mlm_input_ids=mlm_input_ids,
                              mlm_labels=mlm_labels,
                              )
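

if __name__ == "__main__":
    # Minimal construction sketch (not from the original training script). Every model_args
    # field below is one this module reads; temp=0.05 and "roberta-base" are illustrative
    # choices, not values taken from this file, and fetching the config needs network access.
    from types import SimpleNamespace

    from transformers import AutoConfig

    model_args = SimpleNamespace(
        pooler_type="cls",
        temp=0.05,
        hard_negative_weight=0.0,
        do_mlm=False,
        mlm_weight=0.1,
        mlp_only_train=False,
        init_embeddings_model=None,
    )
    config = AutoConfig.from_pretrained("roberta-base")
    model = RobertaForCL(config, model_args=model_args)
    print(type(model).__name__, "pooler:", model.pooler_type)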