import torch
from torch import nn
import en_core_web_sm
from transformers import AutoModel, TrainingArguments, Trainer, RobertaTokenizer, RobertaModel
from transformers import AutoTokenizer
model_checkpoint = "ehsanaghaei/SecureBERT"
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, add_prefix_space=True)
roberta_model = RobertaModel.from_pretrained(model_checkpoint).to(device)
event_nugget_list = ['B-Phishing',
                     'I-Phishing',
                     'O',
                     'B-DiscoverVulnerability',
                     'B-Ransom',
                     'I-Ransom',
                     'B-Databreach',
                     'I-DiscoverVulnerability',
                     'B-PatchVulnerability',
                     'I-PatchVulnerability',
                     'I-Databreach']
nlp = en_core_web_sm.load()
pos_spacy_tag_list = ["ADJ","ADP","ADV","AUX","CCONJ","DET","INTJ","NOUN","NUM","PART","PRON","PROPN","PUNCT","SCONJ","SYM","VERB","SPACE","X"]
ner_spacy_tag_list = [bio + entity for entity in list(nlp.get_pipe('ner').labels) for bio in ["B-", "I-"]] + ["O"]
dep_spacy_tag_list = list(nlp.get_pipe("parser").labels)
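
# The spaCy-derived tag inventories above (coarse POS tags, BIO-style NER tags,
# dependency relations), together with event_nugget_list, define the index
# vocabularies for the extra per-token feature embeddings used by
# CustomRobertaWithPOS below.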
class CustomRobertaWithPOS(nn.Module):
    """SecureBERT encoder augmented with embeddings for POS, NER, dependency,
    parse-tree depth and event-nugget tags, followed by a linear realis classifier."""

    def __init__(self, num_classes_realis):
        super(CustomRobertaWithPOS, self).__init__()
        self.num_classes_realis = num_classes_realis
        self.pos_embed = nn.Embedding(len(pos_spacy_tag_list), 16)
        self.ner_embed = nn.Embedding(len(ner_spacy_tag_list), 8)
        self.dep_embed = nn.Embedding(len(dep_spacy_tag_list), 8)
        self.depth_embed = nn.Embedding(17, 8)
        self.nugget_embed = nn.Embedding(len(event_nugget_list), 8)
        self.roberta = roberta_model
        self.dropout1 = nn.Dropout(0.2)
        # 16 + 8 + 8 + 8 + 8 = 48 extra feature dimensions on top of the RoBERTa hidden size.
        self.fc1 = nn.Linear(self.roberta.config.hidden_size + 48, self.num_classes_realis)

    def forward(self, input_ids, attention_mask, pos_spacy, ner_spacy, dep_spacy, depth_spacy, ner_tags):
        outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_output = outputs.last_hidden_state

        # Embed each linguistic feature; positions labelled -100 (special/padding
        # tokens) are left as zero vectors.
        pos_mask = pos_spacy != -100
        pos_embed_masked = self.pos_embed(pos_spacy[pos_mask])
        pos_embed = torch.zeros((pos_spacy.shape[0], pos_spacy.shape[1], 16), dtype=torch.float).to(device)
        pos_embed[pos_mask] = pos_embed_masked

        ner_mask = ner_spacy != -100
        ner_embed_masked = self.ner_embed(ner_spacy[ner_mask])
        ner_embed = torch.zeros((ner_spacy.shape[0], ner_spacy.shape[1], 8), dtype=torch.float).to(device)
        ner_embed[ner_mask] = ner_embed_masked

        dep_mask = dep_spacy != -100
        dep_embed_masked = self.dep_embed(dep_spacy[dep_mask])
        dep_embed = torch.zeros((dep_spacy.shape[0], dep_spacy.shape[1], 8), dtype=torch.float).to(device)
        dep_embed[dep_mask] = dep_embed_masked

        depth_mask = depth_spacy != -100
        depth_embed_masked = self.depth_embed(depth_spacy[depth_mask])
        depth_embed = torch.zeros((depth_spacy.shape[0], depth_spacy.shape[1], 8), dtype=torch.float).to(device)
        depth_embed[depth_mask] = depth_embed_masked

        nugget_mask = ner_tags != -100
        nugget_embed_masked = self.nugget_embed(ner_tags[nugget_mask])
        nugget_embed = torch.zeros((ner_tags.shape[0], ner_tags.shape[1], 8), dtype=torch.float).to(device)
        nugget_embed[nugget_mask] = nugget_embed_masked

        # Concatenate the contextual embeddings with the feature embeddings and
        # classify each token.
        features_concat = torch.cat((last_hidden_output, pos_embed, ner_embed, dep_embed, depth_embed, nugget_embed), 2).to(device)
        features_concat = self.dropout1(features_concat)
        logits = self.fc1(features_concat)

        return logits
def get_entity_for_realis_from_idx(start_idx, end_idx, event_nuggets):
    """Map a character span (start_idx, end_idx) to a BIO label based on how it overlaps the annotated event nuggets."""
    event_nuggets_idxs = [(nugget["startOffset"], nugget["endOffset"]) for nugget in event_nuggets]
    for idx, (nugget_start, nugget_end) in enumerate(event_nuggets_idxs):
        # Spans that begin at (or reach back over) the nugget start open a "B-" tag.
        if ((start_idx == nugget_start and end_idx == nugget_end)
                or (start_idx == nugget_start and end_idx <= nugget_end)
                or (start_idx == nugget_start and end_idx > nugget_end)
                or (end_idx == nugget_end and start_idx < nugget_start)
                or (start_idx <= nugget_start and end_idx <= nugget_end and end_idx > nugget_start)):
            return "B-" + event_nuggets[idx]["subtype"]
        # Spans that start strictly inside the nugget continue it with an "I-" tag.
        elif (start_idx > nugget_start and end_idx <= nugget_end) or (start_idx > nugget_start and start_idx < nugget_end):
            return "I-" + event_nuggets[idx]["subtype"]
    return "O"
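
# Illustrative behaviour (hypothetical offsets, not taken from the dataset):
# with event_nuggets = [{"startOffset": 10, "endOffset": 25, "subtype": "Databreach"}],
# get_entity_for_realis_from_idx(10, 18, event_nuggets) returns "B-Databreach",
# get_entity_for_realis_from_idx(12, 18, event_nuggets) returns "I-Databreach",
# and a span outside every nugget returns "O".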
def tokenize_and_align_labels_with_pos_ner_realis(examples, tokenizer, ner_names, label_all_tokens=True):
    """Tokenize pre-split words and align the nugget, POS, NER, dependency and
    parse-depth tags with the resulting subword tokens."""
    tokenized_inputs = tokenizer(examples["tokens"], padding='max_length', truncation=True, is_split_into_words=True)
    #tokenized_inputs.pop('input_ids')

    nuggets = []
    ner_spacy = []
    pos_spacy = []
    dep_spacy = []
    depth_spacy = []
    for i, (nugget, pos, ner, dep, depth) in enumerate(zip(examples["ner_tags"], examples["pos_spacy"], examples["ner_spacy"], examples["dep_spacy"], examples["depth_spacy"])):
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        previous_word_idx = None
        nugget_ids = []
        ner_spacy_ids = []
        pos_spacy_ids = []
        dep_spacy_ids = []
        depth_spacy_ids = []
        for word_idx in word_ids:
            # Special tokens have a word id that is None. We set the label to -100
            # so they are automatically ignored in the loss function.
            if word_idx is None:
                nugget_ids.append(-100)
                ner_spacy_ids.append(-100)
                pos_spacy_ids.append(-100)
                dep_spacy_ids.append(-100)
                depth_spacy_ids.append(-100)
            # We set the label for the first token of each word.
            elif word_idx != previous_word_idx:
                nugget_ids.append(nugget[word_idx])
                ner_spacy_ids.append(ner[word_idx])
                pos_spacy_ids.append(pos[word_idx])
                dep_spacy_ids.append(dep[word_idx])
                depth_spacy_ids.append(depth[word_idx])
            # For the other tokens in a word, we set the label to either the current
            # label or -100, depending on the label_all_tokens flag.
            else:
                nugget_ids.append(nugget[word_idx] if label_all_tokens else -100)
                ner_spacy_ids.append(ner[word_idx] if label_all_tokens else -100)
                pos_spacy_ids.append(pos[word_idx] if label_all_tokens else -100)
                dep_spacy_ids.append(dep[word_idx] if label_all_tokens else -100)
                depth_spacy_ids.append(depth[word_idx] if label_all_tokens else -100)
            previous_word_idx = word_idx

        nuggets.append(nugget_ids)
        ner_spacy.append(ner_spacy_ids)
        pos_spacy.append(pos_spacy_ids)
        dep_spacy.append(dep_spacy_ids)
        depth_spacy.append(depth_spacy_ids)

    tokenized_inputs["ner_tags"] = nuggets
    tokenized_inputs["pos_spacy"] = pos_spacy
    tokenized_inputs["ner_spacy"] = ner_spacy
    tokenized_inputs["dep_spacy"] = dep_spacy
    tokenized_inputs["depth_spacy"] = depth_spacy
    return tokenized_inputs
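
if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the original pipeline: the sentence,
    # tag indices and number of realis classes below are illustrative assumptions
    # chosen only to exercise the tokenization helper and the model defined above.
    example = {
        "tokens": [["Attackers", "stole", "customer", "records"]],
        "ner_tags": [[2, 6, 2, 10]],                        # placeholder indices into event_nugget_list
        "pos_spacy": [[7, 15, 7, 7]],                       # placeholder indices into pos_spacy_tag_list
        "ner_spacy": [[len(ner_spacy_tag_list) - 1] * 4],   # "O" for every word
        "dep_spacy": [[0, 0, 0, 0]],                        # placeholder indices into dep_spacy_tag_list
        "depth_spacy": [[1, 0, 2, 1]],                      # placeholder parse-tree depths (< 17)
    }
    encoded = tokenize_and_align_labels_with_pos_ner_realis(example, tokenizer, event_nugget_list)

    # Single forward pass on the encoded example; 4 realis classes is an assumption.
    model = CustomRobertaWithPOS(num_classes_realis=4).to(device)
    batch = {key: torch.tensor(encoded[key]).to(device)
             for key in ["input_ids", "attention_mask", "pos_spacy", "ner_spacy",
                         "dep_spacy", "depth_spacy", "ner_tags"]}
    with torch.no_grad():
        logits = model(**batch)
    print(logits.shape)  # expected: (1, max_length, 4)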