import pandas as pd
import torch
from transformers import set_seed
from tqdm import tqdm

from model import MimicTransformer
set_seed(42)

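# Load fine-tuned weights from a checkpoint; strict=False skips keys that do not match the model.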
def read_model(model, path):
    model.load_state_dict(torch.load(path, map_location=torch.device('cuda')), strict=False)
    return model

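# Instantiate the model, restore the checkpoint, switch to inference mode, move it to the GPU, and reuse its tokenizer.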
model_path = 'checkpoint_0_9113.bin'
mimic = MimicTransformer(cutoff=512)
mimic = read_model(model=mimic, path=model_path)
mimic.eval()
mimic.cuda()
tokenizer = mimic.tokenizer

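# Free-text discharge summaries, one per row of the SUMMARIES column.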
summaries = pd.read_csv('all_summaries.csv')['SUMMARIES']

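# Alternative mask-aware mean pooling over token embeddings (defined but unused; see the commented-out call below).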
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0] #First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

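# Embed one summary: tokenize to a fixed length of 512, run the fine-tuned model, and L2-normalize the pooled output.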
def get_model_outputs(text):
    inputs = tokenizer(text, return_tensors='pt', padding='max_length', max_length=512, truncation=True).to('cuda')
    # run the forward pass without building an autograd graph; only inference is needed here
    with torch.no_grad():
        outputs = mimic(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, drg_labels=None)
    # pooled = mean_pooling(outputs[0][0], inputs['attention_mask'])
    pooled = outputs[0][0]  # first element of the model output, used as the summary representation
    # L2-normalize so that dot products between embeddings are cosine similarities
    normalized = pooled / pooled.norm(dim=1)[:, None]
    return normalized

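# Pre-allocate one 738-dimensional representation for each of the first 10,000 summaries.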
return_tensors = torch.zeros(size=(10000, 738))

for i, summary in tqdm(enumerate(summaries[:10000]), total=10000):
    res = get_model_outputs(text=summary)
    return_tensors[i, :] = res.detach().cpu()
# sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
# sentence_embeddings = sentence_embeddings/sentence_embeddings.norm(dim=1)[:,None]

torch.save(return_tensors, f='discharge_embeddings.pt')
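
# Quick sanity check (illustrative sketch, not part of the pipeline above): because each row
# is L2-normalized, a dot product against a query row yields cosine similarities. The query
# index 0 below is an arbitrary example.
query = return_tensors[0]
cosine_sims = return_tensors @ query
top = torch.topk(cosine_sims, k=6)  # the closest match is the query itself
print('summaries most similar to summary 0:', top.indices.tolist())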