ukr-models committed
Commit 4a3e459
1 Parent(s): f6b2ab2

Upload get_predictions.py

Files changed (1)
get_predictions.py +60 -0
get_predictions.py ADDED
@@ -0,0 +1,60 @@
+ import tokenize_uk
+ import torch
+
+ def get_word_predictions(model, tokenizer, texts, is_split_to_words=False, device='cpu'):
+     words_res = []
+     y_res = []
+
+     if not is_split_to_words:
+         texts = [tokenize_uk.tokenize_words(text) for text in texts]
+
+     for text in texts:
+         # Split the word list into sentences at '.', '?' and '!'.
+         size = len(text)
+         idx_list = [idx + 1 for idx, val in enumerate(text) if val in ['.', '?', '!']]
+         if len(idx_list):
+             sents = [text[i:j] for i, j in zip([0] + idx_list, idx_list + ([size] if idx_list[-1] != size else []))]
+         else:
+             sents = [text]
+
+         y_res_x = []
+         words_res_x = []
+         for sent_tokens in sents:
+             # 101 and 102 are the [CLS] and [SEP] token ids of BERT-family tokenizers.
+             tokenized_inputs = [101]
+             word_ids = [None]
+             for word_id, word in enumerate(sent_tokens):
+                 # tokenizer.encode() adds [CLS]/[SEP]; [1:-1] strips them again.
+                 word_tokens = tokenizer.encode(word)[1:-1]
+                 tokenized_inputs += word_tokens
+                 word_ids += [word_id] * len(word_tokens)
+             # Truncate to the model's limit, leaving room for the closing [SEP].
+             tokenized_inputs = tokenized_inputs[:(tokenizer.model_max_length - 1)]
+             word_ids = word_ids[:(tokenizer.model_max_length - 1)]
+             tokenized_inputs += [102]
+             word_ids += [None]
+
+             torch_tokenized_inputs = torch.tensor(tokenized_inputs).unsqueeze(0)
+             torch_attention_mask = torch.ones(torch_tokenized_inputs.shape)
+             # Inference only, so no gradients are needed.
+             with torch.no_grad():
+                 predictions = model(input_ids=torch_tokenized_inputs.to(device), attention_mask=torch_attention_mask.to(device))
+             # Bring logits back to the CPU before the numpy conversion (required when device='cuda').
+             predictions = torch.argmax(predictions.logits.squeeze(), dim=1).cpu().numpy()
+             predictions = [model.config.id2label[i] for i in predictions]
+
+             # Re-assemble sub-word tokens into words; each word keeps the
+             # prediction of its first sub-word token.
+             previous_word_idx = None
+             sent_words = []
+             predictions_words = []
+             word_tokens = []
+             first_pred = None
+             for i, word_idx in enumerate(word_ids):
+                 if word_idx != previous_word_idx:
+                     sent_words.append(tokenizer.decode(word_tokens))
+                     word_tokens = [tokenized_inputs[i]]
+                     predictions_words.append(first_pred)
+                     first_pred = predictions[i]
+                 else:
+                     word_tokens.append(tokenized_inputs[i])
+                 previous_word_idx = word_idx
+
+             # Drop the leading placeholder entries produced by the [CLS] token.
+             words_res_x.extend(sent_words[1:])
+             y_res_x.extend(predictions_words[1:])
+
+         words_res.append(words_res_x)
+         y_res.append(y_res_x)
+
+     return words_res, y_res
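
A minimal usage sketch, for reference. The checkpoint name below is a placeholder for any BERT-family token-classification model, not necessarily the one this repository ships with:

import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer
from get_predictions import get_word_predictions

# "your-org/your-ner-model" is a placeholder checkpoint name.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
tokenizer = AutoTokenizer.from_pretrained("your-org/your-ner-model")
model = AutoModelForTokenClassification.from_pretrained("your-org/your-ner-model").to(device)
model.eval()

words, labels = get_word_predictions(model, tokenizer, ["Тарас Шевченко народився в Україні."], device=device)
print(list(zip(words[0], labels[0])))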