from functools import partial

import pandas as pd
import streamlit as st
import torch
from datasets import Dataset, DatasetDict, load_dataset  # type: ignore
from torch.nn.functional import cross_entropy
from transformers import DataCollatorForTokenClassification  # type: ignore

from src.utils import device, tokenizer_hash_funcs

@st.cache(allow_output_mutation=True)
def get_data(
    ds_name: str, config_name: str, split_name: str, split_sample_size: int, randomize_sample: bool
) -> Dataset:
    """Loads a Dataset from the Hugging Face hub (if not already loaded).

    Uses `datasets.load_dataset` to load the dataset (see its documentation for additional details).

    Args:
        ds_name (str): Path or name of the dataset.
        config_name (str): Name of the dataset configuration.
        split_name (str): Which split of the data to load.
        split_sample_size (int): The number of examples to load from the split.
        randomize_sample (bool): Whether to shuffle the split before sampling from it.

    Returns:
        Dataset: A Dataset object.
    """
    ds: DatasetDict = load_dataset(ds_name, name=config_name, use_auth_token=True)  # type: ignore
    if randomize_sample:
        # Shuffle with a fixed seed so the sample is reproducible across reruns
        # (and plays nicely with st.cache).
        ds = ds.shuffle(seed=0)
    split = ds[split_name].select(range(split_sample_size))
    return split
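
# A minimal usage sketch (the dataset/config/split names below are just
# examples; any hub dataset with a token-level `ner_tags` column works the
# same way):
#
#   split = get_data(
#       "conll2003", config_name="conll2003", split_name="validation",
#       split_sample_size=512, randomize_sample=True,
#   )
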
@st.cache(
    allow_output_mutation=True,
    hash_funcs=tokenizer_hash_funcs,
)
def get_collator(tokenizer) -> DataCollatorForTokenClassification:
    """Returns a DataCollator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([PreTrainedTokenizer] or [PreTrainedTokenizerFast]): The tokenizer used for encoding the data.

    Returns:
        DataCollatorForTokenClassification: A collator that pads both inputs and labels
            (labels are padded with -100 so the loss ignores the padding positions).
    """
    return DataCollatorForTokenClassification(tokenizer)

def create_word_ids_from_input_ids(tokenizer, input_ids: list[int]) -> list[int]:
    """Takes a list of input_ids and returns the corresponding word_ids.

    Fallback for (slow) tokenizers that do not provide `word_ids()`: word boundaries are
    inferred from "@@"-style continuation suffixes, and special tokens are mapped to -1.

    Args:
        tokenizer: The tokenizer that was used to obtain the input ids.
        input_ids (list[int]): List of token ids.

    Returns:
        list[int]: Word ids corresponding to the input ids.
    """
    word_ids = []
    wid = -1
    tokens = [tokenizer.convert_ids_to_tokens(i) for i in input_ids]
    for i, tok in enumerate(tokens):
        if tok in tokenizer.all_special_tokens:
            word_ids.append(-1)
            continue
        # A new word starts unless the previous token was a word-internal piece
        # (marked by a trailing "@@") or an unknown token. The explicit i == 0
        # check avoids Python's negative indexing wrapping around to the last token.
        if i == 0 or (not tokens[i - 1].endswith("@@") and tokens[i - 1] != "<unk>"):
            wid += 1
        word_ids.append(wid)
    assert len(word_ids) == len(input_ids)
    return word_ids
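
# Worked example (hypothetical tokenizer whose special tokens are "<s>" and
# "</s>" and whose word-internal pieces carry a trailing "@@", fastBPE-style):
# for the token sequence ["<s>", "Ber@@", "lin", "is", "</s>"] the loop yields
# [-1, 0, 0, 1, -1] -- specials get -1, "Ber@@"/"lin" share word id 0, and
# "is" starts word 1.
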
def tokenize(batch, tokenizer) -> dict:
    """Tokenizes a batch of examples and aligns the NER labels with the resulting tokens.

    Args:
        batch: The examples to tokenize.
        tokenizer: The tokenizer to use.

    Returns:
        dict: The tokenized batch.
    """
    tokenized_inputs = tokenizer(batch["tokens"], truncation=True, is_split_into_words=True)
    labels = []
    wids = []
    for idx, label in enumerate(batch["ner_tags"]):
        try:
            word_ids = tokenized_inputs.word_ids(batch_index=idx)
        except ValueError:
            # Slow tokenizers don't provide word_ids(), so reconstruct them.
            word_ids = create_word_ids_from_input_ids(
                tokenizer, tokenized_inputs["input_ids"][idx]
            )
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            # Label only the first sub-token of each word; special tokens and
            # continuation sub-tokens get -100 and are ignored by the loss.
            if word_idx == -1 or word_idx is None or word_idx == previous_word_idx:
                label_ids.append(-100)
            else:
                label_ids.append(label[word_idx])
            previous_word_idx = word_idx
        wids.append(word_ids)
        labels.append(label_ids)
    tokenized_inputs["word_ids"] = wids
    tokenized_inputs["labels"] = labels
    return tokenized_inputs
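
# Alignment sketch (illustrative values): for words ["Berlin", "calling"] with
# ner_tags [5, 0] and word ids [None, 0, 0, 1, None] (fast tokenizer; the
# fallback above uses -1 instead of None), the label_ids come out as
# [-100, 5, -100, 0, -100]: special tokens and continuation sub-tokens are
# masked so cross_entropy skips them.
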
def stringify_ner_tags(batch: dict, tags) -> dict:
    """Stringifies a dataset batch's NER tags."""
    return {"ner_tags_str": [tags.int2str(idx) for idx in batch["ner_tags"]]}

def encode_dataset(split: Dataset, tokenizer):
    """Encodes a dataset split.

    Args:
        split (Dataset): A Dataset object.
        tokenizer: A PreTrainedTokenizer object.

    Returns:
        tuple: The encoded Dataset (without the `word_ids` column), the word ids,
            and the example ids.
    """
    tags = split.features["ner_tags"].feature
    split = split.map(partial(stringify_ner_tags, tags=tags), batched=True)
    remove_columns = split.column_names
    ids = split["id"]
    split = split.map(
        partial(tokenize, tokenizer=tokenizer),
        batched=True,
        remove_columns=remove_columns,
    )
    # Replace None word ids (from fast tokenizers) with -1 so the column is homogeneous.
    word_ids = [[wid if wid is not None else -1 for wid in wids] for wids in split["word_ids"]]
    return split.remove_columns(["word_ids"]), word_ids, ids
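
# Putting the pieces together (a sketch; the dataset arguments and checkpoint
# name are placeholders):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   split = get_data("my_dataset", "default", "validation", 100, False)
#   split_encoded, word_ids, ids = encode_dataset(split, tokenizer)
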
def forward_pass_with_label(batch, model, collator, num_classes: int) -> dict:
    """Runs the forward pass for a batch of examples.

    Args:
        batch: The batch to process.
        model: The model to process the batch with.
        collator: A data collator.
        num_classes (int): Number of classes.

    Returns:
        dict: A dictionary containing `losses`, `preds` and `hidden_states`.
    """
    # Convert dict of lists to list of dicts suitable for data collator
    features = [dict(zip(batch, t)) for t in zip(*batch.values())]

    # Pad inputs and labels and put all tensors on device
    batch = collator(features)
    input_ids = batch["input_ids"].to(device)
    attention_mask = batch["attention_mask"].to(device)
    labels = batch["labels"].to(device)

    with torch.no_grad():
        # Pass data through model
        output = model(input_ids, attention_mask, output_hidden_states=True)

        # logits.size: [batch_size, sequence_length, classes]
        # Predict class with largest logit value on classes axis
        preds = torch.argmax(output.logits, axis=-1).cpu().numpy()  # type: ignore

        # Calculate loss per token after flattening batch dimension with view
        loss = cross_entropy(
            output.logits.view(-1, num_classes), labels.view(-1), reduction="none"
        )

        # Unflatten batch dimension and convert to numpy array
        loss = loss.view(len(input_ids), -1).cpu().numpy()
        hidden_states = output.hidden_states[-1].cpu().numpy()

    return {"losses": loss, "preds": preds, "hidden_states": hidden_states}
def predict(split_encoded: Dataset, model, tokenizer, collator, tags) -> pd.DataFrame:
    """Generates predictions for a given dataset split and returns the results as a dataframe.

    Args:
        split_encoded (Dataset): The dataset to process.
        model: The model to process the dataset with.
        tokenizer: The tokenizer to process the dataset with.
        collator: The data collator to use.
        tags: The tags used in the dataset.

    Returns:
        pd.DataFrame: A dataframe containing token-level predictions.
    """
    split_encoded = split_encoded.map(
        partial(
            forward_pass_with_label,
            model=model,
            collator=collator,
            num_classes=tags.num_classes,
        ),
        batched=True,
        batch_size=8,
    )
    df: pd.DataFrame = split_encoded.to_pandas()  # type: ignore

    df["tokens"] = df["input_ids"].apply(
        lambda x: tokenizer.convert_ids_to_tokens(x)  # type: ignore
    )
    df["labels"] = df["labels"].apply(
        lambda x: ["IGN" if i == -100 else tags.int2str(int(i)) for i in x]
    )
    df["preds"] = df["preds"].apply(lambda x: [model.config.id2label[i] for i in x])

    # Trim per-token columns to the (unpadded) input length of each example.
    df["preds"] = df.apply(lambda x: x["preds"][: len(x["input_ids"])], axis=1)
    df["losses"] = df.apply(lambda x: x["losses"][: len(x["input_ids"])], axis=1)
    df["hidden_states"] = df.apply(lambda x: x["hidden_states"][: len(x["input_ids"])], axis=1)
    df["total_loss"] = df["losses"].apply(sum)

    return df
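
# End-to-end sketch (assumes `model`, `tokenizer` and `split_encoded` were
# created as above, and that the checkpoint's label set matches the dataset's
# tags):
#
#   collator = get_collator(tokenizer)
#   df = predict(split_encoded, model, tokenizer, collator, tags)
#   df[["tokens", "labels", "preds", "total_loss"]].head()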