repo (stringlengths 1-99) | file (stringlengths 13-215) | code (stringlengths 12-59.2M) | file_length (int64 12-59.2M) | avg_line_length (float64 3.82-1.48M) | max_line_length (int64 12-2.51M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
fitclip | fitclip-main/aligner/encoder/videoclip_video_text_encoder.py | import os
from typing import Iterable, Iterator, Optional
import torch
from overrides import overrides
from torchvision import transforms as T
from transformers import AutoTokenizer
from aligner.data.frame_sampler import ConsecutiveFrameSampler, FrameSampler
from aligner.encoder.s3dg import S3DG
from aligner.encoder.video_encoder import TYPE_TRANSFORM, TYPE_VIDEO_INPUT, float_standard_denormalize
from aligner.encoder.video_text_encoder import TYPE_TEXT_INPUT, TYPE_TOKENIZER, VideoTextEncoder
from aligner.encoder.videoclip import MMFusionSeparate
from aligner.transforms import ConvertBHWCtoCBHW, PadToMinFrames
from util.typing_utils import TYPE_PATH
class VideoClipVideoTextEncoder(VideoTextEncoder):
def __init__(self, video_encoder_pretrained_path: Optional[TYPE_PATH] = None,
model_pretrained_path: Optional[TYPE_PATH] = None, num_frames: int = 32, max_tokens: int = 64) -> None:
super().__init__()
self.num_frames = num_frames
self.max_tokens = max_tokens
self.video_encoder = S3DG()
if video_encoder_pretrained_path:
self.video_encoder.load_state_dict(torch.load(video_encoder_pretrained_path))
self.model = MMFusionSeparate(max_video_len=num_frames)
if model_pretrained_path:
self.model.load_state_dict(torch.load(model_pretrained_path))
os.environ["TOKENIZERS_PARALLELISM"] = "0"
self.tokenizer = AutoTokenizer.from_pretrained(self.model.model_name)
@overrides(check_signature=False)
def encode_video(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
batch_size, clip_count = video.shape[:2]
assert batch_size == 1, "Only batch_size = 1 is supported for now."
device = video.device
# FIXME: VideoCLIP uses up to 32 clips per video, which complicates our implementation.
        #   These clips are randomly sampled when there are more than 32 clips.
        #   Each clip is composed of 32 consecutive, non-overlapping frames, and the video is sampled at 30 fps.
video_features = self.video_encoder(video).view(batch_size, clip_count, self.video_encoder.output_size)
video_mask = torch.ones((batch_size, self.num_frames), dtype=torch.bool, device=device)
text = torch.tensor([[self.tokenizer.cls_token_id, self.tokenizer.sep_token_id]], device=device) \
.expand(batch_size, 2)
text_mask = torch.ones((batch_size, 2), dtype=torch.bool, device=device)
return self.model.forward_video(video_features, video_mask, text, text_mask)
@overrides(check_signature=False)
def encode_text(self, text: TYPE_TEXT_INPUT) -> torch.Tensor:
return self.model.forward_text(text["input_ids"], text["attention_mask"])
def _tokenize(self, texts: Iterable[str]) -> TYPE_TEXT_INPUT:
texts = [f"{self.tokenizer.sep_token} {text}" for text in texts]
return self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=self.max_tokens)
@overrides
def get_tokenizer(self) -> TYPE_TOKENIZER:
return self._tokenize
@overrides
def decode_text(self, text: TYPE_TEXT_INPUT) -> Iterator[str]:
return self.tokenizer.batch_decode(text["input_ids"])
@overrides
def get_train_frame_sampler(self) -> FrameSampler:
raise NotImplementedError
@overrides
def get_eval_frame_sampler(self) -> FrameSampler:
return ConsecutiveFrameSampler(self.num_frames, fps=30)
@overrides
def get_train_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
raise NotImplementedError
@overrides
def get_eval_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
return T.Compose([
ConvertBHWCtoCBHW(),
T.ConvertImageDtype(dtype),
T.Resize(224),
T.CenterCrop(224),
PadToMinFrames(self.num_frames, frame_dim=1),
])
@property
@overrides
def should_pad_batch(self) -> bool:
return False
@overrides
def to_bchw(self, t: torch.Tensor) -> torch.Tensor:
return t.permute(0, 2, 1, 3, 4)
@overrides
def denormalize_video_tensor(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return float_standard_denormalize(video)
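# Illustrative sketch (not part of the original fitclip code): a quick sanity check of the clip
# budget described in the FIXME inside `encode_video` above, using only the numbers stated there
# (up to 32 clips, each made of 32 consecutive frames, with the video sampled at 30 fps).
def _demo_videoclip_clip_budget(frames_per_clip: int = 32, max_clips: int = 32, fps: int = 30):
    seconds_per_clip = frames_per_clip / fps  # 32 / 30 ≈ 1.07 s per clip
    max_seconds_covered = seconds_per_clip * max_clips  # ≈ 34.1 s of video covered at most
    return seconds_per_clip, max_seconds_covered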
| 4,268 | 39.657143 | 120 | py |
fitclip | fitclip-main/aligner/encoder/mil_nce_video_text_encoder.py | import re
from typing import Any, Iterable, Iterator, Mapping, Optional, Union
import numpy as np
import torch
from cached_path import cached_path
from overrides import overrides
from torch import nn
from torchvision import transforms as T
from aligner.data.frame_sampler import ConsecutiveFrameSampler, FrameSampler
from aligner.encoder.s3dg import S3DG
from aligner.encoder.video_encoder import TYPE_TRANSFORM, float_standard_denormalize
from aligner.encoder.video_text_encoder import TYPE_TEXT_INPUT, TYPE_TOKENIZER, TYPE_VIDEO_INPUT, VideoTextEncoder
from aligner.transforms import ConvertBHWCtoCBHW, PadToMinFrames
from util.typing_utils import TYPE_PATH
def load_pretrained_video_encoder(path: TYPE_PATH,
map_location: Optional[Union[str, torch.device]] = None) -> Mapping[str, Any]:
checkpoint = torch.load(path, map_location=map_location)
state_dict = get_video_encoder_state_dict_from_pretrained_mil_nce_checkpoint(checkpoint) \
if "state_dict" in checkpoint else checkpoint
# Backward compatibility, also with the MIL-NCE paper pretrained one.
return {k: v for k, v in state_dict.items() if not k.startswith("text_module.")}
def load_pretrained_text_encoder(path: TYPE_PATH,
map_location: Optional[Union[str, torch.device]] = None) -> Mapping[str, Any]:
checkpoint = torch.load(path, map_location=map_location)
if "state_dict" in checkpoint:
return get_text_encoder_state_dict_from_pretrained_mil_nce_checkpoint(checkpoint)
elif any(k.startswith("text_module.") for k in checkpoint):
# Backward compatibility, also with a MIL-NCE paper pretrained one.
prefix = "text_module."
return {k[len(prefix):]: v for k, v in checkpoint.items() if k.startswith(prefix)}
else:
return checkpoint
def get_video_encoder_state_dict_from_pretrained_mil_nce_checkpoint(
checkpoint: Mapping[str, Any]) -> Mapping[str, torch.Tensor]:
pl_module_state_dict = checkpoint["state_dict"]
# Look for the corresponding encoder, with backward compatibility.
prefix = "encoder." if any(k.startswith("encoder.") for k in pl_module_state_dict.keys()) else "video_encoder."
return {k[len(prefix):]: v for k, v in pl_module_state_dict.items() if k.startswith(prefix)}
def get_text_encoder_state_dict_from_pretrained_mil_nce_checkpoint(
checkpoint: Mapping[str, Any]) -> Mapping[str, torch.Tensor]:
pl_module_state_dict = checkpoint["state_dict"]
# Look for the corresponding encoder, with backward compatibility.
prefix = "encoder.text_module." if any(k.startswith("encoder.text_module.") for k in pl_module_state_dict.keys()) \
else "text_encoder."
return {k[len(prefix):]: v for k, v in pl_module_state_dict.items() if k.startswith(prefix)}
class GlobalMaxPool1d(nn.Module):
@overrides(check_signature=False)
def forward(self, t: torch.Tensor) -> torch.Tensor:
return t.max(dim=1)[0]
class MilNceTextEncoder(nn.Module):
def __init__(self, output_size: int = 512, vocab_size: int = 66250, word_embedding_size: int = 300,
embedding: Optional[torch.Tensor] = None, hidden_size: int = 2048) -> None:
super().__init__()
# noinspection SpellCheckingInspection
self.word_embd = nn.Embedding(vocab_size, word_embedding_size) if embedding is None \
else nn.Embedding.from_pretrained(embedding)
self.fc1 = nn.Linear(self.word_embd.embedding_dim, hidden_size)
self.relu = nn.ReLU(inplace=True)
self.max_pooling = GlobalMaxPool1d()
self.fc2 = nn.Linear(hidden_size, output_size)
@overrides(check_signature=False)
def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
text = self.word_embd(input_ids)
text = self.relu(self.fc1(text))
text = self.max_pooling(text)
return self.fc2(text)
def truncate_or_pad_1d_tensor(tensor: torch.Tensor, size: int, fill_value: Any = 0) -> torch.Tensor:
if len(tensor) >= size:
return tensor[:size]
else:
padded_tensor = torch.full((size,), fill_value, dtype=tensor.dtype, device=tensor.device,
requires_grad=tensor.requires_grad)
padded_tensor[:len(tensor)] = tensor
return padded_tensor
class MilNceTokenizer:
RE_WORD = re.compile(r"[\w']+")
def __init__(self, vocab: Mapping[str, int], max_tokens: int = 20, lowercase: bool = True) -> None:
super().__init__()
self.vocab = vocab
self.max_tokens = max_tokens
self.lowercase = lowercase
self.indices_to_tokens = {i: token for token, i in vocab.items()}
def _tokenize(self, text: str) -> Iterator[str]:
if self.lowercase:
text = text.lower()
return self.RE_WORD.findall(text)
def _index(self, tokens: Iterable[str]) -> torch.Tensor:
tokens_in_vocab_tensor = torch.tensor([self.vocab[word] for word in tokens if word in self.vocab],
dtype=torch.long)
return truncate_or_pad_1d_tensor(tokens_in_vocab_tensor, self.max_tokens)
def __call__(self, text: str) -> TYPE_TEXT_INPUT:
return {"input_ids": self._index(self._tokenize(text))}
def decode(self, ids: Iterable[int]) -> str:
return " ".join(self.indices_to_tokens[i] for i in ids if i != 0)
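# Illustrative sketch (not part of the original code): how `MilNceTokenizer` behaves with a toy
# vocabulary. The words and ids below are made up; the real vocabulary is loaded from
# `s3d_dict.npy` by `MilNceVideoTextEncoder` further down.
def _demo_mil_nce_tokenizer() -> None:
    toy_vocab = {"a": 1, "person": 2, "cooking": 3}
    tokenizer = MilNceTokenizer(vocab=toy_vocab, max_tokens=5)
    encoded = tokenizer("A person is cooking!")
    # Out-of-vocabulary words ("is") are dropped and the result is zero-padded to `max_tokens`:
    assert encoded["input_ids"].tolist() == [1, 2, 3, 0, 0]
    assert tokenizer.decode(encoded["input_ids"].tolist()) == "a person cooking"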
class MilNceVideoTextEncoder(VideoTextEncoder):
def __init__(self, vocab_path: TYPE_PATH = "https://www.rocq.inria.fr/cluster-willow/amiech/howto100m/s3d_dict.npy",
pretrained_path: Optional[TYPE_PATH] = None, max_tokens: int = 20, num_frames: int = 16) -> None:
super().__init__()
self.video_encoder = S3DG()
self.text_encoder = MilNceTextEncoder()
vocab: Mapping[str, int] = {t.item(): i + 1 for i, t in enumerate(np.load(cached_path(vocab_path)))}
self.tokenizer = MilNceTokenizer(vocab=vocab, max_tokens=max_tokens)
self.num_frames = num_frames
if pretrained_path:
pretrained_path = cached_path(pretrained_path)
self.video_encoder.load_state_dict(load_pretrained_video_encoder(pretrained_path, # noqa
map_location="cpu"))
self.text_encoder.load_state_dict(load_pretrained_text_encoder(pretrained_path, # noqa
map_location="cpu"))
@overrides(check_signature=False)
def encode_video(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return self.video_encoder(video)
@overrides(check_signature=False)
def encode_text(self, text: TYPE_TEXT_INPUT) -> torch.Tensor:
return self.text_encoder(text["input_ids"])
def _tokenize(self, texts: Iterable[str]) -> TYPE_TEXT_INPUT:
tokenized = [self.tokenizer(text) for text in texts]
return {k: torch.stack([t[k] for t in tokenized]) for k in next(iter(tokenized), [])}
@overrides
def get_tokenizer(self) -> TYPE_TOKENIZER:
return self._tokenize
@overrides
def decode_text(self, text: TYPE_TEXT_INPUT) -> Iterator[str]:
for text_instance in text["input_ids"]:
yield self.tokenizer.decode(text_instance.tolist())
@overrides
def get_train_frame_sampler(self) -> FrameSampler:
raise NotImplementedError
@overrides
def get_eval_frame_sampler(self) -> FrameSampler:
return ConsecutiveFrameSampler(self.num_frames, fps=5)
@overrides
def get_train_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
raise NotImplementedError
@overrides
def get_eval_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
return T.Compose([
ConvertBHWCtoCBHW(),
T.ConvertImageDtype(dtype),
T.Resize(224),
T.CenterCrop(224),
PadToMinFrames(self.num_frames, frame_dim=1),
])
@property
@overrides
def should_pad_batch(self) -> bool:
return False
@overrides
def to_bchw(self, t: torch.Tensor) -> torch.Tensor:
return t.permute(0, 2, 1, 3, 4)
@overrides
def denormalize_video_tensor(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return float_standard_denormalize(video)
| 8,365 | 41.040201 | 120 | py |
fitclip | fitclip-main/aligner/encoder/video_text_encoder.py | from abc import abstractmethod
from typing import Callable, Iterable, Iterator, Mapping, Tuple
import torch
from overrides import overrides
from aligner.encoder.video_encoder import TYPE_VIDEO_INPUT, VideoEncoder
TYPE_TEXT_INPUT = Mapping[str, torch.Tensor]
TYPE_OUTPUT = Tuple[torch.Tensor, torch.Tensor]
TYPE_TOKENIZER = Callable[[Iterable[str]], Mapping[str, torch.Tensor]]
class VideoTextEncoder(VideoEncoder):
@abstractmethod
def encode_text(self, text: TYPE_TEXT_INPUT) -> torch.Tensor:
raise NotImplementedError
@overrides(check_signature=False)
def forward(self, video: TYPE_VIDEO_INPUT, text: TYPE_TEXT_INPUT) -> TYPE_OUTPUT:
return self.encode_video(video), self.encode_text(text)
@abstractmethod
def get_tokenizer(self) -> TYPE_TOKENIZER:
raise NotImplementedError
@abstractmethod
def decode_text(self, text: TYPE_TEXT_INPUT) -> Iterator[str]:
"""Decodes a batch of texts."""
raise NotImplementedError
| 994 | 30.09375 | 85 | py |
fitclip | fitclip-main/aligner/encoder/video_encoder.py | from abc import abstractmethod
from typing import Callable, Optional, Tuple
import torch
from overrides import overrides
from torch import nn
from aligner.data.frame_sampler import FrameSampler
TYPE_VIDEO_INPUT = torch.Tensor
TYPE_TRANSFORM = Callable[[torch.Tensor], torch.Tensor]
class VideoEncoder(nn.Module):
@abstractmethod
def encode_video(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
raise NotImplementedError
@overrides(check_signature=False)
def forward(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return self.encode_video(video)
@abstractmethod
def get_train_frame_sampler(self) -> FrameSampler:
raise NotImplementedError
@abstractmethod
def get_eval_frame_sampler(self) -> FrameSampler:
raise NotImplementedError
@abstractmethod
def get_train_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
raise NotImplementedError
@abstractmethod
def get_eval_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
raise NotImplementedError
@property
# Don't set as abstract method to avoid some boilerplate in subclasses.
# See https://stackoverflow.com/a/42529760/1165181
def should_pad_batch(self) -> bool:
raise NotImplementedError
@abstractmethod
def to_bchw(self, t: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@abstractmethod
def denormalize_video_tensor(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
"""Converts a transformed video tensor into an unsigned 8-bit integer tensor in the range 0-255."""
raise NotImplementedError
def float_standard_denormalize(video: TYPE_VIDEO_INPUT, mean: Optional[Tuple[float, float, float]] = None,
std: Optional[Tuple[float, float, float]] = None) -> torch.Tensor:
if std is not None:
video *= torch.tensor(std, device=video.device, dtype=video.dtype).view(-1, 1, 1)
if mean is not None:
video += torch.tensor(mean, device=video.device, dtype=video.dtype).view(-1, 1, 1)
return (video * 255).to(torch.uint8) # noqa
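# Illustrative sketch (not part of the original code): round-tripping a single normalized frame
# through `float_standard_denormalize`. The mean/std values are the usual ImageNet statistics,
# used here only as an example; the function itself is agnostic to the specific values.
def _demo_float_standard_denormalize() -> torch.Tensor:
    mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
    frame = torch.rand(3, 4, 4)  # a fake (C, H, W) frame in [0, 1]
    normalized = (frame - torch.tensor(mean).view(-1, 1, 1)) / torch.tensor(std).view(-1, 1, 1)
    # Note the function modifies its input in place and maps it back to uint8 in [0, 255].
    denormalized = float_standard_denormalize(normalized, mean=mean, std=std)
    assert denormalized.dtype == torch.uint8 and denormalized.shape == frame.shape
    return denormalized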
| 2,121 | 32.15625 | 107 | py |
fitclip | fitclip-main/aligner/encoder/slip.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# Copied from https://github.com/facebookresearch/SLIP/tree/c6faf5d
import gzip
import html
from collections import OrderedDict
from functools import lru_cache
from typing import Iterable, Iterator
import ftfy
import numpy as np
import regex as re
import timm
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from cached_path import cached_path
from torch import autograd
@lru_cache()
def default_bpe():
return cached_path("https://github.com/facebookresearch/SLIP/raw/main/bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
    Returns a dict mapping utf-8 byte values to corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    It also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
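# Illustrative check (not part of the original SLIP code) of the properties the docstring above
# claims: the mapping covers all 256 byte values with distinct, printable, non-whitespace
# characters, so BPE can treat arbitrary UTF-8 bytes as ordinary visible symbols.
def _demo_bytes_to_unicode() -> None:
    mapping = bytes_to_unicode()
    assert sorted(mapping) == list(range(256))
    assert len(set(mapping.values())) == 256
    assert all(c.isprintable() and not c.isspace() for c in mapping.values())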
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer:
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + '</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join(self.decoder[token] for token in tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def __call__(self, texts, context_length=77):
if isinstance(texts, str):
texts = [texts]
sot_token = self.encoder["<|startoftext|>"]
eot_token = self.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
tokens = tokens[:context_length]
result[i, :len(tokens)] = torch.tensor(tokens)
if len(result) == 1:
return result[0]
return result
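# Illustrative sketch (not part of the original SLIP code): the intended round trip through
# `SimpleTokenizer`. Instantiating it downloads the BPE merges file via `cached_path`, so this
# is a usage sketch rather than something meant to run at import time.
def _demo_simple_tokenizer() -> None:
    tokenizer = SimpleTokenizer()
    ids = tokenizer.encode("a photo of a dog")
    assert tokenizer.decode(ids).strip() == "a photo of a dog"
    # `__call__` adds <|startoftext|>/<|endoftext|> and pads each text to `context_length`:
    tokens = tokenizer(["a photo of a dog", "a photo of a cat"], context_length=77)
    assert tokens.shape == (2, 77) and tokens.dtype == torch.long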
def is_dist_avail_and_initialized() -> bool:
return dist.is_available() and dist.is_initialized()
def get_world_size() -> int:
return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank() -> int:
return dist.get_rank() if is_dist_avail_and_initialized() else 0
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_all,
tensor,
async_op=False # performance opt
)
tensor_list.append(tensor_all)
return [torch.cat(tensor_all) for tensor_all in tensor_list]
class GatherLayer(autograd.Function): # noqa
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x): # noqa
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def all_gather_batch_with_grad(tensors: Iterable[torch.Tensor]) -> Iterator[torch.Tensor]:
"""
Performs all_gather operation on the provided tensors.
Graph remains connected for backward grad computation.
"""
return tensors if get_world_size() == 1 else [torch.cat(GatherLayer.apply(tensor)) for tensor in tensors]
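# Illustrative sketch (not part of the original SLIP code): when `torch.distributed` has not
# been initialized, `get_world_size()` is 1 and both gather helpers simply return their inputs,
# which is what makes the losses below usable in single-process runs.
def _demo_single_process_gather() -> None:
    t = torch.randn(4, 8)
    assert not is_dist_avail_and_initialized()
    assert all_gather_batch([t])[0] is t
    assert all_gather_batch_with_grad([t])[0] is t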
class CLIPLoss(nn.Module):
def __init__(self):
super().__init__()
self.labels = None
self.last_local_batch_size = None
def forward(self, outputs):
image_embed = outputs['image_embed']
text_embed = outputs['text_embed']
logit_scale = outputs['logit_scale']
local_batch_size = image_embed.size(0)
if local_batch_size != self.last_local_batch_size:
self.labels = local_batch_size * get_rank() + torch.arange(
local_batch_size, device=image_embed.device
)
self.last_local_batch_size = local_batch_size
# normalized features
image_embed = F.normalize(image_embed, dim=-1, p=2)
text_embed = F.normalize(text_embed, dim=-1, p=2)
# gather features from all GPUs
image_embed_all, text_embed_all = all_gather_batch([image_embed, text_embed])
# cosine similarity as logits
logits_per_image = logit_scale * image_embed @ text_embed_all.t()
logits_per_text = logit_scale * text_embed @ image_embed_all.t()
loss = (F.cross_entropy(logits_per_image, self.labels) + F.cross_entropy(logits_per_text, self.labels)) / 2
# compute accuracy
with torch.no_grad():
pred = torch.argmax(logits_per_image, dim=-1)
correct = pred.eq(self.labels).sum()
acc = 100 * correct / local_batch_size
return {'loss': loss, 'clip_loss': loss, 'clip_acc': acc}
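# Illustrative sketch (not part of the original SLIP code): calling `CLIPLoss` on random
# single-process embeddings. The dictionary keys match what `CLIP.forward` returns further down.
def _demo_clip_loss() -> None:
    batch_size, embed_dim = 4, 16
    outputs = {"image_embed": torch.randn(batch_size, embed_dim),
               "text_embed": torch.randn(batch_size, embed_dim),
               "logit_scale": torch.tensor(1 / 0.07)}
    metrics = CLIPLoss()(outputs)
    assert set(metrics) == {"loss", "clip_loss", "clip_acc"} and metrics["loss"].ndim == 0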
class SIMCLRLoss(nn.Module):
"""
This is the SimCLR loss in https://arxiv.org/abs/2002.05709
The embedding vectors are assumed to have size (2 x batch_size, embedding_dim) and
the memory layout that can be reshaped into shape (2, batch_size, embedding_dim).
This memory layout is consistent with the SimCLR collator in
https://github.com/facebookresearch/vissl/blob/master/vissl/data/collators/simclr_collator.py
Config params:
temperature (float): the temperature to be applied on the logits
"""
def __init__(self, temperature=0.1):
super().__init__()
self.tau = temperature
self.labels = None
self.masks = None
self.last_local_batch_size = None
def forward(self, outputs):
q_a = outputs['aug1_embed']
q_b = outputs['aug2_embed']
q_a = F.normalize(q_a, dim=-1, p=2)
q_b = F.normalize(q_b, dim=-1, p=2)
local_batch_size = q_a.size(0)
k_a, k_b = all_gather_batch_with_grad([q_a, q_b])
if local_batch_size != self.last_local_batch_size:
self.labels = local_batch_size * get_rank() + torch.arange(
local_batch_size, device=q_a.device
)
total_batch_size = local_batch_size * get_world_size()
self.masks = F.one_hot(self.labels, total_batch_size) * 1e9
self.last_local_batch_size = local_batch_size
logits_aa = torch.matmul(q_a, k_a.transpose(0, 1)) / self.tau
logits_aa = logits_aa - self.masks
logits_bb = torch.matmul(q_b, k_b.transpose(0, 1)) / self.tau
logits_bb = logits_bb - self.masks
logits_ab = torch.matmul(q_a, k_b.transpose(0, 1)) / self.tau
logits_ba = torch.matmul(q_b, k_a.transpose(0, 1)) / self.tau
loss_a = F.cross_entropy(torch.cat([logits_ab, logits_aa], dim=1), self.labels)
loss_b = F.cross_entropy(torch.cat([logits_ba, logits_bb], dim=1), self.labels)
loss = (loss_a + loss_b) / 2 # divide by 2 to average over all samples
# compute accuracy
with torch.no_grad():
pred = torch.argmax(torch.cat([logits_ab, logits_aa], dim=1), dim=-1)
correct = pred.eq(self.labels).sum()
acc = 100 * correct / local_batch_size
return {'loss': loss, 'ssl_loss': loss, 'ssl_acc': acc}
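# Illustrative sketch (not part of the original SLIP code): calling `SIMCLRLoss` on two random
# augmented-view embeddings in a single process. The keys match what `SIMCLR.forward` and
# `SLIP.forward` return further down.
def _demo_simclr_loss() -> None:
    batch_size, embed_dim = 4, 16
    outputs = {"aug1_embed": torch.randn(batch_size, embed_dim),
               "aug2_embed": torch.randn(batch_size, embed_dim)}
    metrics = SIMCLRLoss(temperature=0.1)(outputs)
    assert set(metrics) == {"loss", "ssl_loss", "ssl_acc"}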
class SLIPLoss(nn.Module):
def __init__(self, ssl_loss, ssl_scale):
super().__init__()
self.clip_loss = CLIPLoss()
self.ssl_loss = ssl_loss
self.ssl_scale = ssl_scale
def forward(self, outputs):
clip_loss_dict = self.clip_loss(outputs)
clip_loss = clip_loss_dict['clip_loss']
clip_acc = clip_loss_dict['clip_acc']
ssl_loss_dict = self.ssl_loss(outputs)
ssl_loss = ssl_loss_dict['ssl_loss']
ssl_acc = ssl_loss_dict['ssl_acc']
return {'loss': clip_loss + self.ssl_scale * ssl_loss,
'clip_loss': clip_loss,
'clip_acc': clip_acc,
'ssl_loss': ssl_loss,
'ssl_acc': ssl_acc}
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor): # noqa
return x * torch.sigmoid(1.702 * x) # noqa
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
vision_width: int,
vision_model: nn.Module,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int,
**kwargs,
):
super().__init__()
self.context_length = context_length
self.vision_width = vision_width
self.visual = vision_model
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.image_projection = nn.Parameter(torch.empty(vision_width, embed_dim))
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
nn.init.normal_(self.image_projection, std=self.vision_width ** -0.5)
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def encode_image(self, image):
x = self.visual(image)
x = x @ self.image_projection
return x
def encode_text(self, text):
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_embed = self.encode_image(image)
text_embed = self.encode_text(text)
return {'image_embed': image_embed,
'text_embed': text_embed,
'logit_scale': self.logit_scale.exp()}
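# Illustrative sketch (not part of the original SLIP code): the additive causal mask that
# `CLIP.build_attention_mask` produces, shown for a tiny context length of 3. Each position may
# attend to itself and earlier positions; future positions are masked with -inf.
def _demo_causal_attention_mask() -> torch.Tensor:
    mask = torch.empty(3, 3)
    mask.fill_(float("-inf"))
    mask.triu_(1)
    # tensor([[0., -inf, -inf],
    #         [0.,   0., -inf],
    #         [0.,   0.,   0.]])
    return mask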
def _build_mlp(in_dim, mlp_dim, out_dim):
return nn.Sequential(OrderedDict([
("layer1", nn.Linear(in_dim, mlp_dim)),
("bn1", nn.SyncBatchNorm(mlp_dim)),
("relu1", nn.ReLU(inplace=True)),
("layer2", nn.Linear(mlp_dim, mlp_dim)),
("bn2", nn.SyncBatchNorm(mlp_dim)),
("relu2", nn.ReLU(inplace=True)),
("layer3", nn.Linear(mlp_dim, out_dim)),
]))
class SIMCLR(nn.Module):
def __init__(self,
# vision
vision_width: int,
vision_model: nn.Module,
# ssl
ssl_mlp_dim: int,
ssl_emb_dim: int,
**kwargs, # noqa
):
super().__init__()
self.vision_width = vision_width
self.visual = vision_model
self.image_mlp = _build_mlp(in_dim=vision_width, mlp_dim=ssl_mlp_dim, out_dim=ssl_emb_dim)
def encode_image(self, image):
return self.visual(image)
def forward(self, aug1, aug2):
h1 = self.visual(aug1)
h2 = self.visual(aug2)
aug1_embed = self.image_mlp(h1)
aug2_embed = self.image_mlp(h2)
return {'aug1_embed': aug1_embed, 'aug2_embed': aug2_embed}
class SLIP(CLIP):
def __init__(self, ssl_mlp_dim: int, ssl_emb_dim: int, **kwargs):
super().__init__(**kwargs)
self.image_mlp = _build_mlp(in_dim=self.vision_width, mlp_dim=ssl_mlp_dim, out_dim=ssl_emb_dim)
def forward(self, image, text, aug1, aug2): # noqa
aug1_embed = self.image_mlp(self.visual(aug1))
aug2_embed = self.image_mlp(self.visual(aug2))
image_embed = self.encode_image(image)
text_embed = self.encode_text(text)
return {'image_embed': image_embed, 'text_embed': text_embed, 'logit_scale': self.logit_scale.exp(),
'aug1_embed': aug1_embed, 'aug2_embed': aug2_embed}
def get_loss(model, ssl_temp, ssl_scale):
if model.startswith('SLIP'):
ssl_loss = SIMCLRLoss(temperature=ssl_temp)
return SLIPLoss(ssl_loss, ssl_scale)
if model.startswith('CLIP'):
return CLIPLoss()
if model.startswith('SIMCLR'):
return SIMCLRLoss(temperature=ssl_temp)
def get_metric_names(model):
if model.startswith('SLIP'):
return ['loss', 'clip_loss', 'ssl_loss', 'clip_acc', 'ssl_acc']
elif model.startswith('CLIP'):
return ['loss', 'clip_loss', 'clip_acc']
else:
return ['loss', 'ssl_loss', 'ssl_acc']
@timm.models.registry.register_model
def vit_small_mocov3_patch16_224(**kwargs):
return timm.models.vision_transformer._create_vision_transformer("vit_small_patch16_224", patch_size=16,
embed_dim=384, depth=12, num_heads=12, **kwargs)
def CLIP_VITS16(**kwargs):
vision_model = timm.create_model('vit_small_mocov3_patch16_224', num_classes=0)
model = CLIP(embed_dim=512, vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, transformer_layers=12, **kwargs)
return model
def SIMCLR_VITS16(**kwargs):
vision_model = timm.create_model('vit_small_mocov3_patch16_224', num_classes=0)
model = SIMCLR(vision_width=384, vision_model=vision_model, **kwargs)
return model
def SLIP_VITS16(**kwargs):
vision_model = timm.create_model('vit_small_mocov3_patch16_224', num_classes=0)
model = SLIP(embed_dim=512, vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, transformer_layers=12, **kwargs)
return model
def CLIP_VITB16(**kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0)
model = CLIP(embed_dim=512, vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, transformer_layers=12, **kwargs)
return model
def SIMCLR_VITB16(**kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0)
model = SIMCLR(vision_width=768, vision_model=vision_model, **kwargs)
return model
def SLIP_VITB16(**kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0)
model = SLIP(embed_dim=512, vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, transformer_layers=12, **kwargs)
return model
def CLIP_VITL16(**kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0)
model = CLIP(embed_dim=512, vision_width=1024, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, transformer_layers=12, **kwargs)
return model
def SIMCLR_VITL16(**kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0)
model = SIMCLR(vision_width=1024, vision_model=vision_model, **kwargs)
return model
def SLIP_VITL16(**kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0)
model = SLIP(embed_dim=512, vision_width=1024, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, transformer_layers=12, **kwargs)
return model
| 22,773 | 34.640063 | 120 | py |
fitclip | fitclip-main/aligner/encoder/s3dg.py | # Initially copied from the MIL-NCE repo.
"""Contains the definition for Gated Separable 3D network (S3D-G). """
from typing import Literal, Tuple
import torch
from overrides import overrides
from torch import nn
from torch.nn.common_types import _size_3_t, _size_6_t
class InceptionBlock(nn.Module):
def __init__(self, input_dim: int, num_outputs_0_0a: int, num_outputs_1_0a: int, num_outputs_1_0b: int,
num_outputs_2_0a: int, num_outputs_2_0b: int, num_outputs_3_0b: int, gating: bool = True) -> None:
super().__init__()
self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, kernel_size=1)
self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, kernel_size=1)
self.conv_b1_b = STConv3D(num_outputs_1_0a, num_outputs_1_0b, kernel_size=3, padding=1, separable=True)
self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, kernel_size=1)
self.conv_b2_b = STConv3D(num_outputs_2_0a, num_outputs_2_0b, kernel_size=3, padding=1, separable=True)
self.maxpool_b3 = torch.nn.MaxPool3d(kernel_size=3, stride=1, padding=1)
self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, 1)
self.gating = gating
self.output_dim = num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b
if gating:
self.gating_b0 = SelfGating(num_outputs_0_0a)
self.gating_b1 = SelfGating(num_outputs_1_0b)
self.gating_b2 = SelfGating(num_outputs_2_0b)
self.gating_b3 = SelfGating(num_outputs_3_0b)
@overrides(check_signature=False)
def forward(self, input_: torch.Tensor) -> torch.Tensor:
b0 = self.conv_b0(input_)
b1 = self.conv_b1_a(input_)
b1 = self.conv_b1_b(b1)
b2 = self.conv_b2_a(input_)
b2 = self.conv_b2_b(b2)
b3 = self.maxpool_b3(input_)
b3 = self.conv_b3_b(b3)
if self.gating:
b0 = self.gating_b0(b0)
b1 = self.gating_b1(b1)
b2 = self.gating_b2(b2)
b3 = self.gating_b3(b3)
return torch.cat((b0, b1, b2, b3), dim=1)
class SelfGating(nn.Module):
"""Feature gating as used in S3D-G. """
def __init__(self, input_dim: int) -> None:
super().__init__()
self.fc = nn.Linear(input_dim, input_dim)
self.sigmoid = nn.modules.activation.Sigmoid()
@overrides(check_signature=False)
def forward(self, input_: torch.Tensor) -> torch.Tensor:
spatiotemporal_average = input_.mean(dim=[2, 3, 4])
weights = self.fc(spatiotemporal_average)
weights = self.sigmoid(weights)
return weights[:, :, None, None, None] * input_
def _size3_to_spatial_temporal(size: _size_3_t, fill_value: int) -> Tuple[_size_3_t, _size_3_t]:
size = nn.modules.conv._triple(size)
return (fill_value, size[1], size[2]), (size[0], fill_value, fill_value)
class STConv3D(nn.Module):
def __init__(self, input_dim: int, output_dim: int, kernel_size: _size_3_t, stride: _size_3_t = 1,
padding: _size_3_t = 0, separable: bool = False) -> None:
super().__init__()
self.separable = separable
self.relu = nn.ReLU(inplace=True)
if separable:
assert (isinstance(kernel_size, int) and kernel_size != 1) or kernel_size[0] != 1
spatial_kernel_size, temporal_kernel_size = _size3_to_spatial_temporal(kernel_size, fill_value=1)
spatial_stride, temporal_stride = _size3_to_spatial_temporal(stride, fill_value=1)
spatial_padding, temporal_padding = _size3_to_spatial_temporal(padding, fill_value=0)
self.conv1 = nn.Conv3d(input_dim, output_dim, kernel_size=spatial_kernel_size, stride=spatial_stride,
padding=spatial_padding, bias=False)
self.conv2 = nn.Conv3d(output_dim, output_dim, kernel_size=temporal_kernel_size, stride=temporal_stride,
padding=temporal_padding, bias=False)
self.bn2 = nn.BatchNorm3d(output_dim)
else:
self.conv1 = nn.Conv3d(input_dim, output_dim, kernel_size=kernel_size, stride=stride, # noqa
padding=padding, bias=False)
self.bn1 = nn.BatchNorm3d(output_dim)
@overrides(check_signature=False)
def forward(self, input_: torch.Tensor) -> torch.Tensor:
out = self.relu(self.bn1(self.conv1(input_)))
if self.separable:
out = self.relu(self.bn2(self.conv2(out)))
return out
def _pad_top_bottom(kernel_dim: int, stride_val: int) -> Tuple[int, int]:
pad_along = max(kernel_dim - stride_val, 0)
pad_top_ = pad_along // 2
pad_bottom_ = pad_along - pad_top_
return pad_top_, pad_bottom_
def _get_padding_shape(kernel_size: _size_3_t, stride: _size_3_t) -> _size_6_t:
kernel_size = nn.modules.conv._triple(kernel_size)
stride = nn.modules.conv._triple(stride)
padding_shape = [padding_value
for pair in zip(kernel_size, stride)
for padding_value in _pad_top_bottom(*pair)]
depth_top = padding_shape.pop(0)
depth_bottom = padding_shape.pop(0)
padding_shape.append(depth_top)
padding_shape.append(depth_bottom)
return tuple(padding_shape)
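# Illustrative check (not part of the original MIL-NCE code): the TF-style "SAME" padding
# amounts for, e.g., a 3x3x3 pooling window with stride (1, 2, 2). Each (kernel, stride) pair
# contributes max(kernel - stride, 0) total padding, split as evenly as possible, and the depth
# pair is moved to the end of the 6-tuple passed to `nn.ConstantPad3d`.
def _demo_same_padding_shape() -> None:
    assert _pad_top_bottom(3, 1) == (1, 1) and _pad_top_bottom(3, 2) == (0, 1)
    assert _get_padding_shape(3, (1, 2, 2)) == (0, 1, 0, 1, 1, 1)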
class MaxPool3dTFPadding(torch.nn.Module):
def __init__(self, kernel_size: _size_3_t, stride: _size_3_t, padding: Literal["SAME"] = "SAME") -> None:
super().__init__()
if padding == "SAME":
self.padding_shape = _get_padding_shape(kernel_size, stride)
self.pad = torch.nn.ConstantPad3d(self.padding_shape, 0)
else:
raise ValueError(f"Padding strategy not supported: {padding}")
self.pool = torch.nn.MaxPool3d(kernel_size, stride, ceil_mode=True)
@overrides(check_signature=False)
def forward(self, input_: torch.Tensor) -> torch.Tensor:
input_ = self.pad(input_)
return self.pool(input_)
class S3DG(nn.Module):
def __init__(self, embedding_size: int = 512, space_to_depth: bool = True,
init: Literal["default", "kaiming_normal"] = "default", use_last_layer: bool = True) -> None:
super().__init__()
self.use_last_layer = use_last_layer
self.space_to_depth = space_to_depth
if space_to_depth:
self.conv1 = STConv3D(24, 64, kernel_size=(2, 4, 4), stride=1, padding=(1, 2, 2), separable=False) # noqa
else:
self.conv1 = STConv3D(3, 64, kernel_size=(3, 7, 7), stride=2, padding=(1, 3, 3), separable=False) # noqa
self.conv_2b = STConv3D(64, 64, kernel_size=1, separable=False)
self.conv_2c = STConv3D(64, 192, kernel_size=3, padding=1, separable=True)
self.gating = SelfGating(192)
self.maxpool_2a = MaxPool3dTFPadding(kernel_size=(1, 3, 3), stride=(1, 2, 2))
self.maxpool_3a = MaxPool3dTFPadding(kernel_size=(1, 3, 3), stride=(1, 2, 2))
self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32)
self.mixed_3c = InceptionBlock(self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64)
self.maxpool_4a = MaxPool3dTFPadding(kernel_size=3, stride=2)
self.mixed_4b = InceptionBlock(self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64)
self.mixed_4c = InceptionBlock(self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64)
self.mixed_4d = InceptionBlock(self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64)
self.mixed_4e = InceptionBlock(self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64)
self.mixed_4f = InceptionBlock(self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128)
self.maxpool_5a = self.maxPool3d_5a_2x2 = MaxPool3dTFPadding(kernel_size=2, stride=2)
self.mixed_5b = InceptionBlock(self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128)
self.mixed_5c = InceptionBlock(self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128)
self.fc = nn.Linear(self.mixed_5c.output_dim, embedding_size)
if init == "kaiming_normal":
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode="fan_in", nonlinearity="relu")
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@property
def output_size(self) -> int:
return self.fc.out_features if self.use_last_layer else self.mixed_5c.output_dim
@staticmethod
def _space_to_depth(input_: torch.Tensor) -> torch.Tensor:
B, C, T, H, W = input_.shape
input_ = input_.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
input_ = input_.permute(0, 3, 5, 7, 1, 2, 4, 6)
input_ = input_.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)
return input_
@overrides(check_signature=False)
def forward(self, input_: torch.Tensor) -> torch.Tensor:
if self.space_to_depth:
input_ = self._space_to_depth(input_)
net = self.conv1(input_)
if self.space_to_depth:
net = net[:, :, 1:, 1:, 1:]
net = self.maxpool_2a(net)
net = self.conv_2b(net)
net = self.conv_2c(net)
if self.gating:
net = self.gating(net)
net = self.maxpool_3a(net)
net = self.mixed_3b(net)
net = self.mixed_3c(net)
net = self.maxpool_4a(net)
net = self.mixed_4b(net)
net = self.mixed_4c(net)
net = self.mixed_4d(net)
net = self.mixed_4e(net)
net = self.mixed_4f(net)
net = self.maxpool_5a(net)
net = self.mixed_5b(net)
net = self.mixed_5c(net)
net = torch.mean(net, dim=(2, 3, 4))
if self.use_last_layer:
return self.fc(net)
else:
return net
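# Illustrative sketch (not part of the original MIL-NCE code): the shape effect of
# `S3DG._space_to_depth`, which folds 2x2x2 spatio-temporal blocks into the channel dimension
# (hence `conv1` taking 24 = 3 * 8 input channels when `space_to_depth` is enabled).
def _demo_space_to_depth_shape() -> None:
    video = torch.rand(2, 3, 16, 224, 224)  # (B, C, T, H, W)
    out = S3DG._space_to_depth(video)
    assert out.shape == (2, 24, 8, 112, 112)  # (B, 8 * C, T // 2, H // 2, W // 2)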
| 9,814 | 43.817352 | 118 | py |
fitclip | fitclip-main/aligner/encoder/frozen_in_time.py | # Originally from https://github.com/m-bain/frozen-in-time/blob/ba54e43/model/model.py
import logging
import sys
from typing import Any, Dict, Literal, Mapping, MutableMapping, Optional, Tuple, Union
import numpy as np
import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from cached_path import TYPE_PATH, cached_path
from transformers import AutoModel
from aligner.encoder import frozen_in_time_stub
from aligner.encoder.video_transformer import SpaceTimeTransformer
LOGGER = logging.getLogger(__name__)
STATE_DICT_MODULE_KEY = "module."
def state_dict_data_parallel_fix(load_state_dict: MutableMapping[str, Any],
curr_state_dict: MutableMapping[str, Any]) -> MutableMapping[str, Any]:
first_load_key = next(iter(load_state_dict.keys()))
first_curr_key = next(iter(curr_state_dict.keys()))
if not first_curr_key.startswith(STATE_DICT_MODULE_KEY) and first_load_key.startswith(STATE_DICT_MODULE_KEY):
return {k[len(STATE_DICT_MODULE_KEY):]: v for k, v in load_state_dict.items()}
elif first_curr_key.startswith(STATE_DICT_MODULE_KEY) and not first_load_key.startswith(STATE_DICT_MODULE_KEY):
return {STATE_DICT_MODULE_KEY + k: v for k, v in load_state_dict.items()}
else:
return load_state_dict
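# Illustrative sketch (not part of the original frozen-in-time code): how the helper above
# reconciles a checkpoint saved from a (Distributed)DataParallel wrapper, whose keys carry a
# "module." prefix, with a plain model, and vice versa.
def _demo_state_dict_data_parallel_fix() -> None:
    wrapped = {"module.fc.weight": torch.zeros(1)}
    plain = {"fc.weight": torch.zeros(1)}
    assert list(state_dict_data_parallel_fix(wrapped, plain)) == ["fc.weight"]
    assert list(state_dict_data_parallel_fix(plain, wrapped)) == ["module.fc.weight"]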
class BaseModel(nn.Module):
"""Base class for all models"""
def __str__(self) -> str:
return f"{super().__str__()}\n" \
f"Trainable parameters: {sum(np.prod(p.size()) for p in self.parameters() if p.requires_grad)}"
class FrozenInTime(BaseModel):
def __init__(self, video_params: Dict[str, Any], text_params: Dict[str, Any], projection_dim: int = 256,
load_checkpoint: Optional[TYPE_PATH] = None, projection: Literal["", "minimal"] = "minimal",
load_temporal_fix: Literal["zeros", "interp", "bilinear"] = "zeros") -> None:
super().__init__()
self.video_params = video_params
self.text_params = text_params
self.load_temporal_fix = load_temporal_fix
if not text_params["pretrained"]:
raise ValueError("HuggingFace text models require `pretrained` init.")
transformers_modeling_utils_logger = logging.getLogger("transformers.modeling_utils")
transformers_modeling_utils_logger.disabled = True
self.text_model = AutoModel.from_pretrained(text_params["model"])
transformers_modeling_utils_logger.disabled = False
pretrained = video_params["pretrained"]
if video_params["model"] == "SpaceTimeTransformer":
num_frames = video_params.get("num_frames", 4)
time_init = video_params.get("time_init", "zeros")
attention_style = video_params.get("attention_style", "frozen-in-time")
arch_config = video_params.get("arch_config", "base_patch16_224")
if arch_config == "base_patch16_224":
vit_model = timm.models.vision_transformer.vit_base_patch16_224(pretrained=pretrained)
model = SpaceTimeTransformer(num_frames=num_frames, time_init=time_init,
attention_style=attention_style)
else:
raise ValueError(f"Unrecognized arch_config: {arch_config}")
model.head = nn.Identity()
model.pre_logits = nn.Identity()
ftr_dim = model.embed_dim
if not load_checkpoint:
vit_checkpoint = vit_model.state_dict()
model.load_state_dict(vit_checkpoint, strict=False)
self.video_model = model
else:
raise ValueError(f"{video_params['model']} not supported")
# for backwards compatibility (old models)
self.video_model.fc = nn.Identity()
# Project to a common embedding
if projection == "minimal":
txt_proj = nn.Sequential(nn.ReLU(), nn.Linear(self.text_model.config.hidden_size, projection_dim))
vid_proj = nn.Sequential(nn.Linear(ftr_dim, projection_dim))
elif projection == "":
txt_proj = nn.Identity()
vid_proj = nn.Identity()
else:
raise ValueError(f"Unrecognized projection: {projection}")
self.txt_proj = txt_proj
self.vid_proj = vid_proj
if load_checkpoint:
load_checkpoint = cached_path(load_checkpoint)
# To make pickle work with a missing module and class. See https://stackoverflow.com/a/2121918/1165181
sys.modules["parse_config"] = frozen_in_time_stub
LOGGER.info("Loading frozen-in-time checkpoint…")
# `map_location="cpu"` to avoid bloating GPU=0 with each process' copy of it.
checkpoint = torch.load(load_checkpoint, map_location="cpu")
del sys.modules["parse_config"]
state_dict = checkpoint["state_dict"]
new_state_dict = state_dict_data_parallel_fix(state_dict, self.state_dict())
new_state_dict = self._inflate_positional_embeds(new_state_dict)
self.load_state_dict(new_state_dict, strict=True) # noqa
LOGGER.info("Checkpoint loaded.")
def forward(self, data: Mapping[str, Any], return_embeds: bool = True) -> Union[torch.Tensor,
Tuple[torch.Tensor, torch.Tensor]]:
text_data = data["text"]
video_data = data["video"]
text_embeddings = self.compute_text(text_data)
video_embeddings = self.compute_video(video_data)
if return_embeds:
return text_embeddings, video_embeddings
return sim_matrix(text_embeddings, video_embeddings)
def compute_text(self, text_data: Mapping[str, Any]) -> torch.Tensor:
if self.text_params["model"].startswith("bert"):
text_embeddings = self.text_model(text_data["input_ids"], attention_mask=text_data["attention_mask"])[
"pooler_output"]
elif self.text_params["model"].startswith("distilbert"):
text_embeddings = self.text_model(**text_data).last_hidden_state[:, 0, :]
else:
raise ValueError(f"Unrecognized text model: {self.text_params['model']}")
return self.txt_proj(text_embeddings)
def compute_video(self, video_data: Mapping[str, Any]) -> torch.Tensor:
video_embeddings = self.video_model(video_data)
return self.vid_proj(video_embeddings)
def _inflate_positional_embeds(self, new_state_dict: MutableMapping[str, Any]) -> Mapping[str, Any]:
# allow loading of timesformer with fewer num_frames
curr_keys = set(self.state_dict().keys())
if "video_model.temporal_embed" in new_state_dict and "video_model.temporal_embed" in curr_keys:
load_temporal_embed = new_state_dict["video_model.temporal_embed"]
load_num_frames = load_temporal_embed.shape[1]
curr_num_frames = self.video_params["num_frames"]
embed_dim = load_temporal_embed.shape[2]
if load_num_frames != curr_num_frames:
if load_num_frames > curr_num_frames:
LOGGER.warning(f"The loaded {self.video_params['model']} model has MORE frames than the current "
f"one. Loading weights, filling in the extras via {self.load_temporal_fix}")
new_temporal_embed = load_temporal_embed[:, :curr_num_frames, :]
else:
LOGGER.warning(f"The loaded {self.video_params['model']} model has FEWER frames than the current "
f"one. Loading weights, filling in the extras via {self.load_temporal_fix}")
if self.load_temporal_fix == "zeros":
new_temporal_embed = torch.zeros([load_temporal_embed.shape[0], curr_num_frames, embed_dim])
new_temporal_embed[:, :load_num_frames] = load_temporal_embed
elif self.load_temporal_fix in ["interp", "bilinear"]:
# interpolate
# unsqueeze so pytorch thinks it's an image
mode = "nearest"
if self.load_temporal_fix == "bilinear":
mode = "bilinear"
load_temporal_embed = load_temporal_embed.unsqueeze(0)
new_temporal_embed = F.interpolate(load_temporal_embed,
(curr_num_frames, embed_dim), mode=mode).squeeze(0)
else:
raise ValueError(f"Unrecognized load_temporal_fix: {self.load_temporal_fix}")
new_state_dict["video_model.temporal_embed"] = new_temporal_embed
# allow loading with smaller spatial patches. assumes custom border crop, to append the
# border patches to the input sequence
if "video_model.pos_embed" in new_state_dict and "video_model.pos_embed" in curr_keys:
load_pos_embed = new_state_dict["video_model.pos_embed"]
load_num_patches = load_pos_embed.shape[1]
curr_pos_embed = self.state_dict()["video_model.pos_embed"]
if load_num_patches != curr_pos_embed.shape[1]:
raise ValueError(
"Loading models with different spatial resolution / patch number not yet implemented, sorry.")
return new_state_dict
def sim_matrix(a: torch.Tensor, b: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]
a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n)) # noqa
b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n)) # noqa
return torch.mm(a_norm, b_norm.transpose(0, 1))
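# Illustrative sketch (not part of the original code): `sim_matrix` returns pairwise cosine
# similarities between two batches of embeddings, one row per item of `a`, one column per item
# of `b`.
def _demo_sim_matrix() -> None:
    a = torch.tensor([[1.0, 0.0], [0.0, 2.0]])
    b = torch.tensor([[2.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    similarities = sim_matrix(a, b)  # shape (2, 3)
    assert torch.allclose(similarities[0], torch.tensor([1.0, 0.0, 0.5 ** 0.5]))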
| 9,799 | 49.515464 | 119 | py |
fitclip | fitclip-main/aligner/encoder/frozen_in_time_video_text_encoder.py | import os
from typing import Iterable, Iterator
import torch
from overrides import overrides
from torchvision import transforms as T
from transformers import AutoTokenizer
from aligner.data.frame_sampler import FrameSampler, RandomFromUniformIntervalsFrameSampler, UniformFrameSampler
from aligner.encoder.frozen_in_time import FrozenInTime
from aligner.encoder.video_encoder import TYPE_TRANSFORM, TYPE_VIDEO_INPUT, float_standard_denormalize
from aligner.encoder.video_text_encoder import TYPE_TEXT_INPUT, TYPE_TOKENIZER, VideoTextEncoder
from aligner.transforms import ConvertBHWCtoBCHW, RandomResizedCropWithRandomInterpolation
def _normalize(t: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
return t / torch.max(t.norm(dim=1, keepdim=True), eps * torch.ones_like(t)) # noqa
class FrozenInTimeVideoTextEncoder(VideoTextEncoder):
# FIXME: set the max tokens by default as in CLIP, also to avoid spending too much memory when using prompts.
def __init__(self, model: FrozenInTime, image_size: int = 224, num_frames: int = 4, max_tokens: int = 77) -> None:
super().__init__()
self.model = model
os.environ["TOKENIZERS_PARALLELISM"] = "0"
self.tokenizer = AutoTokenizer.from_pretrained(model.text_params["model"])
self.normalize = T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
self.image_size = image_size
self.num_frames = num_frames
self.max_tokens = max_tokens
@overrides(check_signature=False)
def encode_video(self, video: TYPE_VIDEO_INPUT, eps: float = 1e-8) -> torch.Tensor:
return _normalize(self.model.compute_video(video), eps=eps)
@overrides(check_signature=False)
def encode_text(self, text: TYPE_TEXT_INPUT, eps: float = 1e-8) -> torch.Tensor:
return _normalize(self.model.compute_text(text), eps=eps)
def _tokenize(self, texts: Iterable[str]) -> TYPE_TEXT_INPUT:
texts = texts if isinstance(texts, (list, tuple)) else list(texts)
return self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True, max_length=self.max_tokens)
@overrides
def get_tokenizer(self) -> TYPE_TOKENIZER:
return self._tokenize
@overrides
def decode_text(self, text: TYPE_TEXT_INPUT) -> Iterator[str]:
return self.tokenizer.batch_decode(text["input_ids"], skip_special_tokens=True)
@overrides
def get_train_frame_sampler(self) -> FrameSampler:
return RandomFromUniformIntervalsFrameSampler(self.num_frames)
@overrides
def get_eval_frame_sampler(self) -> FrameSampler:
return UniformFrameSampler(self.num_frames)
@overrides
def get_train_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
return T.Compose([
ConvertBHWCtoBCHW(),
T.ConvertImageDtype(dtype),
RandomResizedCropWithRandomInterpolation(self.image_size, scale=(0.5, 1.0)),
T.RandomHorizontalFlip(),
self.normalize,
])
@overrides
def get_eval_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
return T.Compose([
ConvertBHWCtoBCHW(),
T.ConvertImageDtype(dtype),
T.Resize(self.image_size),
T.CenterCrop(self.image_size),
self.normalize,
])
@property
@overrides
def should_pad_batch(self) -> bool:
return True
@overrides
def to_bchw(self, t: torch.Tensor) -> torch.Tensor:
return t
@overrides
def denormalize_video_tensor(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return float_standard_denormalize(video, mean=self.normalize.mean, std=self.normalize.std)
| 3,686 | 37.810526 | 118 | py |
fitclip | fitclip-main/aligner/encoder/videoclip.py | import torch
import torch.utils.checkpoint
from torch import nn
from transformers import AutoConfig, BertModel, BertPreTrainedModel
from transformers.activations import ACT2FN
from transformers.models.bert.modeling_bert import BertEmbeddings, BertEncoder
class VideoTokenMLP(nn.Module):
def __init__(self, config):
super().__init__()
input_dim = config.input_dim if hasattr(config, "input_dim") else 512
self.linear1 = nn.Linear(input_dim, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size)
self.activation = ACT2FN[config.hidden_act]
self.linear2 = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.linear1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
hidden_states = self.linear2(hidden_states)
return hidden_states
class MMBertEmbeddings(BertEmbeddings):
def __init__(self, config):
super().__init__(config)
self.max_video_len = config.max_video_len
if hasattr(config, "use_seg_emb") and config.use_seg_emb:
"""the original VLM paper uses seg_embeddings for temporal space.
            although it is not used, it changes the randomness of initialization,
            so we keep it for reproducibility.
"""
self.seg_embeddings = nn.Embedding(256, config.hidden_size)
def forward( # noqa
self,
input_ids,
input_video_embeds,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
input_tensor = input_ids if input_ids is not None else inputs_embeds
if input_video_embeds is not None:
input_shape = (
input_tensor.size(0),
input_tensor.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (input_tensor.size(0), input_tensor.size(1))
if position_ids is None:
"""
            Auto-skip position embeddings for the text-only case.
            Use cases:
            (1) action localization and segmentation:
                a len-1 dummy video token is fed in, so the text part needs to skip
                input_video_embeds.size(1) positions to get the right position_ids
                for the video [SEP] and the remaining text tokens.
            (2) MMFusionShare, which does two forward passes:
                in `forward_text`, input_video_embeds is None, so the video [SEP]
                token needs to be skipped.
# video_len + 1: [CLS] + video_embed
# self.max_video_len + 1: [SEP] for video.
# self.max_video_len + 2: [SEP] for video.
# self.max_video_len + input_ids.size(1): rest for text.
"""
if input_video_embeds is not None:
video_len = input_video_embeds.size(1)
starting_offset = self.max_video_len + 1 # video [SEP]
ending_offset = self.max_video_len + input_ids.size(1)
else:
video_len = 0
starting_offset = self.max_video_len + 2 # first text token.
ending_offset = self.max_video_len + input_ids.size(1) + 1
position_ids = torch.cat([
self.position_ids[:, :video_len + 1],
self.position_ids[:, starting_offset:ending_offset]
], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(
input_shape, dtype=torch.long, device=self.position_ids.device
)
"""
the format of input_ids is [CLS] [SEP] caption [SEP] padding.
the goal is to build [CLS] video tokens [SEP] caption [SEP] .
"""
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if input_video_embeds is not None:
inputs_mm_embeds = torch.cat([
inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:]
], dim=1)
else:
# text only for `MMFusionShare`.
inputs_mm_embeds = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_mm_embeds + position_embeddings
embeddings += token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
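# Illustrative sketch (not part of the original VideoCLIP code): the position-id windows that
# the docstring inside `MMBertEmbeddings.forward` describes, computed with plain integers for a
# toy configuration. `max_video_len`, `video_len` and `num_text_ids` (= `input_ids.size(1)`,
# i.e. [CLS] [SEP] caption [SEP]) are hypothetical example values; the real code slices
# `self.position_ids` with the same offsets.
def _demo_mm_position_id_windows(max_video_len: int = 4, video_len: int = 2,
                                 num_text_ids: int = 5) -> list:
    starting_offset = max_video_len + 1  # video [SEP]
    ending_offset = max_video_len + num_text_ids
    # [CLS] + video tokens keep positions 0..video_len; the text part is shifted past the
    # `max_video_len` slots reserved for video: -> [0, 1, 2, 5, 6, 7, 8]
    return list(range(video_len + 1)) + list(range(starting_offset, ending_offset))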
class MultiLayerAttentionMaskBertEncoder(BertEncoder):
"""extend BertEncoder with the capability of
    per-layer attention masks."""
def forward( # noqa
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_attention_mask = (
attention_mask[:, i, :, :, :]
if attention_mask.dim() == 5
else attention_mask
)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
layer_attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
layer_attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return tuple(
v
for v in [hidden_states, all_hidden_states, all_attentions]
if v is not None
)
class MMBertModel(BertModel):
"""MMBertModel has MMBertEmbedding to support video tokens."""
def __init__(self, config, add_pooling_layer=True): # noqa
super().__init__(config)
# overwrite embedding
self.embeddings = MMBertEmbeddings(config)
self.encoder = MultiLayerAttentionMaskBertEncoder(config)
self.init_weights() # noqa
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
separate_forward_split=None,
):
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions # noqa
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states # noqa
)
return_dict = (
return_dict if return_dict is not None
else self.config.use_return_dict # noqa
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids "
"and inputs_embeds at the same time"
)
elif input_ids is not None:
if input_video_embeds is not None:
input_shape = (
input_ids.size(0),
input_ids.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (
input_ids.size(0),
input_ids.size(1),
)
elif inputs_embeds is not None:
if input_video_embeds is not None:
input_shape = (
inputs_embeds.size(0),
inputs_embeds.size(1) + input_video_embeds.size(1),
)
else:
input_shape = (
                    inputs_embeds.size(0),
                    inputs_embeds.size(1),
)
else:
raise ValueError(
"You have to specify either input_ids or inputs_embeds")
device = (inputs_embeds if input_ids is None else input_ids).device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions
# [batch_size, from_seq_length, to_seq_length]
# ourselves in which case
# we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = \
self.get_extended_attention_mask(
attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to
# [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None: # noqa
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (
encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(
encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask( # noqa
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or
# [num_hidden_layers x num_heads]
# and head_mask is converted to shape
# [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask( # noqa
head_mask, self.config.num_hidden_layers) # noqa
embedding_output = self.embeddings(
input_ids,
input_video_embeds,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
if separate_forward_split is not None:
split_embedding_output = \
embedding_output[:, :separate_forward_split]
split_extended_attention_mask = extended_attention_mask[
:, :, :, :separate_forward_split, :separate_forward_split
]
split_encoder_outputs = self.encoder(
split_embedding_output,
attention_mask=split_extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
assert (
len(split_encoder_outputs) <= 2
), "we do not support merge on attention for now."
encoder_outputs = [[split_encoder_outputs[0]]]
if len(split_encoder_outputs) == 2:
encoder_outputs.append([])
for _all_hidden_states in split_encoder_outputs[1]:
encoder_outputs[-1].append([_all_hidden_states])
split_embedding_output = \
embedding_output[:, separate_forward_split:]
split_extended_attention_mask = extended_attention_mask[
:, :, :, separate_forward_split:, separate_forward_split:
]
split_encoder_outputs = self.encoder(
split_embedding_output,
attention_mask=split_extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
assert len(split_encoder_outputs) <= 2, "we do not support merge on attention for now."
encoder_outputs[0].append(split_encoder_outputs[0])
encoder_outputs[0] = torch.cat(encoder_outputs[0], dim=1)
if len(split_encoder_outputs) == 2:
for layer_idx, _all_hidden_states in enumerate(
split_encoder_outputs[1]
):
encoder_outputs[1][layer_idx].append(_all_hidden_states)
encoder_outputs[1][layer_idx] = torch.cat(
encoder_outputs[1][layer_idx], dim=1
)
encoder_outputs = tuple(encoder_outputs)
else:
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = None if self.pooler is None else self.pooler(sequence_output) # noqa
return (sequence_output, pooled_output) + encoder_outputs[1:]
def get_extended_attention_mask(self, attention_mask, input_shape, device):
"""This is borrowed from `modeling_utils.py` with the support of
multi-layer attention masks.
The second dim is expected to be number of layers.
See `MMAttentionMaskProcessor`.
Makes broadcastable attention and causal masks so that future
and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to,
zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] ourselves
# in which case we just need to make it broadcastable to all heads.
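        # Shape note (illustrative): a per-layer mask comes in as
        # [batch_size, num_hidden_layers, seq_len, seq_len]; unsqueezing at dim 2 makes it
        # [batch_size, num_hidden_layers, 1, seq_len, seq_len], broadcastable over heads.
        # `MultiLayerAttentionMaskBertEncoder` then picks mask[:, i] for layer i, and the
        # (1 - mask) * -10000 trick turns 0 entries into large negative attention biases.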
if attention_mask.dim() == 4:
extended_attention_mask = attention_mask[:, :, None, :, :]
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # noqa; fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
else:
return super().get_extended_attention_mask(attention_mask, input_shape, device) # noqa
class MMBertForEncoder(BertPreTrainedModel):
"""A BertModel for Contrastive Learning."""
def __init__(self, config):
super().__init__(config)
self.videomlp = VideoTokenMLP(config)
self.bert = MMBertModel(config)
self.init_weights() # noqa
def forward(
self,
input_ids=None,
input_video_embeds=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = self.config.use_return_dict if return_dict is None else return_dict # noqa
video_tokens = None if input_video_embeds is None else self.videomlp(input_video_embeds)
return self.bert(input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, # noqa
position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
output_attentions=output_attentions, output_hidden_states=output_hidden_states,
return_dict=return_dict)
class MMFusion(nn.Module):
"""a MMPT wrapper class for MMBert style models.
TODO: move isolated mask to a subclass.
"""
def __init__(self, max_video_len: int = 32, last_iso_layer: int = 12, num_hidden_video_layers: int = 6):
super().__init__()
self.model_name = "bert-base-uncased"
transformer_config = AutoConfig.from_pretrained(self.model_name)
self.hidden_size = transformer_config.hidden_size
self.is_train = False
# 0 means no iso; 1-12 means iso up to that layer.
self.num_hidden_layers = transformer_config.num_hidden_layers
self.last_iso_layer = last_iso_layer
model_config = AutoConfig.from_pretrained(self.model_name)
model_config.max_video_len = max_video_len
# TODO: make each model a set of config class.
if hasattr(model_config, "num_layers"):
model_config.num_layers = num_hidden_video_layers
else:
model_config.num_hidden_layers = num_hidden_video_layers
self.video_encoder = MMBertForEncoder.from_pretrained(self.model_name, config=model_config)
        # exact same NLP model from HuggingFace transformers.
        self.text_encoder = BertModel.from_pretrained(self.model_name)
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
**kwargs
):
raise NotImplementedError(
"Please derive MMFusion module."
)
def _mm_on_the_fly(
self,
cmasks,
vmasks,
attention_mask
):
"""helper function for mask, seg_ids and token_type_ids."""
if attention_mask is None:
attention_mask = self._mm_attention_mask(cmasks, vmasks)
"""
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
"""
token_type_ids = torch.cat([
torch.zeros((vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device),
torch.ones((cmasks.size(0), cmasks.size(1) - 2), dtype=torch.long, device=cmasks.device)], dim=1)
return attention_mask, token_type_ids
def _mm_attention_mask(self, cmasks, vmasks):
assert cmasks.size(0) == vmasks.size(0), "{}, {}, {}, {}".format(
str(cmasks.size()),
str(vmasks.size()),
str(cmasks.size(0)),
str(vmasks.size(0)),
)
mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1)
if self.last_iso_layer == 0:
# hard attention mask.
return mm_mask
else:
# a gpu iso mask; 0 : num_iso_layer is isolated;
# num_iso_layer: are MM-fused.
# make an iso layer
batch_size = cmasks.size(0)
iso_mask = self._make_iso_mask(batch_size, cmasks, vmasks)
mm_mask = mm_mask[:, None, :].repeat(1, mm_mask.size(-1), 1)
iso_mm_masks = []
# hard attention mask.
iso_mask = iso_mask[:, None, :, :].repeat(1, self.last_iso_layer, 1, 1)
iso_mm_masks.append(iso_mask)
if self.last_iso_layer < self.num_hidden_layers:
mm_mask = mm_mask[:, None, :, :].repeat(1, self.num_hidden_layers - self.last_iso_layer, 1, 1)
iso_mm_masks.append(mm_mask)
iso_mm_masks = torch.cat(iso_mm_masks, dim=1)
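            # Resulting shape: [batch_size, num_hidden_layers, seq_len, seq_len]. The first
            # `last_iso_layer` layer masks isolate the modalities ([CLS] attends only to
            # itself, video rows only to video tokens + video [SEP], text rows only to the
            # text tokens), while the remaining layers use the fully fused `mm_mask`.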
return iso_mm_masks
def _make_iso_mask(self, batch_size, cmasks, vmasks): # noqa
cls_self_mask = torch.cat(
[
torch.ones(
(batch_size, 1), dtype=torch.bool, device=cmasks.device),
torch.zeros(
(batch_size, cmasks.size(1) + vmasks.size(1) - 1),
dtype=torch.bool, device=cmasks.device)
], dim=1)
iso_video_mask = torch.cat(
[
# [CLS] is not used.
torch.zeros(
(batch_size, 1), dtype=torch.bool, device=cmasks.device
),
vmasks,
# assume to be 1.
cmasks[:, 1:2],
# 2 means [CLS] + [SEP]
torch.zeros(
(batch_size, cmasks.size(1) - 2),
dtype=torch.bool,
device=cmasks.device,
),
],
dim=1,
)
iso_text_mask = torch.cat(
[
torch.zeros(
(batch_size, 2 + vmasks.size(1)),
dtype=torch.bool,
device=cmasks.device,
), # [CLS] is not used.
cmasks[:, 2:], # assume to be 1.
],
dim=1,
)
cls_self_mask = cls_self_mask[:, None, :]
iso_video_mask = iso_video_mask[:, None, :].repeat(
1, vmasks.size(1) + 1, 1)
iso_text_mask = iso_text_mask[:, None, :].repeat(
1, cmasks.size(1) - 2, 1)
return torch.cat([cls_self_mask, iso_video_mask, iso_text_mask], dim=1)
def _pooling_vt_layer(
self,
layered_sequence_output,
cmasks,
vmasks
):
layer_idx = self.last_iso_layer \
if self.last_iso_layer > 0 else self.num_hidden_layers
hidden_state = layered_sequence_output[layer_idx]
# also output pooled_video and pooled_text.
batch_size = cmasks.size(0)
# pool the modality.
text_offset = vmasks.size(1) + 2 # [CLS] + [SEP]
# video tokens + [SEP]
video_outputs = hidden_state[:, 1:text_offset]
video_attention_mask = torch.cat(
[
vmasks,
torch.ones((batch_size, 1), dtype=torch.bool, device=vmasks.device),
],
dim=1,
)
assert video_outputs.size(1) == video_attention_mask.size(1)
pooled_video = (torch.sum(video_outputs * video_attention_mask.unsqueeze(-1), dim=1)
/ video_attention_mask.sum(1, keepdim=True))
# pooled_video = torch.mean(video_outputs[0], dim=1)
# text tokens + [SEP]
text_attention_mask = cmasks[:, 2:]
text_outputs = hidden_state[:, text_offset:]
assert text_outputs.size(1) == text_attention_mask.size(1)
pooled_text = torch.sum(
text_outputs * text_attention_mask.unsqueeze(-1), dim=1
) / text_attention_mask.sum(1, keepdim=True)
return pooled_video, pooled_text
class MMFusionSeparate(MMFusion):
def forward(
self,
caps,
cmasks,
vfeats,
vmasks,
attention_mask=None,
video_label=None,
text_label=None,
output_hidden_states=False,
**kwargs
):
pooled_video = self.forward_video(
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states
)
pooled_text = self.forward_text(
caps,
cmasks,
output_hidden_states
)
return {"pooled_video": pooled_video, "pooled_text": pooled_text}
def forward_video(
self,
vfeats,
vmasks,
caps,
cmasks,
output_hidden_states=False,
**kwargs # noqa
):
input_ids = caps[:, :2]
attention_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:2]], dim=1)
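        # The video-only pass keeps just the first two text tokens ([CLS] and [SEP]) and
        # builds an attention mask of length 2 + num_video_tokens: [CLS] + video + [SEP].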
token_type_ids = torch.zeros(
(vmasks.size(0), vmasks.size(1) + 2),
dtype=torch.long,
device=vmasks.device)
outputs = self.video_encoder(
input_ids=input_ids,
input_video_embeds=vfeats,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
video_outputs = outputs[0]
if output_hidden_states:
return video_outputs
batch_size = cmasks.size(0)
video_attention_mask = torch.cat([torch.zeros((batch_size, 1), dtype=torch.bool, device=vmasks.device),
vmasks, torch.ones((batch_size, 1), dtype=torch.bool, device=vmasks.device)],
dim=1)
assert video_outputs.size(1) == video_attention_mask.size(1)
        video_attention_mask = (video_attention_mask.type(video_outputs.dtype)
                                / video_attention_mask.sum(1, keepdim=True))
return torch.bmm(video_outputs.transpose(2, 1), video_attention_mask.unsqueeze(2)).squeeze(-1)
def forward_text(
self,
caps,
cmasks,
output_hidden_states=False,
**kwargs # noqa
):
input_ids = torch.cat([
caps[:, :1], caps[:, 2:],
], dim=1)
attention_mask = torch.cat([
cmasks[:, :1],
cmasks[:, 2:]
], dim=1)
# different from sharing, we use all-0 type.
token_type_ids = torch.zeros(
(cmasks.size(0), cmasks.size(1) - 1),
dtype=torch.long,
device=cmasks.device)
outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)
text_outputs = outputs[0]
if output_hidden_states:
return text_outputs
batch_size = caps.size(0)
# text tokens + [SEP]
text_attention_mask = torch.cat([torch.zeros((batch_size, 1), dtype=torch.bool, device=cmasks.device),
cmasks[:, 2:]], dim=1)
assert text_outputs.size(1) == text_attention_mask.size(1)
text_attention_mask = text_attention_mask.type(text_outputs.dtype) / text_attention_mask.sum(1, keepdim=True)
return torch.bmm(text_outputs.transpose(2, 1), text_attention_mask.unsqueeze(2)).squeeze(-1)
| 28,028 | 38.256303 | 119 | py |
fitclip | fitclip-main/aligner/encoder/video_transformer.py | # From https://github.com/m-bain/frozen-in-time/blob/ba54e43/model/video_transformer.py
"""
Implementations of Video Transformers in PyTorch
A PyTorch implementation of space-time transformer as described in
'Frozen in Time: A Joint Image and Video Encoder for End-to-End Retrieval' - https://arxiv.org/abs/2104.00650
A PyTorch implementation of timesformer as described in
'Is Space-Time Attention All You Need for Video Understanding?' - https://arxiv.org/abs/2102.05095
Acknowledgments:
- This code builds on Ross Wightman's vision_transformer code in pytorch-image-models:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
- It is also inspired by lucidrains timesformer implementation:
https://github.com/lucidrains/TimeSformer-pytorch
Hacked together by Max Bain
"""
from collections import OrderedDict
from functools import partial
import torch
from einops import rearrange, repeat
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from torch import einsum, nn
def attn(q, k, v):
sim = einsum('b i d, b j d -> b i j', q, k)
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)
return out
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class VideoPatchEmbed(nn.Module):
""" Video to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768,
num_frames=8):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * num_frames
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.num_frames = num_frames
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, F, C, H, W = x.shape
assert F <= self.num_frames
x = x.view(-1, C, H, W)
x = self.proj(x)
return x
class VarAttention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
initialize='random'):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
if initialize == 'zeros':
self.qkv.weight.data.fill_(0)
self.qkv.bias.data.fill_(0)
# fill proj weight with 1 here to improve training dynamics. Otherwise temporal attention inputs
# are multiplied by 0*0, which is hard for the model to move out of.
self.proj.weight.data.fill_(1)
self.proj.bias.data.fill_(0)
self.attn_drop = nn.Dropout(attn_drop)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, einops_from, einops_to, **einops_dims):
h = self.num_heads
# project x to q, k, v values
q, k, v = self.qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
q *= self.scale
# splice out CLS token at index 1
(cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
# let CLS token attend to key / values of all patches across time and space
cls_out = attn(cls_q, k, v)
# rearrange across time or space
q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))
# expand cls token keys and values across time or space and concat
r = q_.shape[0] // cls_k.shape[0]
cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))
k_ = torch.cat((cls_k, k_), dim=1)
v_ = torch.cat((cls_v, v_), dim=1)
# attention
out = attn(q_, k_, v_)
# merge back time or space
out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
# concat back the cls token
out = torch.cat((cls_out, out), dim=1)
# merge back the heads
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
# to out
x = self.proj(out)
x = self.proj_drop(x)
return x
class SpaceTimeBlock(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, time_init='zeros',
attention_style='frozen-in-time'):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = VarAttention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.timeattn = VarAttention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
initialize=time_init)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.norm3 = norm_layer(dim)
self.attention_style = attention_style
def forward(self, x, einops_from_space, einops_to_space, einops_from_time, einops_to_time,
time_n, space_f):
time_output = self.timeattn(self.norm3(x), einops_from_time, einops_to_time, n=time_n)
time_residual = x + time_output
space_output = self.attn(self.norm1(time_residual), einops_from_space,
einops_to_space, f=space_f)
if self.attention_style == 'frozen-in-time':
space_residual = x + self.drop_path(space_output)
else:
raise NotImplementedError
x = space_residual + self.drop_path(self.mlp(self.norm2(space_residual)))
return x
class SpaceTimeTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `Space-Time Transformer` from Frozen-in-time - by Max Bain.
https://arxiv.org/abs/2104.00650
Based off:
- ViT implementation from the timm library
[https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py]
lucidrains timesformer implementation [https://github.com/lucidrains/TimeSformer-pytorch].
Notable differences:
- allows for variable length input frames (<= num_frames)
- allows for variable length input resolution (<= (img_size, img_size)) [UNTESTED]
- different attention block mechanism
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None,
num_frames=8, time_init='rand', attention_style='frozen-in-time'):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
norm_layer: (nn.Module): normalization layer
num_frames: (int) maximum number of frames expected as input
time_init: (str) how to initialise the time attention layer, 'zeros' allows for the timesformer to start off
as ViT.
attention_style: (str) how to attend to space and time.
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_frames = num_frames
self.embed_dim = embed_dim
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
# print("######USING ATTENTION STYLE: ", attention_style)
if hybrid_backbone is not None:
raise NotImplementedError('hybrid backbone not implemented')
else:
self.patch_embed = VideoPatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=num_frames)
num_patches = self.patch_embed.num_patches
self.patches_per_frame = num_patches // num_frames
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, self.patches_per_frame + 1,
embed_dim)) # remember to take pos_embed[1:] for tiling over time
self.temporal_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
SpaceTimeBlock(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, time_init=time_init,
attention_style=attention_style)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(embed_dim, representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# if num_frames > 1, then we perform ViT inflation and initialise time attention to zero so not necessary.
if num_frames == 1:
self.apply(self._init_weights)
# einops transformations
self.einops_from_space = 'b (f n) d'
self.einops_to_space = '(b f) n d'
self.einops_from_time = 'b (f n) d'
self.einops_to_time = '(b n) f d'
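        # Illustration: with the space pattern, tokens are regrouped per frame so attention
        # runs over the n patches of each frame ('b (f n) d' -> '(b f) n d'); with the time
        # pattern, tokens are regrouped per spatial location so attention runs over the f
        # frames at each patch position ('b (f n) d' -> '(b n) f d').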
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
b, curr_frames, channels, _, _ = x.shape
x = self.patch_embed(x)
x = x.flatten(2).transpose(2, 1)
x = x.reshape(b, -1, self.patch_embed.embed_dim)
BF = x.shape[0]
cls_tokens = self.cls_token.expand(BF, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
# positional embed needs to be tiled for each frame (this does [1,2,3] --> [1,2,3,1,2,3]...)
cls_embed = self.pos_embed[:, 0, :].unsqueeze(1)
tile_pos_embed = self.pos_embed[:, 1:, :].repeat(1, self.num_frames, 1)
# temporal embed needs to be repeated within each frame (this does [1,2,3] --> [1,1,1,2,2,2,3,3,3]...)
tile_temporal_embed = self.temporal_embed.repeat_interleave(self.patches_per_frame, 1)
total_pos_embed = tile_pos_embed + tile_temporal_embed
total_pos_embed = torch.cat([cls_embed, total_pos_embed], dim=1)
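        # Shape sketch (e.g. 224x224 input, 16x16 patches => 196 patches per frame):
        # tile_pos_embed and tile_temporal_embed are both [1, num_frames * 196, embed_dim],
        # so total_pos_embed ends up as [1, 1 + num_frames * 196, embed_dim] after adding
        # back the [CLS] positional embedding.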
curr_patches = x.shape[1]
x = x + total_pos_embed[:, :curr_patches]
x = self.pos_drop(x)
n = self.patches_per_frame
f = curr_frames
for blk in self.blocks:
x = blk(x, self.einops_from_space, self.einops_to_space, self.einops_from_time,
self.einops_to_time,
time_n=n, space_f=f)
x = self.norm(x)[:, 0]
x = self.pre_logits(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
| 14,253 | 40.800587 | 120 | py |
fitclip | fitclip-main/aligner/encoder/clip_video_text_encoder.py | import os.path
import shutil
import tempfile
from typing import Iterable, Iterator, Tuple
import torch
from cached_path import cached_path
from clip import clip
from clip.model import CLIP
from overrides import overrides
from torch import nn
from torchvision import transforms as T
from aligner.data.frame_sampler import FrameSampler, RandomFromUniformIntervalsFrameSampler, UniformFrameSampler
from aligner.encoder.video_encoder import TYPE_TRANSFORM, float_standard_denormalize
from aligner.encoder.video_text_encoder import TYPE_TEXT_INPUT, TYPE_TOKENIZER, TYPE_VIDEO_INPUT, VideoTextEncoder
from aligner.transforms import ConvertBHWCtoBCHW, RandomResizedCropWithRandomInterpolation
# By default, `clip.load` uses part in half and part in single precision for GPU.
# But this may cause issues with the teacher-student model, and we can actually control it from the trainer.
def load_clip_in_float32(*args, **kwargs) -> Tuple[nn.Module, TYPE_TRANSFORM]:
model, transform = clip.load(*args, **kwargs)
model.float()
return model, transform
# Necessary to use from Hydra so to get the first element of the tuple from `clip.load`.
# It also does more stuff than `clip.load`.
def load_clip_model(name: str, *args, **kwargs) -> nn.Module:
temp_filepaths = []
try:
if "://" in name:
name = cached_path(name)
elif os.path.exists(name) and not os.path.isdir(name) and not os.path.isfile(name):
# It could be a pipe. It could be created by a process substitution.
# We copy it to a file because `clip.load` has a check that it's a file (and hence not a pipe).
with tempfile.NamedTemporaryFile(delete=False) as output_file, open(name, "rb") as input_file:
shutil.copyfileobj(input_file, output_file)
name = output_file.name
temp_filepaths.append(name)
        # We don't use the logit scale from CLIP but ours, so it may not exist. Here we need to re-create the variable,
# so it doesn't fail when loading this `state_dict`.
if os.path.exists(name): # It doesn't apply if it's a model name.
state_dict = torch.load(name)
if "logit_scale" not in state_dict:
state_dict["logit_scale"] = torch.tensor(float("nan"))
with tempfile.NamedTemporaryFile(delete=False) as file:
# We create a new file to respect the original one.
torch.save(state_dict, file)
name = file.name
temp_filepaths.append(name)
if not args: # If `args` is not empty, then `device` was set for `clip.load`.
kwargs.setdefault("device", "cpu") # To avoid bloating GPU 0 with each process' copy of it.
return load_clip_in_float32(name, *args, **kwargs)[0]
finally:
for path in temp_filepaths:
os.remove(path)
def _tokenize(texts: Iterable[str]) -> TYPE_TEXT_INPUT:
return {"input_ids": clip.tokenize(texts, truncate=True)} # noqa
class ClipVideoTextEncoder(VideoTextEncoder):
def __init__(self, model: CLIP, num_frames: int = 4) -> None:
super().__init__()
self.model = model
self.normalize = T.Normalize(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
self.num_frames = num_frames
        # Indirectly unregister the param, as we don't use it and it would otherwise cause problems while training.
if hasattr(self.model, "logit_scale"):
delattr(self.model, "logit_scale")
@overrides
def encode_video(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
batch_size = video.shape[0]
images = video.view(-1, *video.shape[2:])
encoded_video = self.model.encode_image(images)
encoded_video = encoded_video / encoded_video.norm(dim=-1, keepdim=True)
# Averaging the representations is the same as averaging the predictions:
# <t, (i1+i2)/2> = 1/2 <t, i1+i2> = (<t, i1> + <t, i2>) / 2
return encoded_video.view(batch_size, -1, *encoded_video.shape[1:]).mean(dim=1)
@overrides
def encode_text(self, text: TYPE_TEXT_INPUT) -> torch.Tensor:
encoded_texts = self.model.encode_text(text["input_ids"])
return encoded_texts / encoded_texts.norm(dim=-1, keepdim=True)
@overrides
def get_tokenizer(self) -> TYPE_TOKENIZER:
return _tokenize
@overrides
def decode_text(self, text: TYPE_TEXT_INPUT) -> Iterator[str]:
for text_instance in text:
yield clip._tokenizer.decode(text_instance["input_ids"])
@overrides
def get_train_frame_sampler(self) -> FrameSampler:
return RandomFromUniformIntervalsFrameSampler(self.num_frames)
@overrides
def get_eval_frame_sampler(self) -> FrameSampler:
return UniformFrameSampler(self.num_frames)
@overrides
def get_train_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
size = self.model.visual.input_resolution
return T.Compose([
ConvertBHWCtoBCHW(),
T.ConvertImageDtype(dtype),
RandomResizedCropWithRandomInterpolation(size, scale=(0.5, 1.0)),
T.RandomHorizontalFlip(),
self.normalize,
])
@overrides
def get_eval_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
size = self.model.visual.input_resolution
return T.Compose([
ConvertBHWCtoBCHW(),
T.ConvertImageDtype(dtype),
T.Resize(size, interpolation=T.InterpolationMode.BICUBIC),
T.CenterCrop(size),
self.normalize,
])
@property
@overrides
def should_pad_batch(self) -> bool:
return True
@overrides
def to_bchw(self, t: torch.Tensor) -> torch.Tensor:
return t
@overrides
def denormalize_video_tensor(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return float_standard_denormalize(video, mean=self.normalize.mean, std=self.normalize.std)
| 6,022 | 39.972789 | 120 | py |
fitclip | fitclip-main/aligner/encoder/slip_video_text_encoder.py | from typing import Iterable, Iterator, Union
import torch
from cached_path import cached_path
from overrides import overrides
from torchvision import transforms as T
from aligner.data.frame_sampler import FrameSampler, UniformFrameSampler
from aligner.encoder import slip
from aligner.encoder.slip import CLIP, SLIP, SimpleTokenizer
from aligner.encoder.video_encoder import TYPE_TRANSFORM, float_standard_denormalize
from aligner.encoder.video_text_encoder import TYPE_TEXT_INPUT, TYPE_TOKENIZER, TYPE_VIDEO_INPUT, VideoTextEncoder
from aligner.transforms import ConvertBHWCtoBCHW
from util.typing_utils import TYPE_PATH
def load_model(path: TYPE_PATH) -> Union[CLIP, SLIP]:
checkpoint = torch.load(cached_path(path), map_location="cpu")
args = checkpoint["args"]
model = getattr(slip, args.model)(rand_embed=False, ssl_mlp_dim=args.ssl_mlp_dim, ssl_emb_dim=args.ssl_emb_dim)
model.load_state_dict({k.replace("module.", ""): v for k, v in checkpoint["state_dict"].items()})
return model
class SlipVideoTextEncoder(VideoTextEncoder):
def __init__(self, model: Union[CLIP, SLIP], num_frames: int = 4) -> None:
super().__init__()
self.model = model
self.tokenizer = SimpleTokenizer()
        self.num_frames = num_frames
        self.normalize = T.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
        # Indirectly unregister the param, as we don't use it and it would otherwise cause problems while training.
if hasattr(self.model, "logit_scale"):
delattr(self.model, "logit_scale")
@overrides
def encode_video(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
batch_size = video.shape[0] # noqa
images = video.view(-1, *video.shape[2:])
encoded_video = self.model.encode_image(images)
encoded_video = encoded_video / encoded_video.norm(dim=-1, keepdim=True)
# Averaging the representations is the same as averaging the predictions:
# <t, (i1+i2)/2> = 1/2 <t, i1+i2> = (<t, i1> + <t, i2>) / 2
return encoded_video.view(batch_size, -1, *encoded_video.shape[1:]).mean(dim=1)
@overrides
def encode_text(self, text: TYPE_TEXT_INPUT) -> torch.Tensor:
encoded_texts = self.model.encode_text(text["input_ids"])
return encoded_texts / encoded_texts.norm(dim=-1, keepdim=True)
def _tokenize(self, texts: Iterable[str]) -> TYPE_TEXT_INPUT:
return {"input_ids": self.tokenizer(texts)}
@overrides
def get_tokenizer(self) -> TYPE_TOKENIZER:
return self._tokenize
@overrides
def decode_text(self, text: TYPE_TEXT_INPUT) -> Iterator[str]:
for text_instance in text:
yield self.tokenizer.decode(text_instance["input_ids"])
@overrides
def get_train_frame_sampler(self) -> FrameSampler:
raise NotImplementedError
@overrides
def get_eval_frame_sampler(self) -> FrameSampler:
return UniformFrameSampler(self.num_frames)
@overrides
def get_train_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
raise NotImplementedError
@overrides
def get_eval_transform(self, dtype: torch.dtype) -> TYPE_TRANSFORM:
size = 224
return T.Compose([
ConvertBHWCtoBCHW(),
T.ConvertImageDtype(dtype),
T.Resize(size),
T.CenterCrop(size),
            self.normalize,
])
@property
@overrides
def should_pad_batch(self) -> bool:
return True
@overrides
def to_bchw(self, t: torch.Tensor) -> torch.Tensor:
return t
@overrides
def denormalize_video_tensor(self, video: TYPE_VIDEO_INPUT) -> torch.Tensor:
return float_standard_denormalize(video, mean=self.normalize.mean, std=self.normalize.std)
| 3,736 | 36.37 | 115 | py |
fitclip | fitclip-main/aligner/data/youcook2.py | import os
from glob import iglob
from typing import Optional, Tuple
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_text_dataset import VideoTextDataset
from util.typing_utils import TYPE_PATH
VAL_VIDEO_INFO_FILE_PATH = "https://raw.githubusercontent.com/antoine77340/MIL-NCE_HowTo100M/master/csv/" \
"validation_youcook.csv"
# Videos can also be obtained from https://www.rocq.inria.fr/cluster-willow/amiech/Youcook2_val.zip!validation
VAL_VIDEOS_FOLDER = "/datasets/yc2_mil_nce_val/"
class YouCook2(VideoTextDataset):
def __init__(self, video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH, **kwargs) -> None:
self.video_info = pd.read_csv(cached_path(video_info_file_path), dtype={"task": str})
video_folder = cached_path(videos_folder)
video_paths = (next(iglob(os.path.join(video_folder, row.task, f"{row.video_id}.*")))
for _, row in self.video_info.iterrows())
super().__init__(video_paths=video_paths, **kwargs)
@overrides
def _get_target(self, video_idx: int) -> str:
return self.video_info.loc[video_idx, "text"]
@overrides
def _get_times(self, video_idx: int) -> Tuple[Optional[float], Optional[float]]:
row = self.video_info.loc[video_idx]
return row.start, row.end
class YouCook2DataModule(VideoTextDataModule): # noqa
def __init__(self, val_video_info_file_path: TYPE_PATH = VAL_VIDEO_INFO_FILE_PATH,
val_videos_folder: TYPE_PATH = VAL_VIDEOS_FOLDER, **kwargs) -> None:
super().__init__(**kwargs)
self.val_video_info_file_path = val_video_info_file_path
self.val_videos_folder = val_videos_folder
@overrides
def val_dataloader(self) -> DataLoader:
dataset = YouCook2(video_info_file_path=self.val_video_info_file_path, videos_folder=self.val_videos_folder,
**self._create_dataset_encoder_kwargs(train=False))
return self._create_dataloader(dataset, train=False)
| 2,183 | 41 | 116 | py |
fitclip | fitclip-main/aligner/data/moments_in_time.py | import functools
import os
from typing import Mapping, Tuple
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDataset
from util.typing_utils import TYPE_PATH
from util.video_utils import get_sorted_videos_in_folder
CATEGORIES_FILE_PATH = "/datasets/moments-in-time/moments_categories.txt"
VAL_VIDEO_INFO_FILE_PATH = "/datasets/moments-in-time/validationSet.csv"
VAL_VIDEOS_FOLDER = "/datasets/moments-in-time/validation"
class MomentsInTime(VideoDataset):
def __init__(self, categories: Mapping[str, int], video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH,
**kwargs) -> None:
super().__init__(video_paths=get_sorted_videos_in_folder(cached_path(videos_folder)), **kwargs)
self.categories = categories
self.video_info = pd.read_csv(cached_path(video_info_file_path), names=["path", "category", "agreement",
"disagreement"], index_col="path")
@functools.lru_cache
@overrides
def _get_video_id(self, video_idx: int) -> str:
path = self.video_paths[video_idx]
folder_path, filename = os.path.split(path)
folder_name = os.path.basename(folder_path)
return os.path.join(folder_name, filename)
@overrides
def _get_target(self, video_idx: int) -> Tuple[str, int]:
video_id = self._get_video_id(video_idx)
category = self.video_info.loc[video_id, "category"]
return category, self.categories[category]
class MomentsInTimeDataModule(VideoClassificationDataModule): # noqa
categories = {} # Necessary because it's an abstract property. See https://stackoverflow.com/a/42529760/1165181
def __init__(self, categories_file_path: TYPE_PATH = CATEGORIES_FILE_PATH,
val_video_info_file_path: TYPE_PATH = VAL_VIDEO_INFO_FILE_PATH,
val_videos_folder: TYPE_PATH = VAL_VIDEOS_FOLDER, **kwargs) -> None:
super().__init__(**kwargs)
self.val_video_info_file_path = val_video_info_file_path
self.val_videos_folder = val_videos_folder
with open(cached_path(categories_file_path)) as file:
self.categories = {}
for line in file:
category, id_ = line.rstrip().split(",")
self.categories[category] = int(id_)
@overrides
def val_dataloader(self) -> DataLoader:
dataset = MomentsInTime(categories=self.categories, video_info_file_path=self.val_video_info_file_path,
videos_folder=self.val_videos_folder,
**self._create_dataset_encoder_kwargs(train=False))
return self._create_dataloader(dataset, train=False)
| 2,916 | 43.19697 | 116 | py |
fitclip | fitclip-main/aligner/data/frame_sampler.py | import itertools
from abc import ABC, abstractmethod
from typing import Optional, Sequence
import torch
from overrides import overrides
from util.iter_utils import pairwise
from util.video_utils import resample
class FrameSampler(ABC):
"""Returns the frame indices to seek for the given clip start and end frame indices."""
@abstractmethod
def __call__(self, start_frame: int, end_frame: int, fps: float) -> Sequence[int]:
raise NotImplementedError
class RandomFromUniformIntervalsFrameSampler(FrameSampler):
def __init__(self, max_frames: int) -> None:
super().__init__()
self.max_frames = max_frames
@overrides
def __call__(self, start_frame: int, end_frame: int, fps: float) -> Sequence[int]:
num_frames = min(self.max_frames, end_frame - start_frame + 1)
ticks = torch.linspace(start=start_frame, end=end_frame, steps=num_frames + 1, dtype=torch.int)
return [torch.randint(a, b + 1, size=()) for a, b in pairwise(ticks)]
class UniformFrameSampler(FrameSampler):
def __init__(self, max_frames: int) -> None:
super().__init__()
self.max_frames = max_frames
@overrides
def __call__(self, start_frame: int, end_frame: int, fps: float) -> Sequence[int]:
num_frames = min(self.max_frames, end_frame - start_frame + 1)
ticks = torch.linspace(start=start_frame, end=end_frame, steps=num_frames + 1, dtype=torch.int)
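        # E.g. start_frame=0, end_frame=7, max_frames=4: ticks = [0, 1, 3, 5, 7] and the
        # returned indices are the (rounded) midpoints [0, 2, 4, 6], one per interval.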
return [torch.round((a + b) / 2).to(torch.int) for a, b in pairwise(ticks)]
class FixedFrameFromUniformIntervalsFrameSampler(FrameSampler):
def __init__(self, max_frames: int, frame_index_from_interval_start: int) -> None:
super().__init__()
self.max_frames = max_frames
self.frame_index_from_interval_start = frame_index_from_interval_start
@overrides
def __call__(self, start_frame: int, end_frame: int, fps: float) -> Sequence[int]:
num_frames = min(self.max_frames, end_frame - start_frame + 1)
ticks = torch.linspace(start=start_frame, end=end_frame + 1, steps=num_frames + 1, dtype=torch.int)
return ticks[:-1] + self.frame_index_from_interval_start
class ConsecutiveFrameSampler(FrameSampler):
def __init__(self, max_frames: int, fps: Optional[int] = None) -> None:
super().__init__()
self.max_frames = max_frames
self.fps = fps
@overrides
def __call__(self, start_frame: int, end_frame: int, fps: float) -> Sequence[int]:
if self.fps:
indices = resample(num_frames=self.max_frames, original_fps=fps, new_fps=self.fps)
else:
indices = range(self.max_frames)
smallest_possible_end = min(end_frame, start_frame + indices[-1])
if isinstance(smallest_possible_end, torch.Tensor):
smallest_possible_end = smallest_possible_end.item() # To avoid a warning in the floor division.
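        # Center the sampled window: e.g. with fps=None, max_frames=32, start_frame=0 and
        # end_frame=99, indices = range(32), smallest_possible_end = 31 and start = 34, so
        # the 32 consecutive frames 34..65 sit roughly in the middle of the clip.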
start = start_frame + (end_frame - smallest_possible_end) // 2
return list(itertools.takewhile(lambda i: i <= end_frame, (start + i for i in indices)))
| 3,069 | 38.87013 | 109 | py |
fitclip | fitclip-main/aligner/data/video_reader.py | import logging
from abc import ABC, abstractmethod
from typing import Sequence, Union
import PIL
import decord
import numpy as np
import torch
import torchvision.datasets
import torchvision.transforms.functional
from overrides import overrides
from util.typing_utils import TYPE_PATH
LOGGER = logging.getLogger(__name__)
class VideoReader(ABC):
def __init__(self, path: TYPE_PATH) -> None: # noqa
pass
def __call__(self, indices: Sequence[int]) -> torch.Tensor:
raise NotImplementedError
@abstractmethod
def __len__(self) -> int:
raise NotImplementedError
@abstractmethod
def time_to_indices(self, time: Union[float, Sequence[float]]) -> np.ndarray:
raise NotImplementedError
@abstractmethod
def get_avg_fps(self) -> float:
raise NotImplementedError
@staticmethod
def from_path(path: TYPE_PATH) -> "VideoReader":
return (AccImageVideoReader if torchvision.datasets.folder.is_image_file(path) else DecordVideoReader)(path)
decord.bridge.set_bridge("torch")
class DecordVideoReader(VideoReader):
@overrides
def __init__(self, path: TYPE_PATH) -> None:
super().__init__(path)
        # Using `width` and `height` from VideoReader is actually faster because it resizes while decoding; however,
        # it doesn't preserve the aspect ratio (even if setting only one of the two).
        # Using the GPU for decoding may actually be faster, but it isn't trivial to optimize the whole data-loading
        # process to take advantage of it.
try:
self.video_reader = decord.VideoReader(path, num_threads=1)
except decord.DECORDError:
LOGGER.error(f"An error occurred when trying to load the video with path {path}.")
self.video_reader = None
@overrides
def __call__(self, indices: Sequence[int]) -> torch.Tensor:
if self.video_reader:
try:
return self.video_reader.get_batch(indices) # noqa
except decord.DECORDError:
# FIXME: change the handle for the path? Or how to get the path
LOGGER.error(f"An error occurred when trying to read the video with path {self.video_reader._handle}"
f" and indices {indices}.")
return torch.zeros(len(indices), 256, 256, 3)
@overrides
def __len__(self) -> int:
return len(self.video_reader) if self.video_reader else 1
@overrides
def time_to_indices(self, time: Union[float, Sequence[float]]) -> np.ndarray:
times = self.video_reader.get_frame_timestamp(range(len(self))).mean(-1) if self.video_reader else np.zeros(1)
indices = np.searchsorted(times, time)
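        # `indices` is the first frame whose (mid-point) timestamp is >= `time`; the check
        # below keeps it only when it is at least as close to `time` as the previous frame,
        # i.e. it returns the nearest frame index rather than always rounding up.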
# Use `np.bitwise_or` so it works both with scalars and numpy arrays.
return np.where(np.bitwise_or(indices == 0, times[indices] - time <= time - times[indices - 1]), indices,
indices - 1)
@overrides
def get_avg_fps(self) -> float:
return self.video_reader.get_avg_fps() if self.video_reader else 1
torchvision.set_image_backend("accimage")
class AccImageVideoReader(VideoReader):
@overrides
def __init__(self, path: TYPE_PATH) -> None:
super().__init__(path)
self.path = path
@overrides
def __call__(self, indices: Sequence[int]) -> torch.Tensor:
try:
image = torchvision.datasets.folder.accimage_loader(self.path)
image_tensor = torchvision.transforms.functional.to_tensor(image)
return image_tensor.permute(1, 2, 0).unsqueeze(0)
except PIL.UnidentifiedImageError: # Note `accimage_loader` falls back to PIL.
LOGGER.error(f"An error occurred when trying to read the image with path {self.path}.")
return torch.zeros(len(indices), 256, 256, 3)
@overrides
def __len__(self) -> int:
return 1
@overrides
def time_to_indices(self, time: Union[float, Sequence[float]]) -> np.ndarray:
return np.zeros_like(time, dtype=int)
@overrides
def get_avg_fps(self) -> float:
return 1
| 4,118 | 33.90678 | 120 | py |
fitclip | fitclip-main/aligner/data/data_module_group.py | import bisect
from abc import ABC
from typing import Any, Callable, Iterable, Mapping, Optional, Sequence, Union
import pytorch_lightning as pl
from overrides import overrides
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch.utils.data import BatchSampler, ConcatDataset, DataLoader, Dataset, RandomSampler, SequentialSampler
from torchvision.datasets.samplers import DistributedSampler as DistributedSampler2
from aligner.data.multi_source_sampler import RoundRobinMultiSourceSampler
TYPE_DM_ITERABLE_OR_MAP = Union[Iterable[pl.LightningDataModule], Mapping[str, pl.LightningDataModule]]
def _data_modules_iterable(data_modules: TYPE_DM_ITERABLE_OR_MAP) -> Iterable[pl.LightningDataModule]:
return data_modules.values() if isinstance(data_modules, Mapping) else data_modules
def _data_loader_sequence(data_modules: TYPE_DM_ITERABLE_OR_MAP,
fn: Callable[[pl.LightningDataModule], EVAL_DATALOADERS]) -> Sequence[DataLoader]:
dls = (fn(dm) for dm in _data_modules_iterable(data_modules))
return [dl for dls_dm in dls for dl in ([dls_dm] if isinstance(dls_dm, DataLoader) else dls_dm)]
class _DataModuleGroup(pl.LightningDataModule, ABC):
def __init__(self, data_modules: TYPE_DM_ITERABLE_OR_MAP) -> None:
# Before calling super because it sets `trainer`, which recursively uses these.
self.data_modules = data_modules
self._trainer = None
super().__init__()
# Use it as a property, so we can set it to the data modules when set to self.
@property
def trainer(self) -> Trainer:
return self._trainer
@trainer.setter
def trainer(self, value: Trainer) -> None:
self._trainer = value
# `self.trainer` is set during `super().__init__`, which in turn it's called from `super().__new__`,
# which we can't control and happens before `self.data_modules` even exists.
# So we need to handle the case where the attribute doesn't exist.
for dm in _data_modules_iterable(getattr(self, "data_modules", [])):
dm.trainer = value
@overrides
def prepare_data(self) -> None:
for dm in _data_modules_iterable(self.data_modules):
dm.prepare_data()
@overrides
def setup(self, stage: Optional[str] = None) -> None:
for dm in _data_modules_iterable(self.data_modules):
dm.setup(stage)
class EvalDataModuleGroup(_DataModuleGroup): # noqa
@overrides
def val_dataloader(self) -> EVAL_DATALOADERS:
return _data_loader_sequence(self.data_modules, lambda dm: dm.val_dataloader())
@overrides
def test_dataloader(self) -> EVAL_DATALOADERS:
return _data_loader_sequence(self.data_modules, lambda dm: dm.test_dataloader())
@overrides
def predict_dataloader(self) -> EVAL_DATALOADERS:
return _data_loader_sequence(self.data_modules, lambda dm: dm.predict_dataloader())
class DataModuleStructuredGroup(EvalDataModuleGroup):
@overrides
def train_dataloader(self) -> TRAIN_DATALOADERS:
return apply_to_collection(self.data_modules, pl.LightningDataModule, lambda dm: dm.train_dataloader())
class ConcatDatasetWithDatasetKey(ConcatDataset):
"""A `ConcatDataset` that returns the corresponding dataset key for each item.
It supposes the underlying datasets all return mapping items.
"""
def __init__(self, datasets: Union[Iterable[Dataset], Mapping[str, Dataset]]) -> None:
super().__init__(datasets.values() if isinstance(datasets, Mapping) else datasets)
self.keys = list(datasets.keys()) if isinstance(datasets, Mapping) else range(len(self.datasets))
@overrides(check_signature=False)
def __getitem__(self, i: int) -> Mapping[Any, Any]:
item = super().__getitem__(i)
dataset_idx = bisect.bisect_right(self.cumulative_sizes, i)
return {**item, "dataset": self.keys[dataset_idx]}
def _add_distributed_sampler(data_loaders: EVAL_DATALOADERS, mode: RunningStage) -> EVAL_DATALOADERS:
assert all(apply_to_collection(data_loaders, DataLoader, lambda dl: isinstance(dl.sampler, SequentialSampler)))
return apply_to_collection(
data_loaders, DataLoader,
lambda dl: Trainer._update_dataloader(dl, DistributedSampler2(dl.dataset), mode=mode))
class MixedBatchDataModule(EvalDataModuleGroup):
"""A data module that combines many data modules during training, with the same dataset composition for each batch,
but separately for evaluation."""
def __init__(self, *args, train_sequence_sizes: Union[int, Iterable[int], Mapping[str, int]] = 1, **kwargs) -> None:
super().__init__(*args, **kwargs)
if isinstance(train_sequence_sizes, Mapping):
assert isinstance(self.data_modules, Mapping)
self.train_sequence_sizes = [train_sequence_sizes[k] for k in self.data_modules]
else:
self.train_sequence_sizes = train_sequence_sizes
if isinstance(self.train_sequence_sizes, int):
self.train_batch_size = len(self.data_modules) * self.train_sequence_sizes
else:
self.train_batch_size = sum(self.train_sequence_sizes)
@overrides
def train_dataloader(self) -> TRAIN_DATALOADERS:
data_loaders = apply_to_collection(self.data_modules, pl.LightningDataModule, lambda dm: dm.train_dataloader())
datasets = apply_to_collection(data_loaders, DataLoader, lambda dl: dl.dataset)
dataset = ConcatDatasetWithDatasetKey(datasets)
sub_samplers = [RandomSampler(dataset) for dataset in dataset.datasets] # noqa
sampler = RoundRobinMultiSourceSampler(sub_samplers, sequence_sizes=self.train_sequence_sizes,
mode="max_size_cycle")
data_loader_iterable = data_loaders.values() if isinstance(data_loaders, Mapping) else data_loaders
# We suppose each data module has the same args for the train data loader creation for the values obtained
# here from the first data loader.
first_data_loader = next(iter(data_loader_iterable))
# We have to create the batch sampler manually for the distributed setting.
# This is because we need to control how each batch is formed. If we don't do this, the distributed sampler
# comes before the batch sampling, and the mix composition of the batches won't be the intended one.
#
# For simplicity, we apply it regardless of distributed/non-distributed setup.
batch_sampler = BatchSampler(sampler, batch_size=self.train_batch_size, drop_last=True)
if self.trainer._accelerator_connector.is_distributed:
# We need to manually set the distributed sampler instead of doing it automatically with Pytorch Lightning
# because we're using a custom sampler.
#
# This version of DistributedSampler accounts for having a sampler as input.
#
# BTW, there's a similar one (`DistributedSamplerWrapper`) in
# https://github.com/catalyst-team/catalyst/blob/master/catalyst/data/sampler.py
batch_sampler = DistributedSampler2(batch_sampler)
# We need to set the sampler as a `batch_sampler` so it activates the auto-collation in the data loader.
data_loader = DataLoader(dataset, batch_sampler=batch_sampler, num_workers=first_data_loader.num_workers,
collate_fn=first_data_loader.collate_fn, pin_memory=first_data_loader.pin_memory,
timeout=first_data_loader.timeout, worker_init_fn=first_data_loader.worker_init_fn,
multiprocessing_context=first_data_loader.multiprocessing_context,
prefetch_factor=first_data_loader.prefetch_factor,
persistent_workers=first_data_loader.persistent_workers)
if self.trainer._accelerator_connector.is_distributed:
# PL only sets the epoch to the sampler, not to the batch sampler. This is because the distributed
# sampler is typically the former not the latter.
# Note that setting the epoch is necessary for shuffling, so every epoch has different batches.
data_loader.sampler.set_epoch = lambda epoch: batch_sampler.set_epoch(epoch)
return data_loader
def _add_distributed_sampler_maybe(self, data_loaders: EVAL_DATALOADERS, mode: RunningStage) -> EVAL_DATALOADERS:
if self.trainer._accelerator_connector.is_distributed:
return _add_distributed_sampler(data_loaders, mode)
else:
return data_loaders
@overrides
def val_dataloader(self) -> EVAL_DATALOADERS:
return self._add_distributed_sampler_maybe(super().val_dataloader(), RunningStage.VALIDATING)
@overrides
def test_dataloader(self) -> EVAL_DATALOADERS:
return self._add_distributed_sampler_maybe(super().test_dataloader(), RunningStage.TESTING)
@overrides
def predict_dataloader(self) -> EVAL_DATALOADERS:
return self._add_distributed_sampler_maybe(super().predict_dataloader(), RunningStage.PREDICTING)
class TrainAndEvalDataModules(_DataModuleGroup):
def __init__(self, train_data_module: pl.LightningDataModule, eval_data_module: pl.LightningDataModule) -> None:
super().__init__([train_data_module, eval_data_module])
@overrides
def train_dataloader(self) -> TRAIN_DATALOADERS:
return self.data_modules[0].train_dataloader() # noqa
@overrides
def val_dataloader(self) -> EVAL_DATALOADERS:
return self.data_modules[1].val_dataloader() # noqa
@overrides
def test_dataloader(self) -> EVAL_DATALOADERS:
return self.data_modules[1].test_dataloader() # noqa
@overrides
def predict_dataloader(self) -> EVAL_DATALOADERS:
return self.data_modules[1].predict_dataloader() # noqa
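# A hypothetical usage sketch (not part of the original code): train on one data module while running all the
# evaluation loops on another one, e.g., pre-training on WebVid while validating on MSR-VTT. The
# `WebVidDataModule`/`MsrVttDataModule` names come from this repository; `encoder`, `model` and `trainer` are
# assumed to exist:
#
#     data_module = TrainAndEvalDataModules(train_data_module=WebVidDataModule(encoder=encoder),
#                                           eval_data_module=MsrVttDataModule(encoder=encoder))
#     trainer.fit(model, datamodule=data_module)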
| 10,134 | 47.492823 | 120 | py |
fitclip | fitclip-main/aligner/data/multi_source_sampler.py | import itertools
import math
import sys
from typing import Generic, Iterable, Iterator, Literal, TypeVar, Union
from torch.utils.data import Sampler
T_co = TypeVar("T_co", covariant=True)
# We don't use `CycleIterator` from PyTorch Lightning because when used along with `itertools.islice`,
# it always creates a new iterator and wrongly starts from scratch because it's both an iterable and iterator (seems
# like the function calls `iter` internally).
class CycleSampler(Generic[T_co]):
def __init__(self, data_source: Iterable[T_co], length: int = sys.maxsize) -> None:
self.length = length
self.data_source = data_source
def __iter__(self) -> Iterator[T_co]:
if not self.length:
return
counter = 0
while True:
it = iter(self.data_source)
for elem in it:
yield elem
counter += 1
if counter >= self.length:
return
def __len__(self) -> int:
return self.length
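# Illustrative behavior sketch (not part of the original module): unlike an object that restarts on every `iter`
# call, a single iterator over `CycleSampler` keeps its position across successive `itertools.islice` calls,
# which is the property the round-robin sampler below relies on:
#
#     it = iter(CycleSampler(range(3)))
#     list(itertools.islice(it, 2)) # -> [0, 1]
#     list(itertools.islice(it, 2)) # -> [2, 0]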
class RoundRobinMultiSourceSampler(Sampler[int]):
"""
It supposes the dataset passed along to the `DataLoader` instance is a `ConcatDataset` instance.
Recommended to use with `drop_last=True`.
Some inspiration comes from the module `pytorch_lightning.trainer.supporters`.
"""
def __init__(self, sub_samplers: Iterable[Iterable[int]], sequence_sizes: Union[int, Iterable[int]] = 1,
mode: Literal["min_size", "max_size_cycle"] = "min_size") -> None:
sub_samplers = list(sub_samplers)
sequence_sizes = list(sequence_sizes) if isinstance(sequence_sizes, Iterable) \
else [sequence_sizes] * len(sub_samplers)
assert len(sub_samplers) == len(sequence_sizes)
assert all(len(sampler) for sampler in sub_samplers), ("All sub-samplers need to support `len` and be " # noqa
"non-zero.")
assert all(s > 0 for s in sequence_sizes)
super().__init__(sub_samplers)
self.sub_samplers = sub_samplers
self.sequence_sizes = sequence_sizes
self.mode = mode
for sampler in self.sub_samplers:
sampler._original_len = len(sampler) # noqa
if mode == "max_size_cycle":
max_cycle, max_i = max((math.floor(cycle), - i) for i, cycle in enumerate(self._cycles()))
max_i *= -1 # Trick to get the first sampler index among those of max cycle size.
# Use a large number instead of the default inf because `len` can fail otherwise.
# See https://stackoverflow.com/a/2481631/1165181
self.sub_samplers = [sampler if i == max_i else CycleSampler(sampler, length=sys.maxsize)
for i, sampler in enumerate(self.sub_samplers)]
for i, sampler in enumerate(self.sub_samplers):
if i != max_i:
sampler._original_len = len(sampler.data_source) # noqa
def _cycles(self) -> Iterator[float]:
for sampler, seq_size in zip(self.sub_samplers, self.sequence_sizes):
yield len(sampler) / seq_size
def __iter__(self) -> Iterator[int]:
iterators = [iter(sampler) for sampler in self.sub_samplers]
while True:
cum_size_in_concat_dataset = 0
for it, size, sampler in zip(iterators, self.sequence_sizes, self.sub_samplers):
i = -1
for i, n in enumerate(itertools.islice(it, size)):
yield cum_size_in_concat_dataset + n
if i < size - 1:
return
cum_size_in_concat_dataset += sampler._original_len # noqa
def __len__(self) -> int:
# Note in "max_size_cycle" mode the longest sampler will actually be the smallest one because the rest are
# repeated infinitely.
min_cycle, min_i = min((math.floor(cycle), i) for i, cycle in enumerate(self._cycles()))
return (sum(seq_size * (min_cycle + int(i < min_i)) for i, seq_size in enumerate(self.sequence_sizes))
+ len(self.sub_samplers[min_i]) % self.sequence_sizes[min_i])
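if __name__ == "__main__":
    # Minimal sanity-check sketch (not part of the original training code): two sources of sizes 4 and 6,
    # interleaved as 2 indices from the first source followed by 3 from the second, with the second source's
    # indices shifted by the size of the first source, as they would be inside a `ConcatDataset`.
    from torch.utils.data import SequentialSampler
    sub_samplers = [SequentialSampler(range(4)), SequentialSampler(range(6))]
    round_robin_sampler = RoundRobinMultiSourceSampler(sub_samplers, sequence_sizes=[2, 3])
    indices = list(round_robin_sampler)
    assert indices == [0, 1, 4, 5, 6, 2, 3, 7, 8, 9]
    assert len(indices) == len(round_robin_sampler)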
| 4,191 | 38.92381 | 119 | py |
fitclip | fitclip-main/aligner/data/video_data_module.py | import multiprocessing
from abc import ABC, abstractmethod
from typing import Any, Iterable, Mapping, MutableMapping, Optional, Union
import pytorch_lightning as pl
import torch.cuda
from overrides import overrides
from pytorch_lightning.utilities.apply_func import apply_to_collection
from torch.utils.data import DataLoader
from aligner.data.frame_sampler import FrameSampler
from aligner.data.video_dataset import VideoDataset
from aligner.encoder.video_encoder import TYPE_TRANSFORM, VideoEncoder
from aligner.encoder.video_text_encoder import VideoTextEncoder
ENCODER_OR_ENCODER_MAP = Union[VideoEncoder, Mapping[str, VideoEncoder]]
def precision_to_dtype(precision: Union[str, int]) -> torch.dtype:
if precision == 32:
return torch.float
elif precision == 64:
return torch.float64
elif precision in {16, "mixed"}:
return torch.float16
else:
raise ValueError(f"Unsupported precision value: {precision}")
class VideoDataModule(pl.LightningDataModule, ABC):
def __init__(self, encoder: ENCODER_OR_ENCODER_MAP, batch_size: Optional[int] = 1,
eval_batch_size: Optional[int] = 32,
num_workers: int = multiprocessing.cpu_count() // max(torch.cuda.device_count(), 1)) -> None:
super().__init__()
self.encoder = encoder
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
self.num_workers = num_workers
def _create_transform(self, train: bool) -> Union[TYPE_TRANSFORM, Mapping[str, TYPE_TRANSFORM]]:
float_precision = self.trainer.precision_plugin.precision
dtype = precision_to_dtype(float_precision)
return apply_to_collection(self.encoder, VideoEncoder,
lambda e: (e.get_train_transform if train else e.get_eval_transform)(dtype))
def _create_frame_sampler(self, train: bool) -> Union[FrameSampler, Mapping[str, FrameSampler]]:
return apply_to_collection(self.encoder, VideoEncoder,
lambda e: e.get_train_frame_sampler() if train else e.get_eval_frame_sampler())
def _create_dataset_encoder_kwargs(self, train: bool) -> MutableMapping[str, Any]:
# FIXME: Disable the cache because it seems like a new dataset is created by PL every time.
return {"frame_sampler": self._create_frame_sampler(train=train),
"transform": self._create_transform(train=train),
"pad_batch": apply_to_collection(self.encoder, VideoEncoder, lambda e: e.should_pad_batch),
"cache": False}
def _create_dataloader(self, dataset: VideoDataset, train: bool) -> DataLoader:
# Drop last in train so the NCE loss isn't smaller in the charts for the last batch.
        # Also, don't waste a training step on a smaller partial batch that underuses memory when a full batch
        # could make better use of it.
batch_size = self.batch_size if train else self.eval_batch_size
return DataLoader(dataset, batch_size=batch_size, num_workers=self.num_workers, pin_memory=True,
persistent_workers=self.num_workers > 0, collate_fn=getattr(dataset, "collate", None),
shuffle=train, drop_last=train)
@overrides
def predict_dataloader(self) -> DataLoader:
return self.val_dataloader()
class VideoTextDataModule(VideoDataModule, ABC):
def __init__(self, encoder: Union[VideoTextEncoder, Mapping[str, VideoTextEncoder]], **kwargs) -> None:
super().__init__(encoder=encoder, **kwargs)
@overrides
def _create_dataset_encoder_kwargs(self, train: bool) -> MutableMapping[str, Any]:
kwargs = super()._create_dataset_encoder_kwargs(train=train)
kwargs["tokenizer"] = apply_to_collection(self.encoder, VideoEncoder, lambda e: e.get_tokenizer())
return kwargs
class VideoClassificationDataModule(VideoDataModule, ABC):
@property
@abstractmethod
def categories(self) -> Mapping[str, int]:
raise NotImplementedError
@property
def templates(self) -> Optional[Iterable[str]]:
return None
| 4,101 | 44.577778 | 114 | py |
fitclip | fitclip-main/aligner/data/video_text_dataset.py | from abc import ABC
from typing import Mapping, Union
from torch.utils.data.dataloader import default_collate
from aligner.data.tokenizer_collate import MappingTokenizerCollate
from aligner.data.video_dataset import VideoDataset
from aligner.encoder.video_text_encoder import TYPE_TOKENIZER
class VideoTextDataset(VideoDataset, ABC):
def __init__(self, tokenizer: Union[TYPE_TOKENIZER, Mapping[str, TYPE_TOKENIZER]], target_key_name: str = "text",
**kwargs) -> None:
super().__init__(target_key_name=target_key_name, **kwargs)
self.collate = MappingTokenizerCollate(tokenizer, target_key_name,
default_collate_fn=getattr(self, "collate", default_collate))
| 744 | 42.823529 | 117 | py |
fitclip | fitclip-main/aligner/data/hmdb.py | import functools
import glob
import os
from typing import Iterable, Literal, Mapping, Optional, Tuple
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.ucf import UCF_101_TEMPLATES
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDataset
from util.typing_utils import TYPE_PATH
TRAIN_TAG = 1
TEST_TAG = 2
class Hmdb(VideoDataset):
def __init__(self, categories: Mapping[str, int], splits_folder: TYPE_PATH, split: Literal[1, 2, 3],
tag: Literal[1, 2], videos_folder: TYPE_PATH, **kwargs) -> None:
self.categories = categories
videos_folder = cached_path(videos_folder)
video_paths = []
for path in glob.iglob(os.path.join(cached_path(splits_folder), f"*_test_split{split}.txt")):
category = os.path.basename(path).rsplit("_", maxsplit=2)[0]
with open(path) as file:
for line in file:
filename, file_tag = line.strip().split(maxsplit=1)
file_tag = int(file_tag)
if file_tag == tag:
video_paths.append(os.path.join(videos_folder, category, filename))
super().__init__(video_paths=video_paths, **kwargs)
@functools.lru_cache
@overrides
def _get_video_id(self, video_idx: int) -> str:
path = self.video_paths[video_idx]
folder_path, filename = os.path.split(path)
folder_name = os.path.basename(folder_path)
return os.path.join(folder_name, filename)
@functools.lru_cache
@overrides
def _get_target(self, video_idx: int) -> Tuple[str, int]:
video_id = self._get_video_id(video_idx)
folder_name = os.path.dirname(video_id)
category = folder_name.replace("_", " ")
return category, self.categories[category]
class HmdbDataModule(VideoClassificationDataModule): # noqa
categories = {} # Necessary because it's an abstract property. See https://stackoverflow.com/a/42529760/1165181
def __init__(self, categories_file_path: TYPE_PATH, splits_folder: TYPE_PATH, split: Literal[1, 2, 3],
videos_folder: TYPE_PATH, **kwargs) -> None:
super().__init__(**kwargs)
self.splits_folder = splits_folder
self.split = split
self.videos_folder = videos_folder
with open(cached_path(categories_file_path)) as file:
self.categories = {line.strip(): i for i, line in enumerate(file)}
@property
@overrides
def templates(self) -> Optional[Iterable[str]]:
return UCF_101_TEMPLATES
@overrides
def train_dataloader(self) -> DataLoader:
dataset = Hmdb(categories=self.categories, splits_folder=self.splits_folder, split=self.split,
tag=TRAIN_TAG, videos_folder=self.videos_folder, # noqa
**self._create_dataset_encoder_kwargs(train=True))
return self._create_dataloader(dataset, train=True)
@overrides
def val_dataloader(self) -> DataLoader:
dataset = Hmdb(categories=self.categories, splits_folder=self.splits_folder, split=self.split,
tag=TEST_TAG, videos_folder=self.videos_folder, # noqa
**self._create_dataset_encoder_kwargs(train=False))
return self._create_dataloader(dataset, train=False)
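# Hypothetical construction sketch (the paths and the `encoder` instance are placeholders, not from the original
# code):
#
#     data_module = HmdbDataModule(categories_file_path="hmdb51_categories.txt",
#                                  splits_folder="testTrainMulti_7030_splits/", split=1,
#                                  videos_folder="hmdb51_videos/", encoder=encoder)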
| 3,441 | 39.023256 | 116 | py |
fitclip | fitclip-main/aligner/data/webvid.py | import os
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_dataset import VideoDataset
from aligner.data.video_text_dataset import VideoTextDataset
from util.typing_utils import TYPE_PATH
from util.video_utils import get_sorted_videos_in_folder
TRAIN_VIDEO_INFO_FILE_PATH = "/datasets/webvid/results_2M_train.csv"
# noinspection SpellCheckingInspection
TRAIN_VIDEOS_FOLDER = "/datasets/webvid/videos_low_resolution/train/webvid_lowres/"
VAL_VIDEO_INFO_FILE_PATH = "/datasets/webvid/results_2M_val.csv"
# noinspection SpellCheckingInspection
VAL_VIDEOS_FOLDER = "/datasets/webvid/videos_low_resolution/val/val_lowres/"
class WebVid(VideoTextDataset):
def __init__(self, video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH,
filter_videos_from_info_file: bool = False, **kwargs) -> None:
# noinspection SpellCheckingInspection
self.video_info = pd.read_csv(cached_path(video_info_file_path), index_col="videoid", dtype={"videoid": str})
if filter_videos_from_info_file:
video_paths = (cached_path(os.path.join(videos_folder, f"{video_id}.mp4"))
for video_id, _ in self.video_info.iterrows())
else:
video_paths = get_sorted_videos_in_folder(cached_path(videos_folder))
super().__init__(video_paths=video_paths, **kwargs)
@overrides
def _get_target(self, video_idx: int) -> str:
video_id = self._get_video_id(video_idx)
return self.video_info.loc[video_id, "name"]
class WebVidDataModule(VideoTextDataModule): # noqa
def __init__(self, train_video_info_file_path: TYPE_PATH = TRAIN_VIDEO_INFO_FILE_PATH,
train_videos_folder: TYPE_PATH = TRAIN_VIDEOS_FOLDER, train_filter_videos_from_info_file: bool = False,
val_video_info_file_path: TYPE_PATH = VAL_VIDEO_INFO_FILE_PATH,
val_videos_folder: TYPE_PATH = VAL_VIDEOS_FOLDER, val_filter_videos_from_info_file: bool = False,
**kwargs) -> None:
super().__init__(**kwargs)
self.train_video_info_file_path = train_video_info_file_path
self.train_videos_folder = train_videos_folder
self.train_filter_videos_from_info_file = train_filter_videos_from_info_file
self.val_video_info_file_path = val_video_info_file_path
self.val_videos_folder = val_videos_folder
self.val_filter_videos_from_info_file = val_filter_videos_from_info_file
def _dataset(self, video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH,
filter_videos_from_info_file: bool, train: bool) -> VideoDataset:
return WebVid(video_info_file_path=video_info_file_path, videos_folder=videos_folder,
filter_videos_from_info_file=filter_videos_from_info_file,
**self._create_dataset_encoder_kwargs(train=train))
@overrides
def train_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.train_video_info_file_path,
videos_folder=self.train_videos_folder,
filter_videos_from_info_file=self.train_filter_videos_from_info_file, train=True)
return self._create_dataloader(dataset, train=True)
@overrides
def val_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.val_video_info_file_path,
videos_folder=self.val_videos_folder,
filter_videos_from_info_file=self.val_filter_videos_from_info_file, train=False)
return self._create_dataloader(dataset, train=False)
| 3,814 | 49.197368 | 120 | py |
fitclip | fitclip-main/aligner/data/video_dataset.py | import collections.abc
import functools
import logging
import os
from abc import ABC, abstractmethod
from typing import Any, Generic, Iterable, Mapping, Optional, Sequence, Tuple, TypeVar, Union
import torch
from overrides import overrides
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from aligner.data.frame_sampler import FrameSampler
from aligner.data.video_reader import VideoReader
from aligner.encoder.video_encoder import TYPE_TRANSFORM
from util.typing_utils import TYPE_PATH
T = TypeVar("T")
LOGGER = logging.getLogger(__name__)
def get_filename_without_extension(path: TYPE_PATH) -> str:
return os.path.basename(path).split(".", maxsplit=1)[0]
# TODO: support taking multiple clips per video, where they are chosen according to some strategy.
class VideoDataset(Dataset, Generic[T], ABC):
def __init__(self, video_paths: Iterable[TYPE_PATH], frame_sampler: Union[FrameSampler, Mapping[str, FrameSampler]],
transform: Union[TYPE_TRANSFORM, Mapping[str, TYPE_TRANSFORM]] = lambda x: x,
video_key_name: str = "video", target_key_name: str = "target", pad_batch: bool = True,
cache: bool = False) -> None:
super().__init__()
self.video_paths = video_paths if hasattr(video_paths, "__getitem__") else list(video_paths)
self.target_key_name = target_key_name
self.pad_batch = pad_batch
self.cache = cache
if isinstance(frame_sampler, Mapping):
self.frame_sampler_map = {f"{video_key_name}_{k}": v for k, v in frame_sampler.items()}
else:
self.frame_sampler_map = {video_key_name: frame_sampler}
if isinstance(transform, Mapping):
self.transform_map = {f"{video_key_name}_{k}": v for k, v in transform.items()}
else:
self.transform_map = {video_key_name: transform}
if set(self.frame_sampler_map) != set(self.transform_map):
if video_key_name in self.frame_sampler_map:
self.frame_sampler_map = {k: self.frame_sampler_map[video_key_name] for k in self.transform_map}
elif video_key_name in self.transform_map:
self.transform_map = {k: self.transform_map[video_key_name] for k in self.frame_sampler_map}
else:
raise ValueError("The provided keys for the frame sampler and the transform don't match.")
@abstractmethod
def _get_target(self, video_idx: int) -> T:
"""Returns the target associated with `self.video_paths[video_idx]`."""
raise NotImplementedError
@functools.lru_cache
def _get_video_id(self, video_idx: int) -> str:
return get_filename_without_extension(self.video_paths[video_idx])
def _get_times(self, video_idx: int) -> Tuple[Optional[float], Optional[float]]:
"""Returns the video clip start and end times for the given video index, if any."""
return None, None
@functools.lru_cache(maxsize=None)
def _cached_get_item(self, video_idx: int) -> Mapping[str, Union[torch.Tensor, str, T]]:
path = self.video_paths[video_idx]
video_id = self._get_video_id(video_idx)
video_reader = VideoReader.from_path(path)
start_time, end_time = self._get_times(video_idx)
start_frame_idx = 0 if start_time is None else video_reader.time_to_indices(start_time).item()
end_frame_idx = len(video_reader) - 1 if end_time is None else video_reader.time_to_indices(end_time).item()
idxs_map = {k: frame_sampler(start_frame_idx, end_frame_idx, fps=video_reader.get_avg_fps())
for k, frame_sampler in self.frame_sampler_map.items()}
frames_map = {k: video_reader(idxs) for k, idxs in idxs_map.items()}
return {
self.target_key_name: self._get_target(video_idx),
"video_id": video_id,
**{k: transform(frames_map[k]) for k, transform in self.transform_map.items()},
}
@overrides
def __getitem__(self, video_idx: int) -> Mapping[str, Union[torch.Tensor, str, T]]:
# Note we have to explicitly pass `self` to the wrapped one.
fn = self._cached_get_item if self.cache else functools.partial(self._cached_get_item.__wrapped__, self) # noqa
return fn(video_idx)
def __len__(self) -> int:
return len(self.video_paths)
def _collate(self, batch: Sequence[Any]) -> Any:
if self.pad_batch:
elem = batch[0]
if isinstance(elem, torch.Tensor):
return pad_sequence(batch, batch_first=True) # noqa
elif isinstance(elem, collections.abc.Mapping):
return {k: self._collate([d[k] for d in batch]) if k in self.transform_map
else default_collate([d[k] for d in batch])
for k in elem}
return default_collate(batch)
def collate(self, batch: Sequence[Any]) -> Any:
        # Use an auxiliary function instead of doing it directly here because the collation is recursive and this
        # method may be overridden; if we recursed on the public method, the overridden version could be called
        # instead of this one.
return self._collate(batch)
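# Illustrative subclass sketch (not part of the original code) showing the minimal method a concrete
# `VideoDataset` has to provide; here the target is simply the video id:
#
#     class FilenameVideoDataset(VideoDataset[str]):
#         @overrides
#         def _get_target(self, video_idx: int) -> str:
#             return self._get_video_id(video_idx)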
| 5,261 | 43.59322 | 120 | py |
fitclip | fitclip-main/aligner/data/tokenizer_collate.py | import collections.abc
from abc import ABC, abstractmethod
from typing import Any, Callable, Iterable, Mapping, Sequence, Tuple, Union
from overrides import overrides
from pytorch_lightning.utilities.apply_func import apply_to_collection
from torch.utils.data.dataloader import default_collate
from aligner.encoder.video_text_encoder import TYPE_TOKENIZER
# Derived from `default_collate`.
def batch_tokenize_collate(batch: Sequence[Any], tokenizer: TYPE_TOKENIZER) -> Any:
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, (str, bytes)):
return tokenizer(batch)
elif isinstance(elem, collections.abc.Mapping):
return {k: batch_tokenize_collate([d[k] for d in batch], tokenizer) for k in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(batch_tokenize_collate(samples, tokenizer) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError("Each element in sequence of batch should be of equal size.")
transposed = zip(*batch)
return [batch_tokenize_collate(samples, tokenizer) for samples in transposed]
else:
raise TypeError(f"Batch must contain strings, mappings or sequences; found {elem_type}.")
class TokenizerCollate(ABC):
"""`DataLoader` collate function that batch-tokenizes part of the batch.
The pros of batch-tokenizing during collation are:
1) We can pad at the same time, based on the longest sequence. If we tokenized in the dataset, we wouldn't know
what size to take, and we may take a long one, wasting computing and especially memory. If we batch-tokenize when
iterating through the data_module loader, we are in the main thread and wasting valuable time that could be used for
the GPU.
2) The `tokenizers` library is written in Rust and may have some optimizations for batch-tokenizing (apart from
multi-threading, which is disabled so each data_module loader worker uses one CPU core.)
"""
def __init__(self, tokenizer: Union[TYPE_TOKENIZER, Mapping[str, TYPE_TOKENIZER]], *,
batch_tokenize_collate_fn: Callable[[Sequence[Any], TYPE_TOKENIZER], Any] = batch_tokenize_collate,
default_collate_fn: Callable[[Sequence[Any]], Any] = default_collate) -> None:
super().__init__()
self.tokenizer = tokenizer
self.batch_tokenize_collate_fn = batch_tokenize_collate_fn
self.default_collate_fn = default_collate_fn
@abstractmethod
def _split_uncollated_batch(self, batch: Sequence[Any]) -> Tuple[Sequence[Any], Sequence[Any]]:
"""Splits the batch into a pair where the first element is going to be processed with the default collate
function and each of the elements in the second one are going to be batch-tokenized."""
raise NotImplementedError
@abstractmethod
def _join_collated_batch(self, collated_with_default: Any, collated_with_tokenizer: Any) -> Any:
raise NotImplementedError
def __call__(self, batch: Sequence[Any]) -> Any:
s1, s2 = self._split_uncollated_batch(batch)
batch_tokenized = apply_to_collection(self.tokenizer, Callable, lambda t: self.batch_tokenize_collate_fn(s2, t))
return self._join_collated_batch(self.default_collate_fn(s1), batch_tokenized)
class MappingTokenizerCollate(TokenizerCollate):
def __init__(self, tokenizer: TYPE_TOKENIZER, keys_to_tokenize: Union[str, Iterable[str]], **kwargs) -> None:
super().__init__(tokenizer, **kwargs)
self.keys_to_tokenize = frozenset({keys_to_tokenize} if isinstance(keys_to_tokenize, str) else keys_to_tokenize)
@overrides(check_signature=False)
def _split_uncollated_batch(self,
batch: Sequence[Mapping[str, Any]]) -> Tuple[Sequence[Any], Sequence[Any]]:
return [{k: v for k, v in d.items() if k not in self.keys_to_tokenize} for d in batch], \
[{k: v for k, v in d.items() if k in self.keys_to_tokenize} for d in batch]
@overrides(check_signature=False)
def _join_collated_batch(self, collated_with_default: Any, collated_with_tokenizer: Any) -> Any:
# If the tokenizer is actually composed of many tokenizers, we flatten out the structure.
if isinstance(self.tokenizer, Mapping):
collated_with_tokenizer = {f"{k_child}_{k_parent}": v_child
for k_parent, v_parent in collated_with_tokenizer.items()
for k_child, v_child in v_parent.items()}
return {**collated_with_default, **collated_with_tokenizer}
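if __name__ == "__main__":
    # Minimal sanity-check sketch (not part of the original module): a toy "tokenizer" that only counts
    # whitespace-separated tokens, showing how `MappingTokenizerCollate` batch-tokenizes the "text" key while
    # the rest of the batch goes through the default collate function.
    def toy_tokenizer(texts):
        return {"num_tokens": [len(text.split()) for text in texts]}

    collate = MappingTokenizerCollate(toy_tokenizer, keys_to_tokenize="text")
    batch = [{"text": "a video of a dog", "video_id": "v1"}, {"text": "hello world", "video_id": "v2"}]
    collated = collate(batch)
    assert collated["text"] == {"num_tokens": [5, 2]}
    assert collated["video_id"] == ["v1", "v2"]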
| 4,865 | 53.066667 | 120 | py |
fitclip | fitclip-main/aligner/data/kinetics.py | import os
from typing import Iterable, Mapping, Optional, Tuple
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDataset
from util.typing_utils import TYPE_PATH
from util.video_utils import get_sorted_videos_in_folder
class Kinetics(VideoDataset):
def __init__(self, categories: Mapping[str, int], video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH,
filter_videos_from_info_file: bool = False, **kwargs) -> None:
self.categories = categories
self.video_info = pd.read_csv(cached_path(video_info_file_path))
self.video_info["video_id"] = \
self.video_info.agg(lambda row: f"{row.youtube_id}_{row.time_start:06}_{row.time_end:06}", axis=1)
self.video_info.set_index("video_id", inplace=True)
if filter_videos_from_info_file:
video_paths = (cached_path(os.path.join(videos_folder, f"{video_id}.mp4"))
for video_id, _ in self.video_info.iterrows())
else:
video_paths = get_sorted_videos_in_folder(cached_path(videos_folder))
super().__init__(video_paths=video_paths, **kwargs)
@overrides
def _get_target(self, video_idx: int) -> Tuple[str, int]:
video_id = self._get_video_id(video_idx)
category = self.video_info.loc[video_id, "label"]
return category, self.categories[category]
class KineticsDataModule(VideoClassificationDataModule): # noqa
categories = {} # Necessary because it's an abstract property. See https://stackoverflow.com/a/42529760/1165181
def __init__(self, categories_file_path: TYPE_PATH, train_video_info_file_path: TYPE_PATH,
train_videos_folder: TYPE_PATH, val_video_info_file_path: TYPE_PATH, val_videos_folder: TYPE_PATH,
test_video_info_file_path: TYPE_PATH, test_videos_folder: TYPE_PATH,
train_filter_videos_from_info_file: bool = False, val_filter_videos_from_info_file: bool = False,
test_filter_videos_from_info_file: bool = False, **kwargs) -> None:
super().__init__(**kwargs)
self.train_video_info_file_path = train_video_info_file_path
self.train_videos_folder = train_videos_folder
self.train_filter_videos_from_info_file = train_filter_videos_from_info_file
self.val_video_info_file_path = val_video_info_file_path
self.val_videos_folder = val_videos_folder
self.val_filter_videos_from_info_file = val_filter_videos_from_info_file
self.test_video_info_file_path = test_video_info_file_path
self.test_videos_folder = test_videos_folder
self.test_filter_videos_from_info_file = test_filter_videos_from_info_file
with open(cached_path(categories_file_path)) as file:
self.categories = {line.strip(): i for i, line in enumerate(file)}
@property
@overrides
def templates(self) -> Optional[Iterable[str]]:
return [ # From https://github.com/openai/CLIP/blob/main/data/prompts.md#kinetics700
"a photo of {}.",
"a photo of a person {}.",
"a photo of a person using {}.",
"a photo of a person doing {}.",
"a photo of a person during {}.",
"a photo of a person performing {}.",
"a photo of a person practicing {}.",
"a video of {}.",
"a video of a person {}.",
"a video of a person using {}.",
"a video of a person doing {}.",
"a video of a person during {}.",
"a video of a person performing {}.",
"a video of a person practicing {}.",
"a example of {}.",
"a example of a person {}.",
"a example of a person using {}.",
"a example of a person doing {}.",
"a example of a person during {}.",
"a example of a person performing {}.",
"a example of a person practicing {}.",
"a demonstration of {}.",
"a demonstration of a person {}.",
"a demonstration of a person using {}.",
"a demonstration of a person doing {}.",
"a demonstration of a person during {}.",
"a demonstration of a person performing {}.",
"a demonstration of a person practicing {}.",
]
def _dataset(self, video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH,
filter_videos_from_info_file: bool, train: bool) -> VideoDataset:
return Kinetics(self.categories, video_info_file_path=video_info_file_path, videos_folder=videos_folder,
filter_videos_from_info_file=filter_videos_from_info_file,
**self._create_dataset_encoder_kwargs(train=train))
@overrides
def train_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.train_video_info_file_path,
videos_folder=self.train_videos_folder,
filter_videos_from_info_file=self.train_filter_videos_from_info_file, train=True)
return self._create_dataloader(dataset, train=True)
@overrides
def val_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.val_video_info_file_path,
videos_folder=self.val_videos_folder,
filter_videos_from_info_file=self.val_filter_videos_from_info_file, train=False)
return self._create_dataloader(dataset, train=False)
@overrides
def test_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.test_video_info_file_path,
videos_folder=self.test_videos_folder,
filter_videos_from_info_file=self.test_filter_videos_from_info_file, train=False)
return self._create_dataloader(dataset, train=False)
| 6,115 | 49.131148 | 116 | py |
fitclip | fitclip-main/aligner/data/msrvtt.py | import json
import os
import random
from typing import Literal
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_dataset import VideoDataset
from aligner.data.video_text_dataset import VideoTextDataset
from util.typing_utils import TYPE_PATH
from util.video_utils import get_sorted_videos_in_folder
TYPE_CAPTION_SAMPLING_STRATEGY = Literal["first", "random"]
class MsrVtt(VideoTextDataset):
def __init__(self, videos_folder: TYPE_PATH, file_list_path: TYPE_PATH, annotations_path: TYPE_PATH,
caption_sampling_strategy: TYPE_CAPTION_SAMPLING_STRATEGY, **kwargs) -> None:
with open(cached_path(file_list_path)) as file:
video_ids = {stripped_line for line in file if (stripped_line := line.strip())} # noqa
video_paths = (path
for path in get_sorted_videos_in_folder(cached_path(videos_folder))
if os.path.basename(path).split(".", maxsplit=1)[0] in video_ids)
super().__init__(video_paths=video_paths, **kwargs)
self.caption_sampling_strategy = caption_sampling_strategy
with open(cached_path(annotations_path)) as file:
metadata = json.load(file)
self.video_info = pd.DataFrame(metadata["annotations"])
self.video_info.set_index("image_id", inplace=True)
@overrides
def _get_target(self, video_idx: int) -> str:
video_id = self._get_video_id(video_idx)
captions = self.video_info.loc[video_id, "caption"]
if self.caption_sampling_strategy == "first":
return captions[0]
elif self.caption_sampling_strategy == "random":
return random.choice(captions)
else:
raise ValueError(f"Invalid choice of caption sampling strategy: {self.caption_sampling_strategy}")
class MsrVttDataModule(VideoTextDataModule): # noqa
def __init__(self,
base_path: TYPE_PATH = "https://www.robots.ox.ac.uk/~maxbain/frozen-in-time/data/MSRVTT.zip!MSRVTT/",
train_file_list_rel_path: TYPE_PATH = "train_list_jsfusion.txt", # 1K-A split
val_file_list_rel_path: TYPE_PATH = "val_list_jsfusion.txt", **kwargs) -> None:
super().__init__(**kwargs)
base_path = cached_path(base_path)
self.videos_folder = os.path.join(base_path, "videos/all")
self.annotation_path = os.path.join(base_path, "annotation/MSR_VTT.json")
self.train_file_list_path = os.path.join(base_path, "structured-symlinks", train_file_list_rel_path)
self.val_file_list_path = os.path.join(base_path, "structured-symlinks", val_file_list_rel_path)
def _dataset(self, file_list_path: TYPE_PATH, caption_sampling_strategy: TYPE_CAPTION_SAMPLING_STRATEGY,
train: bool) -> VideoDataset:
return MsrVtt(videos_folder=self.videos_folder, file_list_path=file_list_path,
annotations_path=self.annotation_path, caption_sampling_strategy=caption_sampling_strategy,
**self._create_dataset_encoder_kwargs(train=train))
@overrides
def train_dataloader(self) -> DataLoader:
dataset = self._dataset(file_list_path=self.train_file_list_path, caption_sampling_strategy="random",
train=True)
return self._create_dataloader(dataset, train=True)
@overrides
def val_dataloader(self) -> DataLoader:
dataset = self._dataset(file_list_path=self.val_file_list_path, caption_sampling_strategy="first", train=False)
return self._create_dataloader(dataset, train=False)
| 3,743 | 45.8 | 119 | py |
fitclip | fitclip-main/aligner/data/didemo.py | import json
import os
from collections import defaultdict
from cached_path import CACHE_DIR, _find_latest_cached, cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_text_dataset import VideoTextDataset
from util.typing_utils import TYPE_PATH
HASH_LIST_PATH = "https://raw.githubusercontent.com/LisaAnne/LocalizingMoments/master/data/yfcc100m_hash.txt"
VAL_ANNOTATION_PATH = "https://raw.githubusercontent.com/LisaAnne/LocalizingMoments/master/data/val_data.json"
VIDEOS_FOLDER = "https://multimedia-commons.s3-us-west-2.amazonaws.com/data/videos/mp4/"
class Didemo(VideoTextDataset):
def __init__(self, videos_folder: TYPE_PATH, hash_list_path: TYPE_PATH, annotations_path: TYPE_PATH,
**kwargs) -> None:
with open(cached_path(annotations_path)) as file:
description_list_by_id = defaultdict(list)
for video in json.load(file):
description_list_by_id[video["video"]].append(video["description"])
self.description_paragraph_by_id = {video_id: " ".join(descriptions)
for video_id, descriptions in description_list_by_id.items()}
with open(cached_path(hash_list_path)) as file:
hash_by_flickr_id = {}
for line in file:
flickr_id, hash_ = line.strip().split("\t")
hash_by_flickr_id[flickr_id] = hash_
self.video_ids_by_path = {}
for video_id in self.description_paragraph_by_id:
flickr_id = video_id.split("_")[1]
hash_ = hash_by_flickr_id[flickr_id]
video_path_or_url = os.path.join(videos_folder, hash_[:3], hash_[3:6], f"{hash_}.mp4")
# We only download some videos and not the whole folder.
# But if it's already cached, we avoid sending a HEAD request. This is an issue if the file is updated,
# but we assume it won't happen.
video_path = _find_latest_cached(video_path_or_url, CACHE_DIR) or cached_path(video_path_or_url)
self.video_ids_by_path[video_path] = video_id
super().__init__(video_paths=self.video_ids_by_path.keys(), **kwargs)
@overrides
def _get_target(self, video_idx: int) -> str:
video_path = self.video_paths[video_idx]
video_id = self.video_ids_by_path[video_path]
return self.description_paragraph_by_id[video_id]
class DidemoDataModule(VideoTextDataModule): # noqa
def __init__(self, videos_folder: TYPE_PATH = VIDEOS_FOLDER, hash_list_path: TYPE_PATH = HASH_LIST_PATH,
val_annotation_path: TYPE_PATH = VAL_ANNOTATION_PATH, **kwargs) -> None:
super().__init__(**kwargs)
self.videos_folder = videos_folder
self.hash_list_path = hash_list_path
self.val_annotation_path = val_annotation_path
@overrides
def val_dataloader(self) -> DataLoader:
dataset = Didemo(videos_folder=self.videos_folder, hash_list_path=self.hash_list_path,
annotations_path=self.val_annotation_path, **self._create_dataset_encoder_kwargs(train=False))
return self._create_dataloader(dataset, train=False)
| 3,269 | 47.088235 | 119 | py |
fitclip | fitclip-main/aligner/data/conceptual_captions.py | import functools
import os
import pandas as pd
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from torchvision.datasets.folder import IMG_EXTENSIONS
from aligner.data.video_data_module import VideoTextDataModule
from aligner.data.video_dataset import VideoDataset
from aligner.data.video_text_dataset import VideoTextDataset
from util.typing_utils import TYPE_PATH
from util.video_utils import get_videos_in_folder
class ConceptualCaptions(VideoTextDataset):
def __init__(self, video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH, **kwargs) -> None:
self.video_info = pd.read_csv(cached_path(video_info_file_path), names=["name", "url", "video_id"],
index_col="video_id")
# The version of CC3M used here was downloaded by keeping the original filenames. The issue is that the
# filenames repeat, and only one of the files was kept, but we don't know which one it is from the
# information file with the captions. So as a workaround, we remove the duplicate video IDs:
self.video_info = self.video_info[~self.video_info.index.duplicated(keep=False)]
video_paths = sorted(path
for path in get_videos_in_folder(cached_path(videos_folder), extensions=IMG_EXTENSIONS)
if os.path.basename(path) in self.video_info.index)
super().__init__(video_paths=video_paths, **kwargs)
@functools.lru_cache
@overrides
def _get_video_id(self, video_idx: int) -> str:
return os.path.basename(self.video_paths[video_idx])
@overrides
def _get_target(self, video_idx: int) -> str:
video_id = self._get_video_id(video_idx)
return self.video_info.loc[video_id, "name"]
class ConceptualCaptionsDataModule(VideoTextDataModule): # noqa
def __init__(self, train_video_info_file_path: TYPE_PATH, train_videos_folder: TYPE_PATH,
val_video_info_file_path: TYPE_PATH, val_videos_folder: TYPE_PATH, **kwargs) -> None:
super().__init__(**kwargs)
self.train_video_info_file_path = train_video_info_file_path
self.train_videos_folder = train_videos_folder
self.val_video_info_file_path = val_video_info_file_path
self.val_videos_folder = val_videos_folder
def _dataset(self, video_info_file_path: TYPE_PATH, videos_folder: TYPE_PATH, train: bool) -> VideoDataset:
return ConceptualCaptions(video_info_file_path=video_info_file_path, videos_folder=videos_folder,
**self._create_dataset_encoder_kwargs(train=train))
@overrides
def train_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.train_video_info_file_path,
videos_folder=self.train_videos_folder, train=True)
return self._create_dataloader(dataset, train=True)
@overrides
def val_dataloader(self) -> DataLoader:
dataset = self._dataset(video_info_file_path=self.val_video_info_file_path,
videos_folder=self.val_videos_folder, train=False)
return self._create_dataloader(dataset, train=False)
| 3,241 | 48.121212 | 116 | py |
fitclip | fitclip-main/aligner/data/ucf.py | import functools
import os
import re
from typing import Iterable, Mapping, Optional, Tuple
from cached_path import cached_path
from overrides import overrides
from torch.utils.data import DataLoader
from aligner.data.video_data_module import VideoClassificationDataModule
from aligner.data.video_dataset import VideoDataset
from util.typing_utils import TYPE_PATH
CATEGORIES_FILE_PATH = ("https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip!"
"ucfTrainTestlist/classInd.txt")
VAL_FILE_LIST_PATH = ("https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip!"
"ucfTrainTestlist/testlist01.txt")
VAL_VIDEOS_FOLDER = "https://www.crcv.ucf.edu/data/UCF101/UCF101.rar!UCF-101"
RE_CAPITALIZED_WORDS = re.compile(r"[a-zA-Z][^A-Z]*")
UCF_101_TEMPLATES = [ # From https://github.com/openai/CLIP/blob/main/data/prompts.md#ucf101
"a photo of a person {}.",
"a video of a person {}.",
"a example of a person {}.",
"a demonstration of a person {}.",
"a photo of the person {}.",
"a video of the person {}.",
"a example of the person {}.",
"a demonstration of the person {}.",
"a photo of a person using {}.",
"a video of a person using {}.",
"a example of a person using {}.",
"a demonstration of a person using {}.",
"a photo of the person using {}.",
"a video of the person using {}.",
"a example of the person using {}.",
"a demonstration of the person using {}.",
"a photo of a person doing {}.",
"a video of a person doing {}.",
"a example of a person doing {}.",
"a demonstration of a person doing {}.",
"a photo of the person doing {}.",
"a video of the person doing {}.",
"a example of the person doing {}.",
"a demonstration of the person doing {}.",
"a photo of a person during {}.",
"a video of a person during {}.",
"a example of a person during {}.",
"a demonstration of a person during {}.",
"a photo of the person during {}.",
"a video of the person during {}.",
"a example of the person during {}.",
"a demonstration of the person during {}.",
"a photo of a person performing {}.",
"a video of a person performing {}.",
"a example of a person performing {}.",
"a demonstration of a person performing {}.",
"a photo of the person performing {}.",
"a video of the person performing {}.",
"a example of the person performing {}.",
"a demonstration of the person performing {}.",
"a photo of a person practicing {}.",
"a video of a person practicing {}.",
"a example of a person practicing {}.",
"a demonstration of a person practicing {}.",
"a photo of the person practicing {}.",
"a video of the person practicing {}.",
"a example of the person practicing {}.",
"a demonstration of the person practicing {}.",
]
def _folder_name_to_category(folder_name: str) -> str:
return " ".join(RE_CAPITALIZED_WORDS.findall(folder_name))
class Ucf(VideoDataset):
def __init__(self, categories: Mapping[str, int], file_list_path: TYPE_PATH, videos_folder: TYPE_PATH,
**kwargs) -> None:
self.categories = categories
videos_folder = cached_path(videos_folder)
with open(cached_path(file_list_path)) as file:
video_ids = (stripped_line for line in file if (stripped_line := line.strip()))
super().__init__(video_paths=(os.path.join(videos_folder, path) for path in video_ids), **kwargs)
@functools.lru_cache
@overrides
def _get_video_id(self, video_idx: int) -> str:
path = self.video_paths[video_idx]
folder_path, filename = os.path.split(path)
folder_name = os.path.basename(folder_path)
return os.path.join(folder_name, filename)
@functools.lru_cache
@overrides
def _get_target(self, video_idx: int) -> Tuple[str, int]:
video_id = self._get_video_id(video_idx)
folder_name = os.path.dirname(video_id)
category = _folder_name_to_category(folder_name)
return category, self.categories[category]
class UcfDataModule(VideoClassificationDataModule): # noqa
categories = {} # Necessary because it's an abstract property. See https://stackoverflow.com/a/42529760/1165181
def __init__(self, categories_file_path: TYPE_PATH = CATEGORIES_FILE_PATH,
val_file_list_path: TYPE_PATH = VAL_FILE_LIST_PATH,
val_videos_folder: TYPE_PATH = VAL_VIDEOS_FOLDER, **kwargs) -> None:
super().__init__(**kwargs)
self.val_file_list_path = val_file_list_path
self.val_videos_folder = val_videos_folder
with open(cached_path(categories_file_path)) as file:
self.categories = {}
for line in file:
id_, folder_name = line.strip().split()
self.categories[_folder_name_to_category(folder_name)] = int(id_) - 1
@property
@overrides
def templates(self) -> Optional[Iterable[str]]:
return UCF_101_TEMPLATES
@overrides
def val_dataloader(self) -> DataLoader:
dataset = Ucf(categories=self.categories, file_list_path=self.val_file_list_path,
videos_folder=self.val_videos_folder, **self._create_dataset_encoder_kwargs(train=False))
return self._create_dataloader(dataset, train=False)
| 5,400 | 40.229008 | 116 | py |
VQ-Diffusion | VQ-Diffusion-main/inference_VQ_Diffusion.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import torch
import cv2
import argparse
import numpy as np
import torchvision
from PIL import Image
from image_synthesis.utils.io import load_yaml_config
from image_synthesis.modeling.build import build_model
from image_synthesis.utils.misc import get_model_parameters_info
class VQ_Diffusion():
def __init__(self, config, path, imagenet_cf=False):
self.info = self.get_model(ema=True, model_path=path, config_path=config, imagenet_cf=imagenet_cf)
self.model = self.info['model']
self.epoch = self.info['epoch']
self.model_name = self.info['model_name']
self.model = self.model.cuda()
self.model.eval()
for param in self.model.parameters():
param.requires_grad=False
def get_model(self, ema, model_path, config_path, imagenet_cf):
if 'OUTPUT' in model_path: # pretrained model
model_name = model_path.split(os.path.sep)[-3]
else:
model_name = os.path.basename(config_path).replace('.yaml', '')
config = load_yaml_config(config_path)
if imagenet_cf:
config['model']['params']['diffusion_config']['params']['transformer_config']['params']['class_number'] = 1001
model = build_model(config)
model_parameters = get_model_parameters_info(model)
print(model_parameters)
if os.path.exists(model_path):
ckpt = torch.load(model_path, map_location="cpu")
else:
print("Model path: {} does not exist.".format(model_path))
exit(0)
if 'last_epoch' in ckpt:
epoch = ckpt['last_epoch']
elif 'epoch' in ckpt:
epoch = ckpt['epoch']
else:
epoch = 0
missing, unexpected = model.load_state_dict(ckpt["model"], strict=False)
print('Model missing keys:\n', missing)
print('Model unexpected keys:\n', unexpected)
if ema==True and 'ema' in ckpt:
print("Evaluate EMA model")
ema_model = model.get_ema_model()
missing, unexpected = ema_model.load_state_dict(ckpt['ema'], strict=False)
return {'model': model, 'epoch': epoch, 'model_name': model_name, 'parameter': model_parameters}
def inference_generate_sample_with_class(self, text, truncation_rate, save_root, batch_size, infer_speed=False, guidance_scale=1.0):
os.makedirs(save_root, exist_ok=True)
self.model.guidance_scale = guidance_scale
data_i = {}
data_i['label'] = [text]
data_i['image'] = None
condition = text
str_cond = str(condition)
save_root_ = os.path.join(save_root, str_cond)
os.makedirs(save_root_, exist_ok=True)
with torch.no_grad():
model_out = self.model.generate_content(
batch=data_i,
filter_ratio=0,
replicate=batch_size,
content_ratio=1,
return_att_weight=False,
sample_type="top"+str(truncation_rate)+'r',
) # B x C x H x W
# save results
content = model_out['content']
content = content.permute(0, 2, 3, 1).to('cpu').numpy().astype(np.uint8)
for b in range(content.shape[0]):
cnt = b
save_base_name = '{}'.format(str(cnt).zfill(6))
save_path = os.path.join(save_root_, save_base_name+'.jpg')
im = Image.fromarray(content[b])
im.save(save_path)
def inference_generate_sample_with_condition(self, text, truncation_rate, save_root, batch_size, infer_speed=False, guidance_scale=1.0, prior_rule=0, prior_weight=0, learnable_cf=True):
os.makedirs(save_root, exist_ok=True)
self.model.guidance_scale = guidance_scale
self.model.learnable_cf = self.model.transformer.learnable_cf = learnable_cf # whether to use learnable classifier-free
self.model.transformer.prior_rule = prior_rule # inference rule: 0 for VQ-Diffusion v1, 1 for only high-quality inference, 2 for purity prior
self.model.transformer.prior_weight = prior_weight # probability adjust parameter, 'r' in Equation.11 of Improved VQ-Diffusion
data_i = {}
data_i['text'] = [text]
data_i['image'] = None
condition = text
str_cond = str(condition)
save_root_ = os.path.join(save_root, str_cond)
os.makedirs(save_root_, exist_ok=True)
if infer_speed != False:
add_string = 'r,time'+str(infer_speed)
else:
add_string = 'r'
with torch.no_grad():
model_out = self.model.generate_content(
batch=data_i,
filter_ratio=0,
replicate=batch_size,
content_ratio=1,
return_att_weight=False,
sample_type="top"+str(truncation_rate)+add_string,
) # B x C x H x W
# save results
content = model_out['content']
content = content.permute(0, 2, 3, 1).to('cpu').numpy().astype(np.uint8)
for b in range(content.shape[0]):
cnt = b
save_base_name = '{}'.format(str(cnt).zfill(6))
save_path = os.path.join(save_root_, save_base_name+'.png')
im = Image.fromarray(content[b])
im.save(save_path)
if __name__ == '__main__':
VQ_Diffusion_model = VQ_Diffusion(config='configs/ithq.yaml', path='OUTPUT/pretrained_model/ithq_learnable.pth')
# Inference VQ-Diffusion
# VQ_Diffusion_model.inference_generate_sample_with_condition("teddy bear playing in the pool", truncation_rate=0.86, save_root="RESULT", batch_size=4)
# Inference Improved VQ-Diffusion with zero-shot classifier-free sampling
# VQ_Diffusion_model.inference_generate_sample_with_condition("teddy bear playing in the pool", truncation_rate=1.0, save_root="RESULT", batch_size=4, guidance_scale=5.0, learnable_cf=False)
# VQ_Diffusion_model.inference_generate_sample_with_condition("a long exposure photo of waterfall", truncation_rate=1.0, save_root="RESULT", batch_size=4, guidance_scale=5.0, learnable_cf=False)
# Inference Improved VQ-Diffusion with learnable classifier-free sampling
VQ_Diffusion_model.inference_generate_sample_with_condition("teddy bear playing in the pool", truncation_rate=1.0, save_root="RESULT", batch_size=4, guidance_scale=5.0)
# VQ_Diffusion_model.inference_generate_sample_with_condition("a long exposure photo of waterfall", truncation_rate=1.0, save_root="RESULT", batch_size=4, guidance_scale=5.0)
# Inference Improved VQ-Diffusion with fast/high-quality inference
# VQ_Diffusion_model.inference_generate_sample_with_condition("a long exposure photo of waterfall", truncation_rate=0.86, save_root="RESULT", batch_size=4, infer_speed=0.5) # high-quality inference, 0.5x inference speed
# VQ_Diffusion_model.inference_generate_sample_with_condition("a long exposure photo of waterfall", truncation_rate=0.86, save_root="RESULT", batch_size=4, infer_speed=2) # fast inference, 2x inference speed
# infer_speed shoule be float in [0.1, 10], larger infer_speed means faster inference and smaller infer_speed means slower inference
# Inference Improved VQ-Diffusion with purity sampling
# VQ_Diffusion_model.inference_generate_sample_with_condition("a long exposure photo of waterfall", truncation_rate=0.86, save_root="RESULT", batch_size=4, prior_rule=2, prior_weight=1) # purity sampling
# Inference Improved VQ-Diffusion with both learnable classifier-free sampling and fast inference
# VQ_Diffusion_model.inference_generate_sample_with_condition("a long exposure photo of waterfall", truncation_rate=1.0, save_root="RESULT", batch_size=4, guidance_scale=5.0, infer_speed=2) # classifier-free guidance and fast inference
# VQ_Diffusion_model = VQ_Diffusion(config='OUTPUT/pretrained_model/config_text.yaml', path='OUTPUT/pretrained_model/coco_learnable.pth')
# Inference VQ-Diffusion
# VQ_Diffusion_model.inference_generate_sample_with_condition("A group of elephants walking in muddy water", truncation_rate=0.86, save_root="RESULT", batch_size=4)
# Inference Improved VQ-Diffusion with learnable classifier-free sampling
# VQ_Diffusion_model.inference_generate_sample_with_condition("A group of elephants walking in muddy water", truncation_rate=1.0, save_root="RESULT", batch_size=4, guidance_scale=3.0)
# Inference Improved VQ-Diffusion with zero-shot classifier-free sampling: load models without classifier-free fine-tune and set guidance_scale to > 1
# VQ_Diffusion_model = VQ_Diffusion(config='OUTPUT/pretrained_model/config_text.yaml', path='OUTPUT/pretrained_model/coco_pretrained.pth')
# VQ_Diffusion_model.inference_generate_sample_with_condition("A group of elephants walking in muddy water", truncation_rate=0.86, save_root="RESULT", batch_size=4, guidance_scale=3.0, learnable_cf=False)
# Inference VQ-Diffusion
# VQ_Diffusion_model = VQ_Diffusion(config='OUTPUT/pretrained_model/config_imagenet.yaml', path='OUTPUT/pretrained_model/imagenet_pretrained.pth')
# VQ_Diffusion_model.inference_generate_sample_with_class(407, truncation_rate=0.86, save_root="RESULT", batch_size=4)
# Inference Improved VQ-Diffusion with classifier-free sampling
# VQ_Diffusion_model = VQ_Diffusion(config='configs/imagenet.yaml', path='OUTPUT/pretrained_model/imagenet_learnable.pth', imagenet_cf=True)
# VQ_Diffusion_model.inference_generate_sample_with_class(407, truncation_rate=0.94, save_root="RESULT", batch_size=4, guidance_scale=1.5)
| 9,903 | 48.029703 | 239 | py |
VQ-Diffusion | VQ-Diffusion-main/train.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import argparse
import os
import warnings
import time
import torch
from image_synthesis.modeling.build import build_model
from image_synthesis.data.build import build_dataloader
from image_synthesis.utils.misc import seed_everything, merge_opts_to_config, modify_config_for_debug
from image_synthesis.utils.io import load_yaml_config
from image_synthesis.engine.logger import Logger
from image_synthesis.engine.solver import Solver
from image_synthesis.distributed.launch import launch
# environment variables
NODE_RANK = os.environ['AZ_BATCHAI_TASK_INDEX'] if 'AZ_BATCHAI_TASK_INDEX' in os.environ else 0
NODE_RANK = int(NODE_RANK)
MASTER_ADDR, MASTER_PORT = os.environ['AZ_BATCH_MASTER_NODE'].split(':') if 'AZ_BATCH_MASTER_NODE' in os.environ else ("127.0.0.1", 29500)
MASTER_PORT = int(MASTER_PORT)
DIST_URL = 'tcp://%s:%s' % (MASTER_ADDR, MASTER_PORT)
def get_args():
parser = argparse.ArgumentParser(description='PyTorch Training script')
parser.add_argument('--config_file', type=str, default='configs/vqvae_celeba_attribute_cond.yaml',
help='path of config file')
parser.add_argument('--name', type=str, default='',
help='the name of this experiment, if not provided, set to'
'the name of config file')
parser.add_argument('--output', type=str, default='OUTPUT',
help='directory to save the results')
parser.add_argument('--log_frequency', type=int, default=100,
help='print frequency (default: 100)')
parser.add_argument('--load_path', type=str, default=None,
help='path to model that need to be loaded, '
'used for loading pretrained model')
parser.add_argument('--resume_name', type=str, default=None,
help='resume one experiment with the given name')
parser.add_argument('--auto_resume', action='store_true',
help='automatically resume the training')
# args for ddp
parser.add_argument('--num_node', type=int, default=1,
help='number of nodes for distributed training')
parser.add_argument('--node_rank', type=int, default=NODE_RANK,
help='node rank for distributed training')
parser.add_argument('--dist_url', type=str, default=DIST_URL,
help='url used to set up distributed training')
parser.add_argument('--gpu', type=int, default=None,
help='GPU id to use. If given, only the specific gpu will be'
' used, and ddp will be disabled')
parser.add_argument('--sync_bn', action='store_true',
help='use sync BN layer')
parser.add_argument('--tensorboard', action='store_true',
help='use tensorboard for logging')
parser.add_argument('--timestamp', action='store_true', # default=True,
                        help='prepend a timestamp to the experiment name')
# args for random
parser.add_argument('--seed', type=int, default=None,
help='seed for initializing training. ')
parser.add_argument('--cudnn_deterministic', action='store_true',
help='set cudnn.deterministic True')
parser.add_argument('--amp', action='store_true', # default=True,
                        help='automatic mixed precision')
parser.add_argument('--debug', action='store_true', default=False,
help='set as debug mode')
# args for modify config
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
args.cwd = os.path.abspath(os.path.dirname(__file__))
if args.resume_name is not None:
args.name = args.resume_name
args.config_file = os.path.join(args.output, args.resume_name, 'configs', 'config.yaml')
args.auto_resume = True
else:
if args.name == '':
args.name = os.path.basename(args.config_file).replace('.yaml', '')
if args.timestamp:
            assert not args.auto_resume, "with a timestamp, auto resume cannot reliably find the save directory"
time_str = time.strftime('%Y-%m-%d-%H-%M')
args.name = time_str + '-' + args.name
# modify args for debugging
if args.debug:
args.name = 'debug'
if args.gpu is None:
args.gpu = 0
args.save_dir = os.path.join(args.output, args.name)
return args
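# Illustrative single-node launch command (not taken from the original repository docs); the config path reuses
# the one referenced by the inference script, and the flags are the ones defined above:
#
#     python train.py --name ithq_train --config_file configs/ithq.yaml --tensorboard --auto_resume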
def main():
args = get_args()
if args.seed is not None or args.cudnn_deterministic:
seed_everything(args.seed, args.cudnn_deterministic)
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely disable ddp.')
torch.cuda.set_device(args.gpu)
args.ngpus_per_node = 1
args.world_size = 1
else:
if args.num_node == 1:
            args.dist_url = "auto"
else:
assert args.num_node > 1
args.ngpus_per_node = torch.cuda.device_count()
args.world_size = args.ngpus_per_node * args.num_node
launch(main_worker, args.ngpus_per_node, args.num_node, args.node_rank, args.dist_url, args=(args,))
def main_worker(local_rank, args):
args.local_rank = local_rank
args.global_rank = args.local_rank + args.node_rank * args.ngpus_per_node
args.distributed = args.world_size > 1
# load config
config = load_yaml_config(args.config_file)
config = merge_opts_to_config(config, args.opts)
if args.debug:
config = modify_config_for_debug(config)
# get logger
logger = Logger(args)
logger.save_config(config)
# get model
model = build_model(config, args)
# print(model)
if args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# get dataloader
dataloader_info = build_dataloader(config, args)
# get solver
solver = Solver(config=config, args=args, model=model, dataloader=dataloader_info, logger=logger)
# resume
    if args.load_path is not None:  # only load the model parameters
solver.resume(path=args.load_path,
# load_model=True,
load_optimizer_and_scheduler=False,
load_others=False)
if args.auto_resume:
solver.resume()
# with torch.autograd.set_detect_anomaly(True):
# solver.train()
solver.train()
if __name__ == '__main__':
main()
| 6,809 | 39.058824 | 138 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/distributed/launch.py | import os
import torch
from torch import distributed as dist
from torch import multiprocessing as mp
# import distributed as dist_fn
import image_synthesis.distributed.distributed as dist_fn
def find_free_port():
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def launch(fn, n_gpu_per_machine, n_machine=1, machine_rank=0, dist_url=None, args=()):
world_size = n_machine * n_gpu_per_machine
if world_size > 1:
# if "OMP_NUM_THREADS" not in os.environ:
# os.environ["OMP_NUM_THREADS"] = "1"
if dist_url == "auto":
if n_machine != 1:
raise ValueError('dist_url="auto" not supported in multi-machine jobs')
port = find_free_port()
dist_url = f"tcp://127.0.0.1:{port}"
if n_machine > 1 and dist_url.startswith("file://"):
raise ValueError(
"file:// is not a reliable init method in multi-machine jobs. Prefer tcp://"
)
mp.spawn(
distributed_worker,
nprocs=n_gpu_per_machine,
args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args),
daemon=False,
)
else:
local_rank = 0
fn(local_rank, *args)
def distributed_worker(
local_rank, fn, world_size, n_gpu_per_machine, machine_rank, dist_url, args
):
if not torch.cuda.is_available():
raise OSError("CUDA is not available. Please check your environments")
global_rank = machine_rank * n_gpu_per_machine + local_rank
try:
dist.init_process_group(
backend="NCCL",
init_method=dist_url,
world_size=world_size,
rank=global_rank,
)
except Exception:
raise OSError("failed to initialize NCCL groups")
dist_fn.synchronize()
if n_gpu_per_machine > torch.cuda.device_count():
raise ValueError(
f"specified n_gpu_per_machine larger than available device ({torch.cuda.device_count()})"
)
torch.cuda.set_device(local_rank)
if dist_fn.LOCAL_PROCESS_GROUP is not None:
raise ValueError("torch.distributed.LOCAL_PROCESS_GROUP is not None")
n_machine = world_size // n_gpu_per_machine
for i in range(n_machine):
ranks_on_i = list(range(i * n_gpu_per_machine, (i + 1) * n_gpu_per_machine))
pg = dist.new_group(ranks_on_i)
if i == machine_rank:
dist_fn.LOCAL_PROCESS_GROUP = pg
fn(local_rank, *args)
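# Usage sketch (illustrative, not part of the original file). Assuming `worker(local_rank, cfg)`
# is the per-process entry point defined by the caller, a single-node run could look like:
#
#     if __name__ == "__main__":
#         launch(worker, n_gpu_per_machine=torch.cuda.device_count(),
#                n_machine=1, machine_rank=0, dist_url="auto", args=(cfg,))
#
# When n_machine * n_gpu_per_machine == 1, launch() simply calls worker(0, cfg) in-process
# without spawning subprocesses or initializing a process group.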
| 2,604 | 26.712766 | 101 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/distributed/distributed.py | import math
import pickle
import torch
from torch import distributed as dist
from torch.utils import data
LOCAL_PROCESS_GROUP = None
def is_primary():
return get_rank() == 0
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
if LOCAL_PROCESS_GROUP is None:
raise ValueError("tensorfn.distributed.LOCAL_PROCESS_GROUP is None")
return dist.get_rank(group=LOCAL_PROCESS_GROUP)
def synchronize():
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def is_distributed():
raise RuntimeError('Please debug this function!')
return get_world_size() > 1
def all_reduce(tensor, op=dist.ReduceOp.SUM, async_op=False):
world_size = get_world_size()
if world_size == 1:
return tensor
dist.all_reduce(tensor, op=op, async_op=async_op)
return tensor
def all_gather(data):
world_size = get_world_size()
if world_size == 1:
return [data]
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
local_size = torch.IntTensor([tensor.numel()]).to("cuda")
size_list = [torch.IntTensor([1]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), 0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
keys = []
values = []
for k in sorted(input_dict.keys()):
keys.append(k)
values.append(input_dict[k])
values = torch.stack(values, 0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
values /= world_size
reduced_dict = {k: v for k, v in zip(keys, values)}
return reduced_dict
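# Usage sketch (illustrative, not part of the original file): reduce_dict is typically called on a
# dict of detached scalar loss tensors before logging, e.g.
#
#     loss_stats = reduce_dict({'loss': loss.detach(), 'acc': acc.detach()})
#
# With average=True only rank 0 is guaranteed to hold the averaged values (dist.reduce with dst=0);
# `loss` and `acc` here are placeholders for tensors produced by the surrounding training code.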
def data_sampler(dataset, shuffle, distributed):
if distributed:
return data.distributed.DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return data.RandomSampler(dataset)
else:
return data.SequentialSampler(dataset)
| 3,169 | 20.418919 | 76 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/lr_scheduler.py | import torch
import math
# from torch.optim import AdamW, Adam
from torch._six import inf
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR
class ReduceLROnPlateauWithWarmup(object):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This scheduler reads a metrics
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Args:
optimizer (Optimizer): Wrapped optimizer.
mode (str): One of `min`, `max`. In `min` mode, lr will
be reduced when the quantity monitored has stopped
decreasing; in `max` mode it will be reduced when the
quantity monitored has stopped increasing. Default: 'min'.
factor (float): Factor by which the learning rate will be
reduced. new_lr = lr * factor. Default: 0.1.
patience (int): Number of epochs with no improvement after
which learning rate will be reduced. For example, if
`patience = 2`, then we will ignore the first 2 epochs
with no improvement, and will only decrease the LR after the
3rd epoch if the loss still hasn't improved then.
Default: 10.
threshold (float): Threshold for measuring the new optimum,
to only focus on significant changes. Default: 1e-4.
threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
dynamic_threshold = best * ( 1 + threshold ) in 'max'
mode or best * ( 1 - threshold ) in `min` mode.
In `abs` mode, dynamic_threshold = best + threshold in
`max` mode or best - threshold in `min` mode. Default: 'rel'.
cooldown (int): Number of epochs to wait before resuming
normal operation after lr has been reduced. Default: 0.
min_lr (float or list): A scalar or a list of scalars. A
lower bound on the learning rate of all param groups
or each group respectively. Default: 0.
eps (float): Minimal decay applied to lr. If the difference
between new and old lr is smaller than eps, the update is
ignored. Default: 1e-8.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
warmup_lr: float or None, the learning rate to be touched after warmup
warmup: int, the number of steps to warmup
"""
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
threshold=1e-4, threshold_mode='rel', cooldown=0,
min_lr=0, eps=1e-8, verbose=False, warmup_lr=None,
warmup=0):
if factor >= 1.0:
raise ValueError('Factor should be < 1.0.')
self.factor = factor
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
self.warmup_lr = warmup_lr
self.warmup = warmup
self.best = None
self.num_bad_epochs = None
self.mode_worse = None # the worse value for the chosen mode
self.eps = eps
self.last_epoch = 0
self._init_is_better(mode=mode, threshold=threshold,
threshold_mode=threshold_mode)
self._reset()
def _prepare_for_warmup(self):
if self.warmup_lr is not None:
if isinstance(self.warmup_lr, (list, tuple)):
if len(self.warmup_lr) != len(self.optimizer.param_groups):
raise ValueError("expected {} warmup_lrs, got {}".format(
len(self.optimizer.param_groups), len(self.warmup_lr)))
self.warmup_lrs = list(self.warmup_lr)
else:
self.warmup_lrs = [self.warmup_lr] * len(self.optimizer.param_groups)
else:
self.warmup_lrs = None
if self.warmup > self.last_epoch:
curr_lrs = [group['lr'] for group in self.optimizer.param_groups]
self.warmup_lr_steps = [max(0, (self.warmup_lrs[i] - curr_lrs[i])/float(self.warmup)) for i in range(len(curr_lrs))]
else:
self.warmup_lr_steps = None
def _reset(self):
"""Resets num_bad_epochs counter and cooldown counter."""
self.best = self.mode_worse
self.cooldown_counter = 0
self.num_bad_epochs = 0
def step(self, metrics):
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
epoch = self.last_epoch + 1
self.last_epoch = epoch
if epoch <= self.warmup:
self._increase_lr(epoch)
else:
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
if self.num_bad_epochs > self.patience:
self._reduce_lr(epoch)
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def _reduce_lr(self, epoch):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: reducing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
def _increase_lr(self, epoch):
# used for warmup
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr + self.warmup_lr_steps[i], self.min_lrs[i])
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: increasing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
@property
def in_cooldown(self):
return self.cooldown_counter > 0
def is_better(self, a, best):
if self.mode == 'min' and self.threshold_mode == 'rel':
rel_epsilon = 1. - self.threshold
return a < best * rel_epsilon
elif self.mode == 'min' and self.threshold_mode == 'abs':
return a < best - self.threshold
elif self.mode == 'max' and self.threshold_mode == 'rel':
rel_epsilon = self.threshold + 1.
return a > best * rel_epsilon
else: # mode == 'max' and epsilon_mode == 'abs':
return a > best + self.threshold
def _init_is_better(self, mode, threshold, threshold_mode):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if threshold_mode not in {'rel', 'abs'}:
raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
if mode == 'min':
self.mode_worse = inf
else: # mode == 'max':
self.mode_worse = -inf
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
self._prepare_for_warmup()
def state_dict(self):
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)
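# Usage sketch (illustrative, not part of the original file); `optimizer`, `loader` and
# `train_step` are placeholders for objects provided by the surrounding training loop:
#
#     scheduler = ReduceLROnPlateauWithWarmup(optimizer, factor=0.5, patience=100, min_lr=1e-6,
#                                             threshold=1e-1, threshold_mode='rel',
#                                             warmup_lr=4.5e-4, warmup=5000)
#     for iteration, batch in enumerate(loader):
#         loss = train_step(batch)
#         scheduler.step(loss)  # linear warmup for the first `warmup` calls, then reduce-on-plateau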
class CosineAnnealingLRWithWarmup(object):
"""
adjust lr:
args:
warmup_lr: float or None, the learning rate to be touched after warmup
warmup: int, the number of steps to warmup
"""
def __init__(self, optimizer, T_max, last_epoch=-1, verbose=False,
min_lr=0, warmup_lr=None, warmup=0):
self.optimizer = optimizer
self.T_max = T_max
self.last_epoch = last_epoch
self.verbose = verbose
self.warmup_lr = warmup_lr
self.warmup = warmup
if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.max_lrs = [lr for lr in self.min_lrs]
self._prepare_for_warmup()
def step(self):
epoch = self.last_epoch + 1
self.last_epoch = epoch
if epoch <= self.warmup:
self._increase_lr(epoch)
else:
self._reduce_lr(epoch)
def _reduce_lr(self, epoch):
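        # Cosine decay applied after the warmup phase: `progress` goes from 0 to 1 over the
        # remaining (T_max - warmup) steps, and each group follows
        # lr = max(max_lr * 0.5 * (1 + cos(pi * progress)), min_lr).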
for i, param_group in enumerate(self.optimizer.param_groups):
progress = float(epoch - self.warmup) / float(max(1, self.T_max - self.warmup))
factor = max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))
old_lr = float(param_group['lr'])
new_lr = max(self.max_lrs[i] * factor, self.min_lrs[i])
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: reducing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
def _increase_lr(self, epoch):
# used for warmup
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = old_lr + self.warmup_lr_steps[i]
param_group['lr'] = new_lr
self.max_lrs[i] = max(self.max_lrs[i], new_lr)
if self.verbose:
print('Epoch {:5d}: increasing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
def _prepare_for_warmup(self):
if self.warmup_lr is not None:
if isinstance(self.warmup_lr, (list, tuple)):
if len(self.warmup_lr) != len(self.optimizer.param_groups):
raise ValueError("expected {} warmup_lrs, got {}".format(
len(self.optimizer.param_groups), len(self.warmup_lr)))
self.warmup_lrs = list(self.warmup_lr)
else:
self.warmup_lrs = [self.warmup_lr] * len(self.optimizer.param_groups)
else:
self.warmup_lrs = None
if self.warmup > self.last_epoch:
curr_lrs = [group['lr'] for group in self.optimizer.param_groups]
self.warmup_lr_steps = [max(0, (self.warmup_lrs[i] - curr_lrs[i])/float(self.warmup)) for i in range(len(curr_lrs))]
else:
self.warmup_lr_steps = None
def state_dict(self):
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
self._prepare_for_warmup() | 11,992 | 40.071918 | 128 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/clip_grad_norm.py | from torch.nn.utils import clip_grad_norm_
class ClipGradNorm(object):
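    # Thin wrapper around torch.nn.utils.clip_grad_norm_ that keeps its own call counter and only
    # clips from `start_iteration` on; a non-positive `end_iteration` keeps clipping enabled forever.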
def __init__(self,
start_iteration=0,
end_iteration=-1, # if negative, the norm will be always clipped
max_norm=0.5):
self.start_iteration = start_iteration
self.end_iteration = end_iteration
self.max_norm = max_norm
self.last_epoch = -1
def __call__(self, parameters):
self.last_epoch += 1
        # clip from `start_iteration` on; a non-positive `end_iteration` means clipping never stops
        clip = self.last_epoch >= self.start_iteration and \
            (self.end_iteration <= 0 or self.last_epoch < self.end_iteration)
if clip:
clip_grad_norm_(parameters, max_norm=self.max_norm)
def state_dict(self):
return {key: value for key, value in self.__dict__.items()}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict) | 935 | 29.193548 | 81 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/logger.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import sys
import torch
from image_synthesis.utils.io import write_args, save_config_to_yaml
from image_synthesis.distributed.distributed import is_primary
import torch.utils.tensorboard as tensorboard
# USE_TENSORBOARD = True
# try:
# import tensorboard
# except:
# USE_TENSORBOARD = False
class Logger(object):
def __init__(self, args):
self.args = args
self.save_dir = args.save_dir
self.is_primary = is_primary()
if self.is_primary:
os.makedirs(self.save_dir, exist_ok=True)
# save the args and config
self.config_dir = os.path.join(self.save_dir, 'configs')
os.makedirs(self.config_dir, exist_ok=True)
file_name = os.path.join(self.config_dir, 'args.txt')
write_args(args, file_name)
log_dir = os.path.join(self.save_dir, 'logs')
if not os.path.exists(log_dir):
os.makedirs(log_dir, exist_ok=True)
self.text_writer = open(os.path.join(log_dir, 'log.txt'), 'a') # 'w')
if args.tensorboard:
self.log_info('using tensorboard')
self.tb_writer = torch.utils.tensorboard.SummaryWriter(log_dir=log_dir) # tensorboard.SummaryWriter(log_dir=log_dir)
else:
self.tb_writer = None
def save_config(self, config):
if self.is_primary:
save_config_to_yaml(config, os.path.join(self.config_dir, 'config.yaml'))
def log_info(self, info, check_primary=True):
if self.is_primary or (not check_primary):
print(info)
if self.is_primary:
info = str(info)
time_str = time.strftime('%Y-%m-%d-%H-%M')
info = '{}: {}'.format(time_str, info)
if not info.endswith('\n'):
info += '\n'
self.text_writer.write(info)
self.text_writer.flush()
def add_scalar(self, **kargs):
"""Log a scalar variable."""
if self.is_primary:
if self.tb_writer is not None:
self.tb_writer.add_scalar(**kargs)
def add_scalars(self, **kargs):
"""Log a scalar variable."""
if self.is_primary:
if self.tb_writer is not None:
self.tb_writer.add_scalars(**kargs)
def add_image(self, **kargs):
"""Log a scalar variable."""
if self.is_primary:
if self.tb_writer is not None:
self.tb_writer.add_image(**kargs)
def add_images(self, **kargs):
"""Log a scalar variable."""
if self.is_primary:
if self.tb_writer is not None:
self.tb_writer.add_images(**kargs)
def close(self):
if self.is_primary:
self.text_writer.close()
self.tb_writer.close()
| 3,005 | 32.4 | 132 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/solver.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import os
import time
import math
import torch
import threading
import multiprocessing
import copy
from PIL import Image
from torch.nn.utils import clip_grad_norm_, clip_grad_norm
import torchvision
from image_synthesis.utils.misc import instantiate_from_config, format_seconds
from image_synthesis.distributed.distributed import reduce_dict
from image_synthesis.distributed.distributed import is_primary, get_rank
from image_synthesis.utils.misc import get_model_parameters_info
from image_synthesis.engine.lr_scheduler import ReduceLROnPlateauWithWarmup, CosineAnnealingLRWithWarmup
from image_synthesis.engine.ema import EMA
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR
try:
from torch.cuda.amp import autocast, GradScaler
AMP = True
except:
print('Warning: import torch.amp failed, so no amp will be used!')
AMP = False
STEP_WITH_LOSS_SCHEDULERS = (ReduceLROnPlateauWithWarmup, ReduceLROnPlateau)
class Solver(object):
def __init__(self, config, args, model, dataloader, logger):
self.config = config
self.args = args
self.model = model
self.dataloader = dataloader
self.logger = logger
self.max_epochs = config['solver']['max_epochs']
self.save_epochs = config['solver']['save_epochs']
self.save_iterations = config['solver'].get('save_iterations', -1)
self.sample_iterations = config['solver']['sample_iterations']
if self.sample_iterations == 'epoch':
self.sample_iterations = self.dataloader['train_iterations']
self.validation_epochs = config['solver'].get('validation_epochs', 2)
assert isinstance(self.save_epochs, (int, list))
assert isinstance(self.validation_epochs, (int, list))
self.debug = config['solver'].get('debug', False)
self.last_epoch = -1
self.last_iter = -1
self.ckpt_dir = os.path.join(args.save_dir, 'checkpoint')
self.image_dir = os.path.join(args.save_dir, 'images')
os.makedirs(self.ckpt_dir, exist_ok=True)
os.makedirs(self.image_dir, exist_ok=True)
# get grad_clipper
if 'clip_grad_norm' in config['solver']:
self.clip_grad_norm = instantiate_from_config(config['solver']['clip_grad_norm'])
else:
self.clip_grad_norm = None
# get lr
adjust_lr = config['solver'].get('adjust_lr', 'sqrt')
base_lr = config['solver'].get('base_lr', 1.0e-4)
if adjust_lr == 'none':
self.lr = base_lr
elif adjust_lr == 'sqrt':
self.lr = base_lr * math.sqrt(args.world_size * config['dataloader']['batch_size'])
elif adjust_lr == 'linear':
self.lr = base_lr * args.world_size * config['dataloader']['batch_size']
else:
raise NotImplementedError('Unknown type of adjust lr {}!'.format(adjust_lr))
self.logger.log_info('Get lr {} from base lr {} with {}'.format(self.lr, base_lr, adjust_lr))
if hasattr(model, 'get_optimizer_and_scheduler') and callable(getattr(model, 'get_optimizer_and_scheduler')):
optimizer_and_scheduler = model.get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])
else:
optimizer_and_scheduler = self._get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers'])
        assert isinstance(optimizer_and_scheduler, dict), 'optimizer and scheduler should be a dict!'
self.optimizer_and_scheduler = optimizer_and_scheduler
        # configure ema
if 'ema' in config['solver'] and args.local_rank == 0:
ema_args = config['solver']['ema']
ema_args['model'] = self.model
self.ema = EMA(**ema_args)
else:
self.ema = None
self.logger.log_info(str(get_model_parameters_info(self.model)))
self.model.cuda()
self.device = self.model.device
if self.args.distributed:
self.logger.log_info('Distributed, begin DDP the model...')
self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False)
self.logger.log_info('Distributed, DDP model done!')
# prepare for amp
self.args.amp = self.args.amp and AMP
if self.args.amp:
self.scaler = GradScaler()
self.logger.log_info('Using AMP for training!')
self.logger.log_info("{}: global rank {}: prepare solver done!".format(self.args.name,self.args.global_rank), check_primary=False)
def _get_optimizer_and_scheduler(self, op_sc_list):
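        # Builds one entry per config item with the optimizer (and optional scheduler) plus its
        # active epoch/iteration range, keyed by name; the special name 'none' optimizes all
        # trainable parameters of the model.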
optimizer_and_scheduler = {}
for op_sc_cfg in op_sc_list:
op_sc = {
'name': op_sc_cfg.get('name', 'none'),
'start_epoch': op_sc_cfg.get('start_epoch', 0),
'end_epoch': op_sc_cfg.get('end_epoch', -1),
'start_iteration': op_sc_cfg.get('start_iteration', 0),
'end_iteration': op_sc_cfg.get('end_iteration', -1),
}
if op_sc['name'] == 'none':
# parameters = self.model.parameters()
parameters = filter(lambda p: p.requires_grad, self.model.parameters())
else:
# NOTE: get the parameters with the given name, the parameters() should be overide
parameters = self.model.parameters(name=op_sc['name'])
# build optimizer
op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}})
if 'params' not in op_cfg:
op_cfg['params'] = {}
if 'lr' not in op_cfg['params']:
op_cfg['params']['lr'] = self.lr
op_cfg['params']['params'] = parameters
optimizer = instantiate_from_config(op_cfg)
op_sc['optimizer'] = {
'module': optimizer,
'step_iteration': op_cfg.get('step_iteration', 1)
}
            assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be an integer number of iterations'
# build scheduler
if 'scheduler' in op_sc_cfg:
sc_cfg = op_sc_cfg['scheduler']
sc_cfg['params']['optimizer'] = optimizer
# for cosine annealing lr, compute T_max
if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']:
T_max = self.max_epochs * self.dataloader['train_iterations']
sc_cfg['params']['T_max'] = T_max
scheduler = instantiate_from_config(sc_cfg)
op_sc['scheduler'] = {
'module': scheduler,
'step_iteration': sc_cfg.get('step_iteration', 1)
}
if op_sc['scheduler']['step_iteration'] == 'epoch':
op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations']
optimizer_and_scheduler[op_sc['name']] = op_sc
return optimizer_and_scheduler
def _get_lr(self, return_type='str'):
lrs = {}
for op_sc_n, op_sc in self.optimizer_and_scheduler.items():
lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr']
lrs[op_sc_n+'_lr'] = round(lr, 10)
if return_type == 'str':
lrs = str(lrs)
lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\'', '')
elif return_type == 'dict':
pass
else:
raise ValueError('Unknow of return type: {}'.format(return_type))
return lrs
def sample(self, batch, phase='train', step_type='iteration'):
tic = time.time()
self.logger.log_info('Begin to sample...')
if self.ema is not None:
self.ema.modify_to_inference()
suffix = '_ema'
else:
suffix = ''
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model = self.model.module
else:
model = self.model
with torch.no_grad():
if self.debug == False:
if self.args.amp:
with autocast():
samples = model.sample(batch=batch, step=self.last_iter)
else:
samples = model.sample(batch=batch, step=self.last_iter)
else:
samples = model.sample(batch=batch[0].cuda(), step=self.last_iter)
step = self.last_iter if step_type == 'iteration' else self.last_epoch
for k, v in samples.items():
save_dir = os.path.join(self.image_dir, phase, k)
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, 'e{:010d}_itr{:010d}_rank{}{}'.format(self.last_epoch, self.last_iter%self.dataloader['train_iterations'], get_rank(), suffix))
if torch.is_tensor(v) and v.dim() == 4 and v.shape[1] in [1, 3]: # image
im = v
im = im.to(torch.uint8)
self.logger.add_images(tag='{}/{}e_{}itr/{}'.format(phase, self.last_epoch, self.last_iter%self.dataloader['train_iterations'], k), img_tensor=im, global_step=step, dataformats='NCHW')
# save images
im_grid = torchvision.utils.make_grid(im)
im_grid = im_grid.permute(1, 2, 0).to('cpu').numpy()
im_grid = Image.fromarray(im_grid)
im_grid.save(save_path + '.jpg')
self.logger.log_info('save {} to {}'.format(k, save_path+'.jpg'))
else: # may be other values, such as text caption
with open(save_path+'.txt', 'a') as f:
f.write(str(v)+'\n')
f.close()
                self.logger.log_info('save {} to {}'.format(k, save_path+'.txt'))
if self.ema is not None:
self.ema.modify_to_train()
self.logger.log_info('Sample done, time: {:.2f}'.format(time.time() - tic))
def step(self, batch, phase='train'):
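        # Runs one forward pass for every optimizer/scheduler group that is active at the current
        # epoch/iteration; in the train phase it also performs the backward pass, optimizer and
        # scheduler steps (optionally under AMP) and the EMA update, and it returns the
        # loss/accuracy entries reported by the model for each group.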
loss = {}
if self.debug == False:
for k, v in batch.items():
if torch.is_tensor(v):
batch[k] = v.cuda()
else:
batch = batch[0].cuda()
for op_sc_n, op_sc in self.optimizer_and_scheduler.items():
if phase == 'train':
# check if this optimizer and scheduler is valid in this iteration and epoch
if op_sc['start_iteration'] > self.last_iter:
continue
if op_sc['end_iteration'] > 0 and op_sc['end_iteration'] <= self.last_iter:
continue
if op_sc['start_epoch'] > self.last_epoch:
continue
if op_sc['end_epoch'] > 0 and op_sc['end_epoch'] <= self.last_epoch:
continue
input = {
'batch': batch,
'return_loss': True,
'step': self.last_iter,
}
if op_sc_n != 'none':
input['name'] = op_sc_n
if phase == 'train':
if self.args.amp:
with autocast():
output = self.model(**input)
else:
output = self.model(**input)
else:
with torch.no_grad():
if self.args.amp:
with autocast():
output = self.model(**input)
else:
output = self.model(**input)
if phase == 'train':
if op_sc['optimizer']['step_iteration'] > 0 and (self.last_iter + 1) % op_sc['optimizer']['step_iteration'] == 0:
op_sc['optimizer']['module'].zero_grad()
if self.args.amp:
self.scaler.scale(output['loss']).backward()
if self.clip_grad_norm is not None:
self.clip_grad_norm(self.model.parameters())
self.scaler.step(op_sc['optimizer']['module'])
self.scaler.update()
else:
output['loss'].backward()
if self.clip_grad_norm is not None:
self.clip_grad_norm(self.model.parameters())
op_sc['optimizer']['module'].step()
if 'scheduler' in op_sc:
if op_sc['scheduler']['step_iteration'] > 0 and (self.last_iter + 1) % op_sc['scheduler']['step_iteration'] == 0:
if isinstance(op_sc['scheduler']['module'], STEP_WITH_LOSS_SCHEDULERS):
op_sc['scheduler']['module'].step(output.get('loss'))
else:
op_sc['scheduler']['module'].step()
# update ema model
if self.ema is not None:
self.ema.update(iteration=self.last_iter)
loss[op_sc_n] = {k: v for k, v in output.items() if ('loss' in k or 'acc' in k)}
return loss
def save(self, force=False):
if is_primary():
# save with the epoch specified name
if self.save_iterations > 0:
if (self.last_iter + 1) % self.save_iterations == 0:
save = True
else:
save = False
else:
if isinstance(self.save_epochs, int):
save = (self.last_epoch + 1) % self.save_epochs == 0
else:
save = (self.last_epoch + 1) in self.save_epochs
if save or force:
state_dict = {
'last_epoch': self.last_epoch,
'last_iter': self.last_iter,
'model': self.model.module.state_dict() if isinstance(self.model, torch.nn.parallel.DistributedDataParallel) else self.model.state_dict()
}
if self.ema is not None:
state_dict['ema'] = self.ema.state_dict()
if self.clip_grad_norm is not None:
state_dict['clip_grad_norm'] = self.clip_grad_norm.state_dict()
# add optimizers and schedulers
optimizer_and_scheduler = {}
for op_sc_n, op_sc in self.optimizer_and_scheduler.items():
state_ = {}
for k in op_sc:
if k in ['optimizer', 'scheduler']:
op_or_sc = {kk: vv for kk, vv in op_sc[k].items() if kk != 'module'}
op_or_sc['module'] = op_sc[k]['module'].state_dict()
state_[k] = op_or_sc
else:
state_[k] = op_sc[k]
optimizer_and_scheduler[op_sc_n] = state_
state_dict['optimizer_and_scheduler'] = optimizer_and_scheduler
if save:
save_path = os.path.join(self.ckpt_dir, '{}e_{}iter.pth'.format(str(self.last_epoch).zfill(6), self.last_iter))
torch.save(state_dict, save_path)
self.logger.log_info('saved in {}'.format(save_path))
# save with the last name
save_path = os.path.join(self.ckpt_dir, 'last.pth')
torch.save(state_dict, save_path)
self.logger.log_info('saved in {}'.format(save_path))
def resume(self,
path=None, # The path of last.pth
load_optimizer_and_scheduler=True, # whether to load optimizers and scheduler
load_others=True # load other informations
):
if path is None:
path = os.path.join(self.ckpt_dir, 'last.pth')
if os.path.exists(path):
state_dict = torch.load(path, map_location='cuda:{}'.format(self.args.local_rank))
if load_others:
self.last_epoch = state_dict['last_epoch']
self.last_iter = state_dict['last_iter']
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
try:
self.model.module.load_state_dict(state_dict['model'])
except:
model_dict = self.model.module.state_dict()
temp_state_dict = {k:v for k,v in state_dict['model'].items() if k in model_dict.keys()}
model_dict.update(temp_state_dict)
self.model.module.load_state_dict(model_dict)
else:
self.model.load_state_dict(state_dict['model'])
if 'ema' in state_dict and self.ema is not None:
try:
self.ema.load_state_dict(state_dict['ema'])
except:
model_dict = self.ema.state_dict()
temp_state_dict = {k:v for k,v in state_dict['ema'].items() if k in model_dict.keys()}
model_dict.update(temp_state_dict)
self.ema.load_state_dict(model_dict)
if 'clip_grad_norm' in state_dict and self.clip_grad_norm is not None:
self.clip_grad_norm.load_state_dict(state_dict['clip_grad_norm'])
# handle optimizer and scheduler
for op_sc_n, op_sc in state_dict['optimizer_and_scheduler'].items():
for k in op_sc:
if k in ['optimizer', 'scheduler']:
for kk in op_sc[k]:
if kk == 'module' and load_optimizer_and_scheduler:
self.optimizer_and_scheduler[op_sc_n][k][kk].load_state_dict(op_sc[k][kk])
elif load_others: # such as step_iteration, ...
self.optimizer_and_scheduler[op_sc_n][k][kk] = op_sc[k][kk]
elif load_others: # such as start_epoch, end_epoch, ....
self.optimizer_and_scheduler[op_sc_n][k] = op_sc[k]
self.logger.log_info('Resume from {}'.format(path))
def train_epoch(self):
self.model.train()
self.last_epoch += 1
if self.args.distributed:
self.dataloader['train_loader'].sampler.set_epoch(self.last_epoch)
epoch_start = time.time()
itr_start = time.time()
itr = -1
for itr, batch in enumerate(self.dataloader['train_loader']):
if itr == 0:
print("time2 is " + str(time.time()))
data_time = time.time() - itr_start
step_start = time.time()
self.last_iter += 1
loss = self.step(batch, phase='train')
# logging info
if self.logger is not None and self.last_iter % self.args.log_frequency == 0:
info = '{}: train'.format(self.args.name)
info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])
for loss_n, loss_dict in loss.items():
info += ' ||'
loss_dict = reduce_dict(loss_dict)
info += '' if loss_n == 'none' else ' {}'.format(loss_n)
# info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])
for k in loss_dict:
info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))
self.logger.add_scalar(tag='train/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_iter)
# log lr
lrs = self._get_lr(return_type='dict')
for k in lrs.keys():
lr = lrs[k]
self.logger.add_scalar(tag='train/{}_lr'.format(k), scalar_value=lrs[k], global_step=self.last_iter)
# add lr to info
info += ' || {}'.format(self._get_lr())
# add time consumption to info
spend_time = time.time() - self.start_train_time
itr_time_avg = spend_time / (self.last_iter + 1)
info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | iter_avg_time: {ita}s | epoch_time: {et} | spend_time: {st} | left_time: {lt}'.format(
dt=round(data_time, 1),
it=round(time.time() - itr_start, 1),
fbt=round(time.time() - step_start, 1),
ita=round(itr_time_avg, 1),
et=format_seconds(time.time() - epoch_start),
st=format_seconds(spend_time),
lt=format_seconds(itr_time_avg*self.max_epochs*self.dataloader['train_iterations']-spend_time)
)
self.logger.log_info(info)
itr_start = time.time()
# sample
if self.sample_iterations > 0 and (self.last_iter + 1) % self.sample_iterations == 0:
# print("save model here")
# self.save(force=True)
# print("save model done")
self.model.eval()
self.sample(batch, phase='train', step_type='iteration')
self.model.train()
# modify here to make sure dataloader['train_iterations'] is correct
        assert itr >= 0, "The dataset is too small to form one iteration!"
self.dataloader['train_iterations'] = itr + 1
def validate_epoch(self):
if 'validation_loader' not in self.dataloader:
val = False
else:
if isinstance(self.validation_epochs, int):
val = (self.last_epoch + 1) % self.validation_epochs == 0
else:
val = (self.last_epoch + 1) in self.validation_epochs
if val:
if self.args.distributed:
self.dataloader['validation_loader'].sampler.set_epoch(self.last_epoch)
self.model.eval()
overall_loss = None
epoch_start = time.time()
itr_start = time.time()
itr = -1
for itr, batch in enumerate(self.dataloader['validation_loader']):
data_time = time.time() - itr_start
step_start = time.time()
loss = self.step(batch, phase='val')
for loss_n, loss_dict in loss.items():
loss[loss_n] = reduce_dict(loss_dict)
if overall_loss is None:
overall_loss = loss
else:
for loss_n, loss_dict in loss.items():
for k, v in loss_dict.items():
overall_loss[loss_n][k] = (overall_loss[loss_n][k] * itr + loss[loss_n][k]) / (itr + 1)
if self.logger is not None and (itr+1) % self.args.log_frequency == 0:
info = '{}: val'.format(self.args.name)
info = info + ': Epoch {}/{} | iter {}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])
for loss_n, loss_dict in loss.items():
info += ' ||'
info += '' if loss_n == 'none' else ' {}'.format(loss_n)
# info = info + ': Epoch {}/{} | iter {}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])
for k in loss_dict:
info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))
itr_time_avg = (time.time() - epoch_start) / (itr + 1)
info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | epoch_time: {et} | left_time: {lt}'.format(
dt=round(data_time, 1),
fbt=round(time.time() - step_start, 1),
it=round(time.time() - itr_start, 1),
et=format_seconds(time.time() - epoch_start),
lt=format_seconds(itr_time_avg*(self.dataloader['train_iterations']-itr-1))
)
self.logger.log_info(info)
itr_start = time.time()
# modify here to make sure dataloader['validation_iterations'] is correct
            assert itr >= 0, "The dataset is too small to form one iteration!"
self.dataloader['validation_iterations'] = itr + 1
if self.logger is not None:
info = '{}: val'.format(self.args.name)
for loss_n, loss_dict in overall_loss.items():
info += '' if loss_n == 'none' else ' {}'.format(loss_n)
info += ': Epoch {}/{}'.format(self.last_epoch, self.max_epochs)
for k in loss_dict:
info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))
self.logger.add_scalar(tag='val/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_epoch)
self.logger.log_info(info)
def validate(self):
        self.validate_epoch()
def train(self):
start_epoch = self.last_epoch + 1
self.start_train_time = time.time()
self.logger.log_info('{}: global rank {}: start training...'.format(self.args.name, self.args.global_rank), check_primary=False)
for epoch in range(start_epoch, self.max_epochs):
self.train_epoch()
self.save(force=True)
self.validate_epoch()
| 26,443 | 47.08 | 204 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/engine/ema.py | import torch
import copy
class EMA(object):
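    # Maintains an exponential moving average copy of the model weights
    # (ema = decay * ema + (1 - decay) * current), refreshed every `update_interval` iterations;
    # modify_to_inference()/modify_to_train() swap the EMA weights in and out of the live model,
    # which the Solver uses around sampling.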
def __init__(self,
model,
decay=0.99,
update_interval=1,
device=torch.device('cpu')):
self.decay = decay
        self.update_interval = update_interval
self.device = device
self.model = model
with torch.no_grad():
if hasattr(model, 'get_ema_model') and callable(model.get_ema_model):
self.ema_model = copy.deepcopy(model.get_ema_model())
self.cur_state_dict = model.get_ema_model().state_dict()
else:
self.ema_model = copy.deepcopy(model)
self.cur_state_dict = model.state_dict()
self.ema_model.to(self.device)
self.cur_state_dict = {k: v.clone().to(self.device) for k, v in self.cur_state_dict.items()}
def update(self, iteration):
        if (iteration + 1) % self.update_interval == 0:
# print('{} Update ema'.format(iteration))
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
cur_state_dict = self.model.get_ema_model().state_dict()
else:
cur_state_dict = self.model.state_dict()
ema_state_dict = self.ema_model.state_dict()
for k in ema_state_dict.keys():
ema_state_dict[k] = ema_state_dict[k] * self.decay + cur_state_dict[k].clone().to(self.device) * (1-self.decay)
self.ema_model.load_state_dict(ema_state_dict)
def state_dict(self):
return self.ema_model.state_dict()
def load_state_dict(self, state_dict, strict=True):
state_dict_ = {k: v.clone().to(self.device) for k, v in state_dict.items()}
self.ema_model.load_state_dict(state_dict_, strict=strict)
def modify_to_inference(self):
# get current model
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
self.cur_state_dict = self.model.get_ema_model().state_dict()
else:
self.cur_state_dict = self.model.state_dict()
self.cur_state_dict = {k: v.clone().to(self.device) for k, v in self.cur_state_dict.items()}
ema_state_dict = self.ema_model.state_dict()
ema_state_dict = {k: v.to(self.model.device) for k, v in ema_state_dict.items()}
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
self.model.get_ema_model().load_state_dict(ema_state_dict)
else:
self.model.load_state_dict(ema_state_dict)
def modify_to_train(self):
self.cur_state_dict = {k: v.clone().to(self.model.device) for k, v in self.cur_state_dict.items()}
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
self.model.get_ema_model().load_state_dict(self.cur_state_dict)
else:
self.model.load_state_dict(self.cur_state_dict)
| 2,968 | 42.028986 | 127 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/utils/misc.py | import importlib
import random
import numpy as np
import torch
import warnings
import os
def seed_everything(seed, cudnn_deterministic=False):
"""
Function that sets seed for pseudo-random number generators in:
pytorch, numpy, python.random
Args:
seed: the integer value seed for global random state
"""
if seed is not None:
print(f"Global seed set to {seed}")
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if cudnn_deterministic:
torch.backends.cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
def merge_opts_to_config(config, opts):
def modify_dict(c, nl, v):
if len(nl) == 1:
c[nl[0]] = type(c[nl[0]])(v)
else:
# print(nl)
c[nl[0]] = modify_dict(c[nl[0]], nl[1:], v)
return c
if opts is not None and len(opts) > 0:
assert len(opts) % 2 == 0, "each opts should be given by the name and values! The length shall be even number!"
for i in range(len(opts) // 2):
name = opts[2*i]
value = opts[2*i+1]
config = modify_dict(config, name.split('.'), value)
return config
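# Example (illustrative, not part of the original file): opts comes straight from the argparse
# REMAINDER, so merge_opts_to_config(config, ['solver.max_epochs', '100']) sets
# config['solver']['max_epochs'] to 100, cast to the type of the existing value.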
def modify_config_for_debug(config):
config['dataloader']['num_workers'] = 0
config['dataloader']['batch_size'] = 1
return config
def get_model_parameters_info(model):
# for mn, m in model.named_modules():
parameters = {'overall': {'trainable': 0, 'non_trainable': 0, 'total': 0}}
for child_name, child_module in model.named_children():
parameters[child_name] = {'trainable': 0, 'non_trainable': 0}
for pn, p in child_module.named_parameters():
if p.requires_grad:
parameters[child_name]['trainable'] += p.numel()
else:
parameters[child_name]['non_trainable'] += p.numel()
parameters[child_name]['total'] = parameters[child_name]['trainable'] + parameters[child_name]['non_trainable']
parameters['overall']['trainable'] += parameters[child_name]['trainable']
parameters['overall']['non_trainable'] += parameters[child_name]['non_trainable']
parameters['overall']['total'] += parameters[child_name]['total']
# format the numbers
def format_number(num):
K = 2**10
M = 2**20
G = 2**30
        if num > G:
uint = 'G'
num = round(float(num)/G, 2)
elif num > M:
uint = 'M'
num = round(float(num)/M, 2)
elif num > K:
uint = 'K'
num = round(float(num)/K, 2)
else:
uint = ''
return '{}{}'.format(num, uint)
def format_dict(d):
for k, v in d.items():
if isinstance(v, dict):
format_dict(v)
else:
d[k] = format_number(v)
format_dict(parameters)
return parameters
def format_seconds(seconds):
h = int(seconds // 3600)
m = int(seconds // 60 - h * 60)
s = int(seconds % 60)
d = int(h // 24)
h = h - d * 24
if d == 0:
if h == 0:
if m == 0:
ft = '{:02d}s'.format(s)
else:
ft = '{:02d}m:{:02d}s'.format(m, s)
else:
ft = '{:02d}h:{:02d}m:{:02d}s'.format(h, m, s)
else:
ft = '{:d}d:{:02d}h:{:02d}m:{:02d}s'.format(d, h, m, s)
return ft
def instantiate_from_config(config):
if config is None:
return None
if not "target" in config:
raise KeyError("Expected key `target` to instantiate.")
module, cls = config["target"].rsplit(".", 1)
cls = getattr(importlib.import_module(module, package=None), cls)
return cls(**config.get("params", dict()))
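# Usage sketch (illustrative, not part of the original file): configs follow the
# {'target': ..., 'params': ...} convention used throughout the repo, e.g.
#
#     clipper = instantiate_from_config({
#         'target': 'image_synthesis.engine.clip_grad_norm.ClipGradNorm',
#         'params': {'max_norm': 0.5},
#     })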
def class_from_string(class_name):
module, cls = class_name.rsplit(".", 1)
cls = getattr(importlib.import_module(module, package=None), cls)
return cls
def get_all_file(dir, end_with='.h5'):
if isinstance(end_with, str):
end_with = [end_with]
filenames = []
for root, dirs, files in os.walk(dir):
for f in files:
for ew in end_with:
if f.endswith(ew):
filenames.append(os.path.join(root, f))
break
return filenames
def get_sub_dirs(dir, abs=True):
sub_dirs = os.listdir(dir)
if abs:
sub_dirs = [os.path.join(dir, s) for s in sub_dirs]
return sub_dirs
def get_model_buffer(model):
state_dict = model.state_dict()
buffers_ = {}
params_ = {n: p for n, p in model.named_parameters()}
for k in state_dict:
if k not in params_:
buffers_[k] = state_dict[k]
return buffers_
| 5,066 | 29.160714 | 119 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/utils/io.py | import sys
import yaml
import torch
import json
def load_yaml_config(path):
with open(path) as f:
config = yaml.full_load(f)
return config
def save_config_to_yaml(config, path):
assert path.endswith('.yaml')
with open(path, 'w') as f:
f.write(yaml.dump(config))
f.close()
def save_dict_to_json(d, path, indent=None):
json.dump(d, open(path, 'w'), indent=indent)
def load_dict_from_json(path):
return json.load(open(path, 'r'))
def write_args(args, path):
    args_dict = {name: getattr(args, name) for name in dir(args) if not name.startswith('_')}
with open(path, 'a') as args_file:
args_file.write('==> torch version: {}\n'.format(torch.__version__))
args_file.write('==> cudnn version: {}\n'.format(torch.backends.cudnn.version()))
args_file.write('==> Cmd:\n')
args_file.write(str(sys.argv))
args_file.write('\n==> args:\n')
for k, v in sorted(args_dict.items()):
args_file.write(' %s: %s\n' % (str(k), str(v)))
args_file.close() | 1,067 | 28.666667 | 98 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/cub200_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
from tqdm import tqdm
import pickle
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class Cub200Dataset(Dataset):
def __init__(self, data_root, phase = 'train', im_preprocessor_config=None, drop_caption_rate=0.0):
self.transform = instantiate_from_config(im_preprocessor_config)
self.image_folder = os.path.join(data_root, 'images')
self.root = os.path.join(data_root, phase)
pickle_path = os.path.join(self.root, "filenames.pickle")
self.name_list = pickle.load(open(pickle_path, 'rb'), encoding="bytes")
self.num = len(self.name_list)
# load all caption file to dict in memory
self.caption_dict = {}
for index in tqdm(range(self.num)):
name = self.name_list[index]
this_text_path = os.path.join(data_root, 'text', 'text', name+'.txt')
with open(this_text_path, 'r') as f:
caption = f.readlines()
self.caption_dict[name] = caption
print("load caption file done")
self.drop_rate = drop_caption_rate
self.phase = phase
def __len__(self):
return self.num
def __getitem__(self, index):
name = self.name_list[index]
image_path = os.path.join(self.image_folder, name+'.jpg')
image = load_img(image_path)
image = np.array(image).astype(np.uint8)
image = self.transform(image = image)['image']
caption_list = self.caption_dict[name]
caption = random.choice(caption_list).replace('\n', '').lower()
data = {
'image': np.transpose(image.astype(np.float32), (2, 0, 1)),
'text': caption if (self.phase != 'train' or self.drop_rate < 1e-6 or random.random() >= self.drop_rate) else '',
}
return data
| 2,040 | 33.016667 | 129 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/mscoco_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class CocoDataset(Dataset):
def __init__(self, data_root, phase = 'train', im_preprocessor_config=None, drop_caption_rate=0.0):
self.transform = instantiate_from_config(im_preprocessor_config)
self.root = os.path.join(data_root, phase)
# input_file = os.path.join(data_root, input_file)
caption_file = "captions_"+phase+"2014.json"
caption_file = os.path.join(data_root, "annotations", caption_file)
self.json_file = json.load(open(caption_file, 'r'))
print("length of the dataset is ")
print(len(self.json_file['annotations']))
self.num = len(self.json_file['annotations'])
self.image_prename = "COCO_" + phase + "2014_"
self.folder_path = os.path.join(data_root, phase+'2014', phase+'2014')
self.drop_rate = drop_caption_rate
self.phase = phase
def __len__(self):
return self.num
def __getitem__(self, index):
this_item = self.json_file['annotations'][index]
caption = this_item['caption'].lower()
image_name = str(this_item['image_id']).zfill(12)
image_path = os.path.join(self.folder_path, self.image_prename+image_name+'.jpg')
image = load_img(image_path)
image = np.array(image).astype(np.uint8)
image = self.transform(image = image)['image']
data = {
'image': np.transpose(image.astype(np.float32), (2, 0, 1)),
'text': caption if (self.phase != 'train' or self.drop_rate < 1e-6 or random.random() >= self.drop_rate) else '',
}
return data
| 1,873 | 36.48 | 129 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/imagenet_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
def load_img(filepath):
img = Image.open(filepath).convert('RGB')
return img
class ImageNetDataset(Dataset):
def __init__(self, data_root, input_file, phase = 'train', im_preprocessor_config=None, drop_caption_rate=0.0):
self.transform = instantiate_from_config(im_preprocessor_config)
self.root = os.path.join(data_root, phase)
input_file = os.path.join(data_root, input_file)
temp_label = json.load(open('image_synthesis/data/imagenet_class_index.json', 'r'))
self.labels = {}
for i in range(1000):
self.labels[temp_label[str(i)][0]] = i
self.A_paths = []
self.A_labels = []
with open(input_file, 'r') as f:
temp_path = f.readlines()
for path in temp_path:
label = self.labels[path.split('/')[0]]
self.A_paths.append(os.path.join(self.root, path.strip()))
self.A_labels.append(label)
self.num = len(self.A_paths)
self.A_size = len(self.A_paths)
self.drop_rate = drop_caption_rate
self.phase = phase
def __len__(self):
return self.num
def __getitem__(self, index):
try:
return self.load_img(index)
except:
return self.__getitem__(random.randint(0, self.__len__()-1))
def load_img(self, index):
A_path = self.A_paths[index % self.A_size]
A = load_img(A_path)
# if self.transform is not None:
A = self.transform(A)['image']
A_label = self.A_labels[index % self.A_size]
data = {
'image': np.transpose(A.astype(np.float32), (2, 0, 1)),
'label': A_label if (self.phase != 'train' or self.drop_rate < 1e-6 or random.random() >= self.drop_rate) else 1000,
}
return data
| 2,016 | 33.775862 | 132 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/ffhq_dataset.py | from torch.utils.data import Dataset
import numpy as np
import io
from PIL import Image
import os
import json
import random
from image_synthesis.utils.misc import instantiate_from_config
import torchvision.datasets as datasets
class FFHQDataset(datasets.ImageFolder):
def __init__(self, data_root, im_preprocessor_config):
self.img_preprocessor = instantiate_from_config(im_preprocessor_config)
super(FFHQDataset, self).__init__(root=data_root)
def __getitem__(self, index):
# image_name = self.imgs[index][0].split('/')[-1]
image = super(FFHQDataset, self).__getitem__(index)[0]
image = self.img_preprocessor(image=np.array(image).astype(np.uint8))['image']
data = {
'image': np.transpose(image.astype(np.float32), (2, 0, 1)),
}
return data
| 848 | 31.653846 | 86 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/build.py | import torch
# from image_synthesis.data.base_dataset import ConcatDatasetWithIndex as ConcatDataset
from torch.utils.data import ConcatDataset
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.distributed.distributed import is_distributed
def build_dataloader(config, args=None, return_dataset=False):
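    # Instantiates the train/validation datasets listed in the config, wraps them in DataLoaders
    # (with DistributedSampler when running distributed) and returns a dict holding the loaders
    # and their per-epoch iteration counts (plus the datasets themselves if return_dataset=True).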
dataset_cfg = config['dataloader']
train_dataset = []
for ds_cfg in dataset_cfg['train_datasets']:
ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')
ds = instantiate_from_config(ds_cfg)
train_dataset.append(ds)
if len(train_dataset) > 1:
train_dataset = ConcatDataset(train_dataset)
else:
train_dataset = train_dataset[0]
val_dataset = []
for ds_cfg in dataset_cfg['validation_datasets']:
ds_cfg['params']['data_root'] = dataset_cfg.get('data_root', '')
ds = instantiate_from_config(ds_cfg)
val_dataset.append(ds)
if len(val_dataset) > 1:
val_dataset = ConcatDataset(val_dataset)
else:
val_dataset = val_dataset[0]
if args is not None and args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
train_iters = len(train_sampler) // dataset_cfg['batch_size']
val_iters = len(val_sampler) // dataset_cfg['batch_size']
else:
train_sampler = None
val_sampler = None
train_iters = len(train_dataset) // dataset_cfg['batch_size']
val_iters = len(val_dataset) // dataset_cfg['batch_size']
# if args is not None and not args.debug:
# num_workers = max(2*dataset_cfg['batch_size'], dataset_cfg['num_workers'])
# num_workers = min(64, num_workers)
# else:
# num_workers = dataset_cfg['num_workers']
num_workers = dataset_cfg['num_workers']
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=dataset_cfg['batch_size'],
shuffle=(train_sampler is None),
num_workers=num_workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True,
persistent_workers=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=dataset_cfg['batch_size'],
shuffle=False, #(val_sampler is None),
num_workers=num_workers,
pin_memory=True,
sampler=val_sampler,
drop_last=True,
persistent_workers=True)
dataload_info = {
'train_loader': train_loader,
'validation_loader': val_loader,
'train_iterations': train_iters,
'validation_iterations': val_iters
}
if return_dataset:
dataload_info['train_dataset'] = train_dataset
dataload_info['validation_dataset'] = val_dataset
return dataload_info
| 3,454 | 44.460526 | 100 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/image_preprocessor.py | import albumentations
import random
import numpy as np
from PIL import Image
import cv2
from io import BytesIO
from torchvision import transforms as trans
class DalleTransformerPreprocessor(object):
def __init__(self,
size=256,
phase='train',
additional_targets=None):
self.size = size
self.phase = phase
# ddc: following dalle to use randomcrop
self.train_preprocessor = albumentations.Compose([albumentations.RandomCrop(height=size, width=size)],
additional_targets=additional_targets)
self.val_preprocessor = albumentations.Compose([albumentations.CenterCrop(height=size, width=size)],
additional_targets=additional_targets)
def __call__(self, image, **kargs):
"""
image: PIL.Image
"""
if isinstance(image, np.ndarray):
image = Image.fromarray(image.astype(np.uint8))
w, h = image.size
s_min = min(h, w)
if self.phase == 'train':
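            # DALL-E style augmentation: take a square crop of side s_min with its offset jittered
            # between 3/8 and 5/8 of the spare margin, resize it to a random side length in
            # [size, ~9/8 * size], then RandomCrop back down to (size, size).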
off_h = int(random.uniform(3*(h-s_min)//8, max(3*(h-s_min)//8+1, 5*(h-s_min)//8)))
off_w = int(random.uniform(3*(w-s_min)//8, max(3*(w-s_min)//8+1, 5*(w-s_min)//8)))
# import pdb; pdb.set_trace()
image = image.crop((off_w, off_h, off_w + s_min, off_h + s_min))
# resize image
t_max = min(s_min, round(9/8*self.size))
t_max = max(t_max, self.size)
t = int(random.uniform(self.size, t_max+1))
image = image.resize((t, t))
image = np.array(image).astype(np.uint8)
image = self.train_preprocessor(image=image) #randomcrop (size,size)
else:
if w < h:
w_ = self.size
h_ = int(h * w_/w)
else:
h_ = self.size
w_ = int(w * h_/h)
image = image.resize((w_, h_))
image = np.array(image).astype(np.uint8)
image = self.val_preprocessor(image=image)
return image
class ImageNetTransformerPreprocessor(object):
def __init__(self,
size=256,
phase='train',
additional_targets=None):
self.size = size
self.phase = phase
# ddc: following dalle to use randomcrop
self.train_preprocessor = albumentations.Compose([albumentations.RandomCrop(height=size, width=size)],
additional_targets=additional_targets)
self.val_preprocessor = albumentations.Compose([albumentations.CenterCrop(height=size, width=size)],
additional_targets=additional_targets)
def __call__(self, image, **kargs):
"""
image: PIL.Image
"""
if isinstance(image, np.ndarray):
image = Image.fromarray(image.astype(np.uint8))
w, h = image.size
s_min = min(h, w)
if self.phase == 'train':
if w < h:
w_ = self.size
h_ = int(h * w_/w)
else:
h_ = self.size
w_ = int(w * h_/h)
image = image.resize((w_, h_))
image = np.array(image).astype(np.uint8)
image = self.train_preprocessor(image=image)
else:
if w < h:
w_ = self.size
h_ = int(h * w_/w)
else:
h_ = self.size
w_ = int(w * h_/h)
image = image.resize((w_, h_))
image = np.array(image).astype(np.uint8)
image = self.val_preprocessor(image=image)
return image
| 3,890 | 35.364486 | 140 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/comm.py | """
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import torch
import torch.distributed as dist
# from diffdist.functional import all_gather as better_all_gather
class Comm(object):
def __init__(self, local_rank=0):
        self.local_rank = local_rank
@property
def world_size(self):
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
@property
def rank(self):
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
@property
def local_rank(self):
        if not dist.is_available():
            return 0
        if not dist.is_initialized():
            return 0
        return self._local_rank
@local_rank.setter
def local_rank(self, value):
if not dist.is_available():
self._local_rank = 0
if not dist.is_initialized():
self._local_rank = 0
self._local_rank = value
@property
def head(self):
return 'Rank[{}/{}]'.format(self.rank, self.world_size)
def is_main_process(self):
return self.rank == 0
def synchronize(self):
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if self.world_size == 1:
return
dist.barrier()
comm = Comm()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = comm.world_size
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).to("cuda")
size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
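# A minimal usage sketch (illustrative only): all_gather accepts any picklable object
# and, with a single process, simply returns [data]. In the distributed case the data
# is serialized through CUDA byte tensors, so a GPU is assumed. The dict is arbitrary.
def _example_all_gather():
    stats = {"rank": comm.rank, "num_samples": 128} # arbitrary picklable payload
    gathered = all_gather(stats) # one entry per process
    if comm.is_main_process():
        total = sum(s["num_samples"] for s in gathered)
        print(f"gathered {len(gathered)} dicts, {total} samples in total")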
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = comm.world_size
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
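# A minimal usage sketch (illustrative only): reduce_dict expects scalar tensors as
# values; only rank 0 ends up with the averaged numbers, so typically only rank 0 logs
# them. With the NCCL backend the tensors would need to live on the GPU.
def _example_reduce_dict():
    losses = {"loss_ce": torch.tensor(0.7), "loss_vq": torch.tensor(0.1)} # illustrative values
    reduced = reduce_dict(losses, average=True)
    if comm.is_main_process():
        print({k: v.item() for k, v in reduced.items()})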
def gather_tensors(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor)
for _ in range(comm.world_size)
]
dist.all_gather(tensors_gather, tensor, async_op=False)
# need to do this to restore propagation of the gradients
tensors_gather[comm.rank] = tensor
output = torch.cat(tensors_gather, dim=0)
return output
def gather_tensors_fake(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [
torch.ones_like(tensor)
for _ in range(comm.world_size)
]
dist.all_gather(tensors_gather, tensor, async_op=False)
# need to do this to restore propagation of the gradients
tensors_gather[comm.rank] = tensor
output = torch.cat(tensors_gather, dim=0)
output = torch.cat([output,output.detach()],0)
return output
def gather_nearby_tensors(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
step=comm.rank//2
if comm.rank%2==0:
nearby_rank=step*2+1
else:
nearby_rank=step*2
cpu_tensor=tensor
tensors_gather = [
torch.ones_like(cpu_tensor)
for _ in range(comm.world_size)
]
dist.all_gather(tensors_gather, cpu_tensor, async_op=False)
# need to do this to restore propagation of the gradients
tensors_gather=[tensors_gather[nearby_rank].to(tensor.device),tensor]
output = torch.cat(tensors_gather, dim=0)
return output
def gather_tensors_with_gradient(x):
""" collect all tensor from all GPUs
args:
x: shape (mini_batch, ...)
returns:
shape (mini_batch * num_gpu, ...)
"""
x = x.contiguous()
out_list = [torch.zeros_like(x, device=x.device, dtype=x.dtype) for _ in range(comm.world_size)]
out_list = better_all_gather(out_list, x)
return torch.cat(out_list, dim=0)
gather_funcs={
"ALL":gather_tensors,
"NEAR":gather_nearby_tensors,
"GRAD":gather_tensors_with_gradient,
"FAKE":gather_tensors_fake
}
from contextlib import contextmanager
@contextmanager
def torch_distributed_zero_first():
"""
    Context manager that makes all processes in distributed training wait for the local master (local rank 0) to do something first.
"""
local_rank=comm.local_rank
if local_rank not in [-1, 0]:
dist.barrier(device_ids=[local_rank])
yield
if local_rank == 0:
dist.barrier(device_ids=[0])
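# A minimal usage sketch (illustrative only): local rank 0 prepares a cache (e.g.
# downloads a dataset) while the other local ranks wait at the barrier, then everyone
# proceeds and finds the files in place. Assumes torch.distributed has already been
# initialized; prepare_cache is a placeholder callable.
def _example_zero_first(prepare_cache):
    with torch_distributed_zero_first():
        prepare_cache() # rank 0 runs this first, the other local ranks run it afterwards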
| 6,860 | 28.701299 | 103 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/data/utils/manage.py | from sys import stdout
import zipfile
import os.path as osp
import lmdb
import logging
from PIL import Image
import pickle
import io
import glob
import os
from pathlib import Path
import time
from threading import Thread
from queue import Queue,Empty
import subprocess
def func_wrapper(func):
def sub_func(queue,kwargs):
while True:
try:
key=queue.get(False)
ret=func(key,**kwargs)
except Empty:
break
return sub_func
class ThreadPool:
def __init__(self,n):
self.threads=[]
self.n=n
def run(self,func,array,**kwargs):
queue=Queue()
for val in array:
queue.put(val)
threads=[]
target=func_wrapper(func)
# hold_thread=subprocess.Popen("exec "+"python /mnt/blob/datasets/holder.py",shell=True,stdout=subprocess.DEVNULL)
time.sleep(1)
print(f"start loading queue {queue.qsize()}")
logging.info(f"start loading queue {queue.qsize()}")
for i in range(self.n):
print(i)
thread=Thread(target=target, args=(queue,kwargs))
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
# hold_thread.kill()
home = str(Path.home())
abs_blob_path=os.path.realpath("/mnt/blob/")
CACHE_FOLDER=os.path.join(home,"caching")
USE_CACHE=True
def norm(path):
assert "*" not in path
return os.path.realpath(os.path.abspath(path))
def in_blob(file):
if abs_blob_path in file:
return True
else:
return False
def map_name(file):
path=norm(file)
    path=remove_prefix(path, abs_blob_path+"/") # remove the exact prefix; lstrip would strip arbitrary leading characters
path=path.replace("/","_")
assert len(path)<250
return path
def preload(db,sync=True,load=True):
if not load:
return
print(f"loading {db.db_path}")
logging.info(f"loading {db.db_path}")
if sync:
db.initialize()
else:
p = Thread(target=db.initialize)
p.start()
def get_keys_from_lmdb(db):
with db.begin(write=False) as txn:
return list(txn.cursor().iternext(values=False))
def decode_img(byteflow):
img=Image.open(io.BytesIO(byteflow)).convert("RGB")
img.load()
return img
def decode_text(byteflow):
return pickle.loads(byteflow)
decode_funcs={
"image": decode_img,
"text": decode_text
}
class MultipleZipManager:
def __init__(self, files: list):
        raise NotImplementedError("MultipleZipManager is not implemented; use MultipleLMDBManager instead")
def remove_prefix(text, prefix):
return text[len(prefix):] if text.startswith(prefix) else text
class ZipManager:
def __init__(self, db_path,data_type,prefix=None,load=True) -> None:
self.decode_func=decode_funcs[data_type]
self.db_path=db_path
cache_file=os.path.join(CACHE_FOLDER,map_name(db_path))
if USE_CACHE and os.path.exists(cache_file):
logging.info(f"using local cache {cache_file}")
self.db_path=cache_file
if prefix is None:
self.prefix = None
else:
self.prefix=f"{prefix}_"
self._init=False
preload(self,load=load)
def deinitialze(self):
self.zip_fd.close()
del self.zip_fd
self._init = False
def initialize(self,close=True):
self.zip_fd = zipfile.ZipFile(self.db_path, mode="r")
if not hasattr(self,"_keys"):
self._keys = self.zip_fd.namelist()
if self.prefix is not None:
self._keys=[self.prefix+key for key in self._keys]
self._init = True
if close:
self.deinitialze()
@property
def keys(self):
while not hasattr(self,"_keys"):
time.sleep(0.1)
return self._keys
def get(self, name):
if not self._init:
self.initialize(close=False) # https://discuss.pytorch.org/t/dataloader-stucks/14087/3
byteflow = self.zip_fd.read(name)
return self.decode_func(byteflow)
class DBManager:
def __init__(self, db_path,data_type,prefix=None,load=True) -> None:
self.decode_func=decode_funcs[data_type]
self.db_path=db_path
cache_file=os.path.join(CACHE_FOLDER,map_name(db_path))
if USE_CACHE and os.path.exists(cache_file):
logging.info(f"using local cache {cache_file}")
self.db_path=cache_file
if prefix is None:
self.prefix = None
else:
self.prefix=f"{prefix}_"
self._init=False
preload(self,load=load)
def initialize(self):
self.env = lmdb.open(
self.db_path,
subdir=osp.isdir(self.db_path),
readonly=True,
lock=False,
readahead=False,
meminit=False,
max_readers=10000
)
self._init=True
@property
def keys(self):
while not self._init:
time.sleep(0.1)
if self.prefix is not None:
_keys=[self.prefix+key.decode() for key in get_keys_from_lmdb(self.env)]
else:
_keys=[key.decode() for key in get_keys_from_lmdb(self.env)]
return _keys
def get(self, name):
env = self.env
if self.prefix is not None:
name=remove_prefix(name,self.prefix)
with env.begin(write=False) as txn:
byteflow = txn.get(name.encode())
if byteflow is None:
print("fuck",name)
raise name
return self.decode_func(byteflow)
def __exit__(self, exc_type, exc_value, traceback):
del self.env
import json
class KVReader:
def __init__(self,db_path,data_type,prefix=None,load=True):
assert data_type=="text"
if prefix is None:
self.prefix = None
else:
self.prefix=f"{prefix}_"
        self.db_path=db_path
        self._init=False
        self._opened=False
        preload(self,load=load) # set the flags first: preload() may call initialize() synchronously
def initialize(self):
f=open(self.db_path,"r")
start=int(f.read(1000).strip())
f.seek(start)
self.mp=json.load(f)
if self.prefix is not None:
self.mp={self.prefix+k:v for k,v in self.mp.items()}
f.close()
self._init=True
def open(self):
self.f=open(self.db_path,"r")
self._opened=True
@property
def keys(self):
while not self._init:
time.sleep(0.1)
return list(self.mp.keys())
def get(self,key):
if not self._opened:
self.open()
idx=self.mp[key]
self.f.seek(idx)
text=self.f.readline().strip()
return {"alt_text":text}
def __len__(self):
return len(self.mp)
@staticmethod
def create(file,keys,values):
assert len(keys)==len(values)
f=open(file,"w")
f.write("\n"*1000)
idx=[]
for val in values:
idx.append(f.tell())
f.write(val)
f.write("\n")
start=f.tell()
ki={k:i for i,k in zip(idx,keys)}
json.dump(ki, f, ensure_ascii=False)
f.seek(0)
f.write(str(start))
f.close()
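# A minimal round-trip sketch (illustrative only) of the ".kv" layout used above: a
# 1000-byte header storing the byte offset of a JSON {key: line_offset} index, with
# one text value per line. "captions.kv" and the sample records are placeholders.
def _example_kv_round_trip():
    KVReader.create("captions.kv", keys=["img_0", "img_1"],
                    values=["a dog on the beach", "a red bicycle"])
    reader = KVReader("captions.kv", data_type="text")
    assert reader.get("img_0")["alt_text"] == "a dog on the beach"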
class MultipleLMDBManager:
def __init__(self, files: list, data_type,get_key=False,sync=True):
self.files = files
self._is_init = False
self.data_type=data_type
assert data_type in decode_funcs
self.get_key=get_key
if sync:
print("sync",files)
self.initialize()
else:
print("async",files)
preload(self)
def keep_subset(self,subset):
mapping={key:self.mapping[key] for key in subset}
del self.mapping
self.mapping=mapping
def initialize(self):
self.mapping={}
self.managers={}
new_files=[]
for old_file in self.files:
items=old_file.split("|")
file=items[0]
if len(items)>1:
prefix = items[1]
else:
prefix = None
if not file.startswith("glob-"):
new_files.append(old_file)
else:
desc=remove_prefix(file,"glob-")
sub_files = glob.glob(desc)
sub_files = sorted(sub_files)
if prefix is not None:
sub_files = [f"{f}|{prefix}" for f in sub_files]
new_files.extend(sub_files)
self.files=new_files
for i,old_file in enumerate(self.files):
items=old_file.split("|")
file=items[0]
if len(items)>1:
prefix = items[1]
else:
prefix = None
if file.endswith(".lmdb"):
Manager = DBManager
elif file.endswith(".zip"):
Manager = ZipManager
elif file.endswith(".kv"):
Manager = KVReader
else:
                raise ValueError(f"unsupported database file type: {file}")
self.managers[i] = Manager(file,self.data_type,prefix=prefix,load=False)
print(file, " done")
ThreadPool(4).run(preload,self.managers.values())
if self.get_key:
self._keys=[]
for index,manager in self.managers.items():
file=manager.db_path
print(f"{file} loading")
logging.info(f"{file} loading")
keys=manager.keys
self._keys.extend(keys)
for key in keys:
self.mapping[key]=index
logging.info(f"{file} loaded, size = {len(keys)}")
print(f"{file} loaded, size = {len(keys)}")
self._is_init=True
@property
def keys(self):
while not self._is_init:
time.sleep(0.1)
return self._keys
def cleanup(self):
del self._keys
del self.mapping
def get(self, name,source=None):
if source is None:
source=self.mapping[name]
data = self.managers[source].get(name)
return data
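# A minimal configuration sketch (illustrative only): MultipleLMDBManager accepts a
# plain path whose extension picks the backend (.lmdb -> DBManager, .zip -> ZipManager,
# .kv -> KVReader), an optional "|prefix" suffix that namespaces the keys, and a
# "glob-" prefix that expands a wildcard into shards. All paths below are placeholders
# and must exist for the sketch to run.
def _example_multi_db():
    manager = MultipleLMDBManager(
        ["glob-/data/images_part*.zip|img", # every matching zip shard, keys prefixed "img_"
         "/data/extra_images.lmdb"], # a single LMDB shard
        data_type="image", get_key=True, sync=True)
    first_key = manager.keys[0]
    return manager.get(first_key) # decoded PIL image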
class MetaDB:
def __init__(self, path, readonly=True, size=None):
self.readonly = readonly
if readonly:
self.db = lmdb.open(
path, readonly=readonly, max_readers=10000, subdir=False, lock=False
)
else:
assert size is not None
self.db = lmdb.open(
path,
readonly=readonly,
max_readers=10000,
subdir=False,
map_size=int(1073741824 * size),
)
def keys(self):
with self.db.begin(write=False) as txn:
keys = list(txn.cursor().iternext(values=False))
return keys
def encode_int(self,num):
return num.to_bytes(4,"big")
def decode_int(self,num_bytes):
return int.from_bytes(num_bytes,"big")
def get(self, key, func=None):
with self.db.begin(write=False) as txn:
val = txn.get(key)
if val is None:
                raise KeyError(key)
if func:
val = func(val)
return val
| 11,184 | 25.630952 | 122 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/clip.py | import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/image-synthesis")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
# download_target = os.path.join(root, filename)
download_target = "OUTPUT/pretrained_model/ViT-B-32.pt"
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
# model_path = _download(_MODELS[name])
model_path = "OUTPUT/pretrained_model/ViT-B-32.pt"
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
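# A minimal usage sketch (illustrative only): this repo's load() reads the checkpoint
# from OUTPUT/pretrained_model/ViT-B-32.pt, so that file must be present; "cat.png"
# is a placeholder image path.
def _example_load():
    model, preprocess = load("ViT-B/32", device="cpu", jit=False)
    image = preprocess(Image.open("cat.png")).unsqueeze(0) # 1 x 3 x 224 x 224
    with torch.no_grad():
        feats = model.encode_image(image) # 1 x 512 for ViT-B/32
    return feats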
def tokenize(texts: Union[str, List[str]], context_length: int = 77,
add_start_and_end: bool = True, with_mask: bool = True,
pad_value: int = 0, tokenizer=None, just_token: bool = False) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
just_token: bool
If True, just return the token of text
Returns
-------
    A dict with 'token', a two-dimensional tensor of shape [number of input strings, context_length], and 'mask' when with_mask is True; if just_token is True, the raw token lists are returned instead
"""
if isinstance(texts, str):
texts = [texts]
if tokenizer is None:
tokenizer = _tokenizer
sot_token = [tokenizer.encoder["<|startoftext|>"]] if add_start_and_end else []
eot_token = [tokenizer.encoder["<|endoftext|>"]] if add_start_and_end else []
all_tokens = [sot_token + tokenizer.encode(text.lower()) + eot_token for text in texts]
if just_token:
return all_tokens
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) + pad_value
if with_mask:
mask = torch.zeros(len(all_tokens), context_length, dtype=torch.bool)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
temp = tokens[-1]
tokens = tokens[:context_length]
tokens[-1] = temp
assert len(tokens) == context_length
# raise RuntimeError("Input text {} is too long for context length {}".format(texts[i], context_length))
result[i, :len(tokens)] = torch.tensor(tokens)
if with_mask:
mask[i, :len(tokens)] = True
results = {
'token': result,
}
if with_mask:
results['mask'] = mask
return results
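# A minimal usage sketch (illustrative only): unlike the upstream CLIP tokenizer, this
# tokenize() returns a dict, with 'token' a LongTensor [batch, context_length] padded
# with pad_value and 'mask' marking the real (non-padding) positions.
def _example_tokenize():
    out = tokenize(["a painting of a fox", "a photo of a dog"], context_length=77)
    assert out['token'].shape == (2, 77)
    assert out['mask'].dtype == torch.bool
    return out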
| 7,962 | 35.360731 | 142 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/modules/clip/model.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
if hasattr(self, 'visual'):
return self.visual.conv1.weight.dtype
else:
return self.transformer.resblocks[0].attn.in_proj_weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
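# A minimal shape sketch (illustrative only): for a batch of N preprocessed images and
# N tokenized texts, forward() returns two N x N similarity matrices (image->text and
# its transpose), already scaled by the learned logit_scale. Arguments are placeholders.
def _example_clip_logits(model, images, text_tokens):
    # images: N x 3 x H x W float tensor, text_tokens: N x context_length LongTensor
    logits_per_image, logits_per_text = model(images, text_tokens)
    probs = logits_per_image.softmax(dim=-1) # each row sums to 1 over the N texts
    return probs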
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
| 17,333 | 38.848276 | 178 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/base_codec.py | import torch
from torch import nn
class BaseCodec(nn.Module):
def get_tokens(self, x, **kwargs):
"""
Input:
x: input data
Return:
                indices: B x L, the codebook indices, where L is the length
                    of the flattened feature map
"""
raise NotImplementedError
def get_number_of_tokens(self):
"""
Return: int, the number of tokens
"""
raise NotImplementedError
def encode(self, img):
raise NotImplementedError
def decode(self, img_seq):
raise NotImplementedError
def forward(self, **kwargs):
raise NotImplementedError
def train(self, mode=True):
self.training = mode
if self.trainable and mode:
return super().train(True)
else:
return super().train(False)
def _set_trainable(self):
if not self.trainable:
for pn, p in self.named_parameters():
p.requires_grad = False
self.eval() | 1,046 | 23.348837 | 72 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/taming_gumbel_vqvae.py | import torch
import torch.nn as nn
from omegaconf import OmegaConf
import sys
sys.path.append("..")
# sys.path.append("../image_synthesis")
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.taming.models.vqgan import GumbelVQ, VQModel
from image_synthesis.taming.models.cond_transformer import Net2NetTransformer
import os
import torchvision.transforms.functional as TF
import PIL
from image_synthesis.modeling.codecs.base_codec import BaseCodec
from einops import rearrange
import math
class Encoder(nn.Module):
def __init__(self, encoder, quant_conv, quantize):
super().__init__()
self.encoder = encoder
self.quant_conv = quant_conv
self.quantize = quantize
@torch.no_grad()
def forward(self, x):
x = 2*x - 1
h = self.encoder(x)
h = self.quant_conv(h)
quant, _, [_, _, indices] = self.quantize(h)
return indices.view(x.shape[0], -1)
class Decoder(nn.Module):
def __init__(self, decoder, post_quant_conv, quantize, w=16, h=16):
super().__init__()
self.decoder = decoder
self.post_quant_conv = post_quant_conv
self.quantize = quantize
self.w = w
self.h = h
@torch.no_grad()
def forward(self, indices):
z = self.quantize.get_codebook_entry(indices.view(-1), shape=(indices.shape[0], self.h, self.w, -1))
quant = self.post_quant_conv(z)
dec = self.decoder(quant)
x = torch.clamp(dec, -1., 1.)
x = (x + 1.)/2.
return x
class TamingFFHQVQVAE(BaseCodec):
def __init__(
self,
trainable=False,
token_shape=[16,16],
config_path='OUTPUT/pretrained_model/taming_dvae/vqgan_ffhq_f16_1024.yaml',
ckpt_path='OUTPUT/pretrained_model/taming_dvae/vqgan_ffhq_f16_1024.pth',
num_tokens=1024,
quantize_number=0,
mapping_path=None,
):
super().__init__()
model = self.LoadModel(config_path, ckpt_path)
self.enc = Encoder(model.encoder, model.quant_conv, model.quantize)
self.dec = Decoder(model.decoder, model.post_quant_conv, model.quantize, token_shape[0], token_shape[1])
self.num_tokens = num_tokens
self.quantize_number = quantize_number
if self.quantize_number != 0 and mapping_path!=None:
self.full_to_quantize = torch.load(mapping_path)
self.quantize_to_full = torch.zeros(self.quantize_number)-1
for idx, i in enumerate(self.full_to_quantize):
if self.quantize_to_full[i] == -1:
self.quantize_to_full[i] = idx
self.quantize_to_full = self.quantize_to_full.long()
self.trainable = trainable
self.token_shape = token_shape
self._set_trainable()
def LoadModel(self, config_path, ckpt_path):
config = OmegaConf.load(config_path)
# model = instantiate_from_config(config.model)
model = Net2NetTransformer(**config.model.params)
sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
model.load_state_dict(sd, strict=False)
if (isinstance(model, Net2NetTransformer)):
model = model.first_stage_model
return model
@property
def device(self):
# import pdb; pdb.set_trace()
return self.enc.quant_conv.weight.device
def preprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-255
"""
imgs = imgs.div(255) # map to 0 - 1
return imgs
# return map_pixels(imgs)
def postprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-1
"""
imgs = imgs * 255
return imgs
def get_tokens(self, imgs, **kwargs):
imgs = self.preprocess(imgs)
code = self.enc(imgs)
if self.quantize_number != 0:
code = self.full_to_quantize[code]
output = {'token': code}
# output = {'token': rearrange(code, 'b h w -> b (h w)')}
return output
def decode(self, img_seq):
if self.quantize_number != 0:
img_seq=self.quantize_to_full[img_seq].type_as(img_seq)
b, n = img_seq.shape
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(math.sqrt(n)))
x_rec = self.dec(img_seq)
x_rec = self.postprocess(x_rec)
return x_rec
class TamingVQVAE(BaseCodec):
def __init__(
self,
trainable=False,
token_shape=[16,16],
config_path='OUTPUT/pretrained_model/taming_dvae/vqgan_imagenet_f16_16384.yaml',
ckpt_path='OUTPUT/pretrained_model/taming_dvae/vqgan_imagenet_f16_16384.pth',
num_tokens=16384,
quantize_number=974,
mapping_path='./help_folder/statistics/taming_vqvae_974.pt',
):
super().__init__()
model = self.LoadModel(config_path, ckpt_path)
self.enc = Encoder(model.encoder, model.quant_conv, model.quantize)
self.dec = Decoder(model.decoder, model.post_quant_conv, model.quantize, token_shape[0], token_shape[1])
self.num_tokens = num_tokens
self.quantize_number = quantize_number
if self.quantize_number != 0 and mapping_path!=None:
self.full_to_quantize = torch.load(mapping_path)
self.quantize_to_full = torch.zeros(self.quantize_number)-1
for idx, i in enumerate(self.full_to_quantize):
if self.quantize_to_full[i] == -1:
self.quantize_to_full[i] = idx
self.quantize_to_full = self.quantize_to_full.long()
self.trainable = trainable
self.token_shape = token_shape
self._set_trainable()
def LoadModel(self, config_path, ckpt_path):
config = OmegaConf.load(config_path)
model = VQModel(**config.model.params)
sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
model.load_state_dict(sd, strict=False)
return model
@property
def device(self):
# import pdb; pdb.set_trace()
return self.enc.quant_conv.weight.device
def preprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-255
"""
imgs = imgs.div(255) # map to 0 - 1
return imgs
# return map_pixels(imgs)
def postprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-1
"""
imgs = imgs * 255
return imgs
def get_tokens(self, imgs, **kwargs):
imgs = self.preprocess(imgs)
code = self.enc(imgs)
if self.quantize_number != 0:
code = self.full_to_quantize[code]
output = {'token': code}
# output = {'token': rearrange(code, 'b h w -> b (h w)')}
return output
def decode(self, img_seq):
if self.quantize_number != 0:
img_seq=self.quantize_to_full[img_seq].type_as(img_seq)
b, n = img_seq.shape
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(math.sqrt(n)))
x_rec = self.dec(img_seq)
x_rec = self.postprocess(x_rec)
return x_rec
class TamingGumbelVQVAE(BaseCodec):
def __init__(
self,
trainable=False,
token_shape=[32,32],
config_path='OUTPUT/pretrained_model/taming_dvae/taming_f8_8192_openimages.yaml',
ckpt_path='OUTPUT/pretrained_model/taming_dvae/taming_f8_8192_openimages_last.pth',
num_tokens=8192,
quantize_number=2887,
mapping_path='./help_folder/statistics/taming_vqvae_2887.pt',
):
super().__init__()
model = self.LoadModel(config_path, ckpt_path)
self.enc = Encoder(model.encoder, model.quant_conv, model.quantize)
self.dec = Decoder(model.decoder, model.post_quant_conv, model.quantize, token_shape[0], token_shape[1])
self.num_tokens = num_tokens
self.quantize_number = quantize_number
if self.quantize_number != 0 and mapping_path!=None:
self.full_to_quantize = torch.load(mapping_path)
self.quantize_to_full = torch.zeros(self.quantize_number)-1
for idx, i in enumerate(self.full_to_quantize):
if self.quantize_to_full[i] == -1:
self.quantize_to_full[i] = idx
self.quantize_to_full = self.quantize_to_full.long()
self.trainable = trainable
self.token_shape = token_shape
self._set_trainable()
def LoadModel(self, config_path, ckpt_path):
config = OmegaConf.load(config_path)
model = GumbelVQ(**config.model.params)
sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
model.load_state_dict(sd, strict=False)
return model
@property
def device(self):
# import pdb; pdb.set_trace()
return self.enc.quant_conv.weight.device
def preprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-255
"""
imgs = imgs.div(255) # map to 0 - 1
return imgs
# return map_pixels(imgs)
def postprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-1
"""
imgs = imgs * 255
return imgs
def get_tokens(self, imgs, **kwargs):
imgs = self.preprocess(imgs)
code = self.enc(imgs)
if self.quantize_number != 0:
code = self.full_to_quantize[code]
output = {'token': code}
# output = {'token': rearrange(code, 'b h w -> b (h w)')}
return output
def decode(self, img_seq):
if self.quantize_number != 0:
img_seq=self.quantize_to_full[img_seq].type_as(img_seq)
b, n = img_seq.shape
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(math.sqrt(n)))
x_rec = self.dec(img_seq)
x_rec = self.postprocess(x_rec)
return x_rec
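# A minimal round-trip sketch (illustrative only): all three codecs share the BaseCodec
# interface, so images in the 0-255 range go in, get_tokens() returns
# {'token': B x (H*W) indices}, and decode() maps indices back to a 0-255
# reconstruction. The random batch only illustrates shapes; real use requires the
# pretrained checkpoints referenced in the default arguments.
def _example_codec_round_trip():
    codec = TamingGumbelVQVAE() # f8 / 8192-codebook OpenImages checkpoint
    imgs = torch.randint(0, 256, (1, 3, 256, 256)).float() # placeholder batch
    tokens = codec.get_tokens(imgs)['token'] # 1 x 1024 for a 32 x 32 token grid
    rec = codec.decode(tokens) # 1 x 3 x 256 x 256, values in 0-255
    return rec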
| 10,011 | 33.885017 | 112 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/patch_vqgan.py | from numpy.core.shape_base import block
from numpy.lib import stride_tricks
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import random
from torch.nn.modules.linear import Linear
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.modeling.codecs.base_codec import BaseCodec
# from image_synthesis.modeling.modules.vqgan_loss.vqperceptual import VQLPIPSWithDiscriminator
from image_synthesis.modeling.utils.misc import mask_with_top_k, logits_top_k, get_token_type
from image_synthesis.distributed.distributed import all_reduce
# class for quantization
# class for quantization
class EMAVectorQuantizer(nn.Module):
"""
see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
____________________________________________
Discretization bottleneck part of the VQ-VAE.
Inputs:
- n_e : number of embeddings
- e_dim : dimension of embedding
- beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
_____________________________________________
"""
def __init__(self, n_e, e_dim, beta,
masked_n_e_ratio=0,#1.0/4,
embed_init_scale=1.0,
decay = 0.99,
embed_ema=True,
get_embed_type='retrive',
distance_type='euclidean',
):
super(EMAVectorQuantizer, self).__init__()
self.n_e = n_e
self.masked_n_e_ratio = masked_n_e_ratio
self.e_dim = e_dim
self.beta = beta
self.decay = decay
self.embed_ema = embed_ema
self.get_embed_type = get_embed_type
self.distance_type = distance_type
if self.embed_ema:
self.eps = 1.0e-5
embed = torch.randn(n_e, e_dim)
# embed = torch.zeros(n_e, e_dim)
# embed.data.uniform_(-embed_init_scale / self.n_e, embed_init_scale / self.n_e)
self.register_buffer("embedding", embed)
self.register_buffer("cluster_size", torch.zeros(n_e))
self.register_buffer("embedding_avg", embed.clone())
else:
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-embed_init_scale / self.n_e, embed_init_scale / self.n_e)
self.masked_embed_start = self.n_e - int(self.masked_n_e_ratio * self.n_e)
if self.distance_type == 'learned':
self.distance_fc = nn.Linear(self.e_dim, self.n_e)
@property
def norm_feat(self):
return self.distance_type in ['cosine', 'sinkhorn']
@property
def embed_weight(self):
if isinstance(self.embedding, nn.Embedding):
return self.embedding.weight
else:
return self.embedding
def norm_embedding(self):
if self.training:
with torch.no_grad():
w = self.embed_weight.data.clone()
w = F.normalize(w, dim=1, p=2)
if isinstance(self.embedding, nn.Embedding):
self.embedding.weight.copy_(w)
else:
self.embedding.copy_(w)
def _quantize(self, z, token_type=None):
"""
z: L x D
            token_type: L, where 1 denotes an unmasked token and any other value a masked token
"""
if self.distance_type == 'euclidean':
d = torch.sum(z ** 2, dim=1, keepdim=True) + \
torch.sum(self.embed_weight**2, dim=1) - 2 * \
torch.matmul(z, self.embed_weight.t())
elif self.distance_type == 'cosine':
d = 0 - torch.einsum('ld,nd->ln', z, self.embed_weight) # BHW x N
else:
raise NotImplementedError('distance not implemented for {}'.format(self.distance_type))
# find closest encodings
# import pdb; pdb.set_trace()
if token_type is None or self.masked_embed_start == self.n_e:
min_encoding_indices = torch.argmin(d, dim=1) # L
else:
min_encoding_indices = torch.zeros(z.shape[0]).long().to(z.device)
idx = token_type == 1
if idx.sum() > 0:
d_ = d[idx][:, :self.masked_embed_start] # l x n
indices_ = torch.argmin(d_, dim=1)
min_encoding_indices[idx] = indices_
idx = token_type != 1
if idx.sum() > 0:
d_ = d[idx][:, self.masked_embed_start:] # l x n
indices_ = torch.argmin(d_, dim=1) + self.masked_embed_start
min_encoding_indices[idx] = indices_
if self.get_embed_type == 'matmul':
min_encodings = torch.zeros(min_encoding_indices.shape[0], self.n_e).to(z)
min_encodings.scatter_(1, min_encoding_indices.unsqueeze(1), 1)
# import pdb; pdb.set_trace()
z_q = torch.matmul(min_encodings, self.embed_weight)#.view(z.shape)
elif self.get_embed_type == 'retrive':
z_q = F.embedding(min_encoding_indices, self.embed_weight)#.view(z.shape)
else:
raise NotImplementedError
return z_q, min_encoding_indices
def forward(self, z, token_type=None):
"""
z: B x C x H x W
token_type: B x 1 x H x W
"""
if self.distance_type in ['sinkhorn', 'cosine']:
# need to norm feat and weight embedding
self.norm_embedding()
z = F.normalize(z, dim=1, p=2)
# reshape z -> (batch, height, width, channel) and flatten
batch_size, _, height, width = z.shape
# import pdb; pdb.set_trace()
z = z.permute(0, 2, 3, 1).contiguous() # B x H x W x C
z_flattened = z.view(-1, self.e_dim) # BHW x C
if token_type is not None:
token_type_flattened = token_type.view(-1)
else:
token_type_flattened = None
z_q, min_encoding_indices = self._quantize(z_flattened, token_type_flattened)
z_q = z_q.view(batch_size, height, width, -1) #.permute(0, 2, 3, 1).contiguous()
if self.training and self.embed_ema:
# import pdb; pdb.set_trace()
assert self.distance_type in ['euclidean', 'cosine']
indices_onehot = F.one_hot(min_encoding_indices, self.n_e).to(z_flattened.dtype) # L x n_e
indices_onehot_sum = indices_onehot.sum(0) # n_e
z_sum = (z_flattened.transpose(0, 1) @ indices_onehot).transpose(0, 1) # n_e x D
all_reduce(indices_onehot_sum)
all_reduce(z_sum)
self.cluster_size.data.mul_(self.decay).add_(indices_onehot_sum, alpha=1 - self.decay)
self.embedding_avg.data.mul_(self.decay).add_(z_sum, alpha=1 - self.decay)
n = self.cluster_size.sum()
cluster_size = (self.cluster_size + self.eps) / (n + self.n_e * self.eps) * n
embed_normalized = self.embedding_avg / cluster_size.unsqueeze(1)
self.embedding.data.copy_(embed_normalized)
# print((self.embed > 1.0e-20).abs().sum())
if self.embed_ema:
loss = (z_q.detach() - z).pow(2).mean()
else:
# compute loss for embedding
loss = torch.mean((z_q.detach()-z).pow(2)) + self.beta * torch.mean((z_q - z.detach()).pow(2))
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
# used_quantize_embed = torch.zeros_like(loss) + min_encoding_indices.unique().shape[0]
# used_quantize_embed = all_reduce(used_quantize_embed) / get_world_size()
output = {
'quantize': z_q,
'used_quantize_embed': torch.zeros_like(loss) + min_encoding_indices.unique().shape[0],
'quantize_loss': loss,
'index': min_encoding_indices.view(batch_size, height, width)
}
if token_type_flattened is not None:
unmasked_num_token = all_reduce((token_type_flattened == 1).sum())
masked_num_token = all_reduce((token_type_flattened != 1).sum())
output['unmasked_num_token'] = unmasked_num_token
output['masked_num_token'] = masked_num_token
return output
def only_get_indices(self, z, token_type=None):
"""
z: B x C x H x W
token_type: B x 1 x H x W
"""
if self.distance_type in ['sinkhorn', 'cosine']:
# need to norm feat and weight embedding
self.norm_embedding()
z = F.normalize(z, dim=1, p=2)
# reshape z -> (batch, height, width, channel) and flatten
batch_size, _, height, width = z.shape
# import pdb; pdb.set_trace()
z = z.permute(0, 2, 3, 1).contiguous() # B x H x W x C
z_flattened = z.view(-1, self.e_dim) # BHW x C
if token_type is not None:
token_type_flattened = token_type.view(-1)
else:
token_type_flattened = None
_, min_encoding_indices = self._quantize(z_flattened, token_type_flattened)
min_encoding_indices = min_encoding_indices.view(batch_size, height, width)
return min_encoding_indices
def get_codebook_entry(self, indices, shape):
# import pdb; pdb.set_trace()
# shape specifying (batch, height, width)
if self.get_embed_type == 'matmul':
min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
min_encodings.scatter_(1, indices[:,None], 1)
# get quantized latent vectors
z_q = torch.matmul(min_encodings.float(), self.embed_weight)
elif self.get_embed_type == 'retrive':
z_q = F.embedding(indices, self.embed_weight)
else:
raise NotImplementedError
if shape is not None:
z_q = z_q.view(*shape, -1) # B x H x W x C
if len(z_q.shape) == 4:
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
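# A minimal usage sketch (illustrative only) of the dictionary returned by
# EMAVectorQuantizer.forward for a feature map of shape B x e_dim x H x W; the sizes
# are arbitrary, and eval() is used so the sketch skips the distributed EMA codebook update.
def _example_ema_quantizer():
    quantizer = EMAVectorQuantizer(n_e=512, e_dim=64, beta=0.25).eval()
    feat = torch.randn(2, 64, 16, 16) # B x e_dim x H x W
    out = quantizer(feat)
    # out['quantize']: 2 x 64 x 16 x 16 straight-through quantized features
    # out['index']: 2 x 16 x 16 codebook indices
    # out['quantize_loss']: scalar commitment loss
    return out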
# blocks for encoder and decoder
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
def nonlinearity(x):
# swish
return x*torch.sigmoid(x)
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv, upsample_type='interpolate'):
super().__init__()
self.upsample_type = upsample_type
self.with_conv = with_conv
if self.upsample_type == 'conv':
            self.sample = nn.ConvTranspose2d(in_channels, in_channels, kernel_size=4, stride=2, padding=1) # no trailing comma: a tuple here would break self.sample(x) below
if self.with_conv:
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
if self.upsample_type == 'conv':
x = self.sample(x)
else:
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
if self.with_conv:
x = self.conv(x)
return x
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
dropout=0.0, temb_channels=512):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if temb_channels > 0:
self.temb_proj = torch.nn.Linear(temb_channels,
out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
else:
self.nin_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x, temb):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
if temb is not None:
h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x+h
class AttnBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
# compute attention
b,c,h,w = q.shape
q = q.reshape(b,c,h*w)
q = q.permute(0,2,1) # b,hw,c
k = k.reshape(b,c,h*w) # b,c,hw
w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
w_ = w_ * (int(c)**(-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
v = v.reshape(b,c,h*w)
w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
h_ = h_.reshape(b,c,h,w)
h_ = self.proj_out(h_)
return x+h_
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=2,
padding=0)
def forward(self, x):
if self.with_conv:
pad = (0,1,0,1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
return x
class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), scale_by_2=None, num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=False, **ignore_kwargs):
super().__init__()
if isinstance(resolution, int):
resolution = [resolution, resolution] # H, W
elif isinstance(resolution, (tuple, list)):
resolution = list(resolution)
else:
raise ValueError('Unknown type of resolution:', resolution)
attn_resolutions_ = []
for ar in attn_resolutions:
if isinstance(ar, (list, tuple)):
attn_resolutions_.append(list(ar))
else:
attn_resolutions_.append([ar, ar])
attn_resolutions = attn_resolutions_
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
down = nn.Module()
down.block = block
down.attn = attn
if scale_by_2 is None:
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = [r // 2 for r in curr_res]
else:
if scale_by_2[i_level]:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = [r // 2 for r in curr_res]
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
#assert x.shape[2] == self.resolution[0] and x.shape[3] == self.resolution[1], "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
# timestep embedding
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if getattr(self.down[i_level], 'downsample', None) is not None:
h = self.down[i_level].downsample(hs[-1])
if i_level != self.num_resolutions-1:
# hs.append(self.down[i_level].downsample(hs[-1]))
hs.append(h)
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
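# Editorial sketch (not part of the original file): example shapes, assuming ch=64, ch_mult=(1,2,4),
# num_res_blocks=2, attn_resolutions=[16], in_channels=3, resolution=64, z_channels=4 and
# scale_by_2=None. The Encoder downsamples once per level except the last (twice here), so a
# (B, 3, 64, 64) image maps to a (B, 4, 16, 16) latent.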
class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), scale_by_2=None, num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True,
resolution, z_channels, **ignorekwargs):
super().__init__()
if isinstance(resolution, int):
resolution = [resolution, resolution] # H, W
elif isinstance(resolution, (tuple, list)):
resolution = list(resolution)
else:
raise ValueError('Unknown type of resolution:', resolution)
attn_resolutions_ = []
for ar in attn_resolutions:
if isinstance(ar, (list, tuple)):
attn_resolutions_.append(list(ar))
else:
attn_resolutions_.append([ar, ar])
attn_resolutions = attn_resolutions_
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.requires_image = False
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
if scale_by_2 is None:
curr_res = [r // 2**(self.num_resolutions-1) for r in self.resolution]
else:
scale_factor = sum([int(s) for s in scale_by_2])
curr_res = [r // 2**scale_factor for r in self.resolution]
self.z_shape = (1, z_channels, curr_res[0], curr_res[1])
print("Working with z of shape {} = {} dimensions.".format(self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
up = nn.Module()
up.block = block
up.attn = attn
if scale_by_2 is None:
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = [r * 2 for r in curr_res]
else:
if scale_by_2[i_level]:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = [r * 2 for r in curr_res]
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, z, **kwargs):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
# timestep embedding
temb = None
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](h, temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
# if i_level != 0:
if getattr(self.up[i_level], 'upsample', None) is not None:
h = self.up[i_level].upsample(h)
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
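# Editorial note (not part of the original file): the Decoder mirrors the Encoder, upsampling once
# per level except i_level 0, so with the example configuration above a (B, 4, 16, 16) latent is
# decoded back to a (B, out_ch, 64, 64) image.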
class PatchVQGAN(BaseCodec):
def __init__(self,
*,
encoder_config,
decoder_config,
lossconfig=None,
n_embed,
embed_dim,
ignore_keys=[],
data_info={'key': 'image'},
quantizer_type='VQ',
quantizer_dis_type='euclidean',
decay = 0.99,
trainable=False,
ckpt_path=None,
token_shape=None
):
super().__init__()
self.encoder = instantiate_from_config(encoder_config) # Encoder(**encoder_config)
self.decoder = instantiate_from_config(decoder_config) # Decoder(**decoder_config)
if quantizer_type == 'EMAVQ':
self.quantize = EMAVectorQuantizer(n_embed, embed_dim, beta=0.25, decay = decay, distance_type=quantizer_dis_type)
print('using EMA vector Quantizer')
elif quantizer_type == 'PQEMAVQ':
self.quantize = PQEMAVectorQuantizer(n_embed, embed_dim, beta=0.25,decay = decay, distance_type=quantizer_dis_type)
print('using PQ EMA vector Quantizer')
elif quantizer_type == 'VQ':
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25)
else:
raise NotImplementedError
# import pdb; pdb.set_trace()
self.quant_conv = torch.nn.Conv2d(encoder_config['params']["z_channels"], embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, decoder_config['params']["z_channels"], 1)
self.data_info = data_info
if lossconfig is not None and trainable:
self.loss = instantiate_from_config(lossconfig)
else:
self.loss = None
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
self.trainable = trainable
self._set_trainable()
self.token_shape = token_shape
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")
if 'model' in sd:
sd = sd['model']
else:
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("VQGAN: Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"VQGAN: Restored from {path}")
@property
def device(self):
return self.quant_conv.weight.device
def pre_process(self, data):
data = data.to(self.device)
data = data / 127.5 - 1.0
return data
def multi_pixels_with_mask(self, data, mask):
if data.max() > 1:
raise ValueError('The data need to be preprocessed!')
mask = mask.to(self.device)
data = data * mask
data[~mask.repeat(1,3,1,1)] = -1.0
return data
def post_process(self, data):
data = (data + 1.0) * 127.5
data = torch.clamp(data, min=0.0, max=255.0)
return data
def get_number_of_tokens(self):
return self.quantize.n_e
def get_tokens(self, data, mask=None, return_token_index=False, **kwargs):
data = self.pre_process(data)
x = self.encoder(data)
x = self.quant_conv(x)
idx = self.quantize(x)['index']
if self.token_shape is None:
self.token_shape = idx.shape[1:3]
if self.decoder.requires_image:
self.mask_im_tmp = self.multi_pixels_with_mask(data, mask)
output = {}
output['token'] = idx.view(idx.shape[0], -1)
# import pdb; pdb.set_trace()
if mask is not None: # mask should be B x 1 x H x W
# downsampling
# mask = F.interpolate(mask.float(), size=idx_mask.shape[-2:]).to(torch.bool)
token_type = get_token_type(mask, self.token_shape) # B x 1 x H x W
mask = token_type == 1
output = {
'target': idx.view(idx.shape[0], -1).clone(),
'mask': mask.view(mask.shape[0], -1),
'token': idx.view(idx.shape[0], -1),
'token_type': token_type.view(token_type.shape[0], -1),
}
else:
output = {
'token': idx.view(idx.shape[0], -1)
}
# get token index
# used for computing token frequency
if return_token_index:
token_index = output['token'] #.view(-1)
output['token_index'] = token_index
return output
def decode(self, token):
assert self.token_shape is not None
# import pdb; pdb.set_trace()
bhw = (token.shape[0], self.token_shape[0], self.token_shape[1])
quant = self.quantize.get_codebook_entry(token.view(-1), shape=bhw)
quant = self.post_quant_conv(quant)
if self.decoder.requires_image:
rec = self.decoder(quant, self.mask_im_tmp)
self.mask_im_tmp = None
else:
rec = self.decoder(quant)
rec = self.post_process(rec)
return rec
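    # Editorial round-trip sketch (not part of the original file; `vqgan` is an assumed,
    # already-built PatchVQGAN instance):
    #   tokens = vqgan.get_tokens(images)['token']   # images: B x 3 x H x W, values in [0, 255]
    #   rec = vqgan.decode(tokens)                   # B x 3 x H x W, values back in [0, 255]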
def get_rec_loss(self, input, rec):
if input.max() > 1:
input = self.pre_process(input)
if rec.max() > 1:
rec = self.pre_process(rec)
rec_loss = F.mse_loss(rec, input)
return rec_loss
@torch.no_grad()
def sample(self, batch):
data = self.pre_process(batch[self.data_info['key']])
x = self.encoder(data)
x = self.quant_conv(x)
quant = self.quantize(x)['quantize']
quant = self.post_quant_conv(quant)
if self.decoder.requires_image:
mask_im = self.multi_pixels_with_mask(data, batch['mask'])
rec = self.decoder(quant, mask_im)
else:
rec = self.decoder(quant)
rec = self.post_process(rec)
out = {'input': batch[self.data_info['key']], 'reconstruction': rec}
if self.decoder.requires_image:
out['mask_input'] = self.post_process(mask_im)
out['mask'] = batch['mask'] * 255
# import pdb; pdb.set_trace()
return out
def get_last_layer(self):
if isinstance(self.decoder, Decoder):
return self.decoder.conv_out.weight
elif isinstance(self.decoder, PatchDecoder):
return self.decoder.post_layer.weight
elif isinstance(self.decoder, Patch8x8Decoder):
return self.decoder.post_layer.weight
else:
return self.decoder.patch_de_embed.proj.weight
def parameters(self, recurse=True, name=None):
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
if name == 'generator':
params = list(self.encoder.parameters())+ \
list(self.decoder.parameters())+\
list(self.quantize.parameters())+\
list(self.quant_conv.parameters())+\
list(self.post_quant_conv.parameters())
elif name == 'discriminator':
params = self.loss.discriminator.parameters()
else:
raise ValueError("Unknown type of name {}".format(name))
return params
def forward(self, batch, name='none', return_loss=True, step=0, **kwargs):
if name == 'generator':
input = self.pre_process(batch[self.data_info['key']])
x = self.encoder(input)
x = self.quant_conv(x)
quant_out = self.quantize(x)
quant = quant_out['quantize']
emb_loss = quant_out['quantize_loss']
# recconstruction
quant = self.post_quant_conv(quant)
if self.decoder.requires_image:
rec = self.decoder(quant, self.multi_pixels_with_mask(input, batch['mask']))
else:
rec = self.decoder(quant)
            # save some tensors for the discriminator step
self.input_tmp = input
self.rec_tmp = rec
if isinstance(self.loss, VQLPIPSWithDiscriminator):
output = self.loss(codebook_loss=emb_loss,
inputs=input,
reconstructions=rec,
optimizer_name=name,
global_step=step,
last_layer=self.get_last_layer())
else:
raise NotImplementedError('{}'.format(type(self.loss)))
elif name == 'discriminator':
if isinstance(self.loss, VQLPIPSWithDiscriminator):
output = self.loss(codebook_loss=None,
inputs=self.input_tmp,
reconstructions=self.rec_tmp,
optimizer_name=name,
global_step=step,
last_layer=self.get_last_layer())
else:
raise NotImplementedError('{}'.format(type(self.loss)))
else:
raise NotImplementedError('{}'.format(name))
return output
| 35,439 | 38.116998 | 147 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/image_codec/ema_vqvae.py | import torch
import torch.nn as nn
from omegaconf import OmegaConf
import sys
sys.path.append("..")
# sys.path.append("../image_synthesis")
import os
import torchvision.transforms.functional as TF
import PIL
from image_synthesis.modeling.codecs.base_codec import BaseCodec
from einops import rearrange
import math
import yaml
from image_synthesis.utils.misc import instantiate_from_config
class Encoder(nn.Module):
def __init__(self, encoder, quant_conv, quantize):
super().__init__()
self.encoder = encoder
self.quant_conv = quant_conv
self.quantize = quantize
@torch.no_grad()
def forward(self, x):
x = 2*x - 1
h = self.encoder(x)
h = self.quant_conv(h)
# quant, _, [_, _, indices] = self.quantize(h)
# return indices.view(x.shape[0], -1)
indices = self.quantize.only_get_indices(h)
return indices.view(x.shape[0], -1)
class Decoder(nn.Module):
def __init__(self, decoder, post_quant_conv, quantize, w=16, h=16):
super().__init__()
self.decoder = decoder
self.post_quant_conv = post_quant_conv
self.quantize = quantize
self.w = w
self.h = h
@torch.no_grad()
def forward(self, indices):
z = self.quantize.get_codebook_entry(indices.view(-1), shape=(indices.shape[0], self.h, self.w))
quant = self.post_quant_conv(z)
dec = self.decoder(quant)
x = torch.clamp(dec, -1., 1.)
x = (x + 1.)/2.
return x
class PatchVQVAE(BaseCodec):
def __init__(
self,
trainable=False,
token_shape=[16,16],
):
super().__init__()
config_path = "OUTPUT/pretrained_model/taming_dvae/config.yaml"
ckpt_path="OUTPUT/pretrained_model/taming_dvae/ithq_vqvae.pth"
model = self.LoadModel(config_path, ckpt_path)
self.enc = Encoder(model.encoder, model.quant_conv, model.quantize)
self.dec = Decoder(model.decoder, model.post_quant_conv, model.quantize, token_shape[0], token_shape[1])
self.num_tokens = 4096
self.trainable = trainable
self.token_shape = token_shape
self._set_trainable()
def LoadModel(self, config_path, ckpt_path):
with open(config_path) as f:
config = yaml.full_load(f)
model = instantiate_from_config(config['model'])
sd = torch.load(ckpt_path, map_location="cpu")["model"]
model.load_state_dict(sd, strict=False)
return model
def half(self): # not sure if it's right
"""
overwrite this function
"""
from dall_e.utils import Conv2d
for n, m in self.named_modules():
if isinstance(m, Conv2d) and m.use_float16:
print(n)
m._apply(lambda t: t.half() if t.is_floating_point() else t)
return self
@property
def device(self):
# import pdb; pdb.set_trace()
return self.enc.quant_conv.weight.device
def preprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-255
"""
imgs = imgs.div(255) # map to 0 - 1
return imgs
# return map_pixels(imgs)
def postprocess(self, imgs):
"""
imgs: B x C x H x W, in the range 0-1
"""
imgs = imgs * 255
return imgs
def get_tokens(self, imgs, **kwargs):
imgs = self.preprocess(imgs)
code = self.enc(imgs)
output = {'token': code}
# output = {'token': rearrange(code, 'b h w -> b (h w)')}
return output
def decode(self, img_seq):
b, n = img_seq.shape
# if self.token_shape is not None:
# img_seq = img_seq.view(b, self.token_shape[0], self.token_shape[1])
# else:
# img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(math.sqrt(n)))
x_rec = self.dec(img_seq)
x_rec = self.postprocess(x_rec)
return x_rec
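# Editorial note (not part of the original file): value ranges through PatchVQVAE.
#   get_tokens: pixels in [0, 255] --preprocess--> [0, 1] --(2x - 1)--> [-1, 1] --encoder +
#   quantizer--> discrete token indices; decode: indices --decoder--> [-1, 1] --(x + 1)/2-->
#   [0, 1] --postprocess--> [0, 255].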
| 4,083 | 29.706767 | 112 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/codecs/text_codec/tokenize.py | import torch
import torch.nn as nn
from image_synthesis.modeling.modules.clip.clip import tokenize
from image_synthesis.modeling.codecs.base_codec import BaseCodec
from image_synthesis.utils.misc import instantiate_from_config
class Tokenize(BaseCodec):
def __init__(self, context_length:int = 256,
add_start_and_end:bool = False,
just_token = False,
with_mask:bool = True,
pad_value:int = 0,
clip_embedding = False,
condition_emb_config = None,
tokenizer_config={
'target': 'image_synthesis.modeling.modules.clip.simple_tokenizer.SimpleTokenizer',
'params':{
'end_idx': 49152 # 16384 fo DALL-E
},
},
):
"""
This is a wrapper class for tokenize of texts.
For CLIP and DALLE-pytorch tokenize, the default
arguments are different:
CLIP based:
context_length: 77
add_start_and_end: True
DALLE-pytorch based:
context_length: 256
add_start_and_end: False
"""
super().__init__()
self.context_length = context_length
self.add_start_and_end = add_start_and_end
self.with_mask = with_mask
self.pad_value = pad_value
self.just_token = just_token
self.trainable = False
self.condition_emb = None
self.clip_embedding = clip_embedding
if self.clip_embedding == True:
assert condition_emb_config != None
self.condition_emb = instantiate_from_config(condition_emb_config)
self.tokenizer = instantiate_from_config(tokenizer_config)
def __repr__(self):
rep = "Tokenize for text\n\tcontent_length: {}\n\tadd_start_and_end: {}\n\twith_mask: {}"\
.format(self.context_length, self.add_start_and_end, self.with_mask)
return rep
def check_length(self, token):
return len(token) <= self.context_length
def get_tokens(self, text, **kwargs):
text_token = tokenize(text, context_length=self.context_length,
add_start_and_end=self.add_start_and_end,
with_mask=self.with_mask, pad_value=self.pad_value,
tokenizer=self.tokenizer,
just_token=self.just_token)
if self.clip_embedding == False:
return text_token
else:
if self.condition_emb.additional_last_embedding == True:
with torch.no_grad():
cond_emb, last_embedding = self.condition_emb(text_token['token'].cuda())
text_token['embed_token'] = cond_emb.detach()
text_token['last_embed'] = last_embedding
else:
with torch.no_grad():
cond_emb = self.condition_emb(text_token['token'].cuda())
text_token['embed_token'] = cond_emb.detach()
return text_token
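# Editorial sketch (not part of the original file): a CLIP-style configuration matching the
# defaults described in the class docstring.
#   tok = Tokenize(context_length=77, add_start_and_end=True)
#   out = tok.get_tokens(["a photo of a cat"])  # dict containing at least a 'token' tensor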
| 3,124 | 36.202381 | 104 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/conditional_dalle.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import torch
import math
from torch import nn
from image_synthesis.utils.misc import instantiate_from_config
import time
import numpy as np
from PIL import Image
import os
from torch.cuda.amp import autocast
class C_DALLE(nn.Module):
def __init__(
self,
*,
content_info={'key': 'image'},
condition_info={'key': 'label'},
guidance_scale=1.0,
learnable_cf=False,
content_codec_config,
diffusion_config
):
super().__init__()
self.content_info = content_info
self.condition_info = condition_info
self.guidance_scale = guidance_scale
self.content_codec = instantiate_from_config(content_codec_config)
self.transformer = instantiate_from_config(diffusion_config)
self.truncation_forward = False
def parameters(self, recurse=True, name=None):
# return super().parameters(recurse=True)
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
names = name.split('+')
params = []
for n in names:
try: # the parameters() method is not overwritten for some classes
                    params += getattr(self, n).parameters(recurse=recurse, name=n)
                except:
                    params += getattr(self, n).parameters(recurse=recurse)
return params
@property
def device(self):
return self.transformer.device
def get_ema_model(self):
return self.transformer
@torch.no_grad()
    def prepare_condition(self, batch, condition=None):
        cond_key = self.condition_info['key']
        cond = batch[cond_key] if condition is None else condition
if torch.is_tensor(cond):
cond = cond.to(self.device)
cond_ = {}
cond_['condition_token'] = cond
return cond_
@autocast(enabled=False)
@torch.no_grad()
def prepare_content(self, batch, with_mask=False):
cont_key = self.content_info['key']
cont = batch[cont_key]
if torch.is_tensor(cont):
cont = cont.to(self.device)
if not with_mask:
cont = self.content_codec.get_tokens(cont)
else:
            mask = batch['mask']
cont = self.content_codec.get_tokens(cont, mask, enc_with_mask=False)
cont_ = {}
for k, v in cont.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cont_['content_' + k] = v
return cont_
@torch.no_grad()
def prepare_input(self, batch):
input = self.prepare_condition(batch)
input.update(self.prepare_content(batch))
return input
def predict_start_with_truncation(self, func, sample_type):
if sample_type[-1] == 'p':
truncation_k = int(sample_type[:-1].replace('top', ''))
content_codec = self.content_codec
save_path = self.this_save_path
def wrapper(*args, **kwards):
out = func(*args, **kwards)
val, ind = out.topk(k = truncation_k, dim=1)
probs = torch.full_like(out, -70)
probs.scatter_(1, ind, val)
return probs
return wrapper
elif sample_type[-1] == 'r':
truncation_r = float(sample_type[:-1].replace('top', ''))
def wrapper(*args, **kwards):
out = func(*args, **kwards)
temp, indices = torch.sort(out, 1, descending=True)
temp1 = torch.exp(temp)
temp2 = temp1.cumsum(dim=1)
temp3 = temp2 < truncation_r
new_temp = torch.full_like(temp3[:,0:1,:], True)
temp6 = torch.cat((new_temp, temp3), dim=1)
temp3 = temp6[:,:-1,:]
temp4 = temp3.gather(1, indices.argsort(1))
temp5 = temp4.float()*out+(1-temp4.float())*(-70)
probs = temp5
return probs
return wrapper
        else:
            raise ValueError("unknown sample type: {}".format(sample_type))
@torch.no_grad()
def generate_content(
self,
*,
batch,
condition=None,
filter_ratio = 0.5,
temperature = 1.0,
content_ratio = 0.0,
replicate=1,
return_att_weight=False,
sample_type="normal",
):
self.eval()
if type(batch['label']) == list:
batch['label']=torch.tensor(batch['label'])
if condition is None:
condition = self.prepare_condition(batch=batch)
else:
condition = self.prepare_condition(batch=None, condition=condition)
# content = None
if replicate != 1:
for k in condition.keys():
if condition[k] is not None:
condition[k] = torch.cat([condition[k] for _ in range(replicate)], dim=0)
content_token = None
guidance_scale = self.guidance_scale
cf_cond_emb = torch.ones(len(batch['label']) * replicate).to(self.device) * 1000
def cf_predict_start(log_x_t, cond_emb, t):
log_x_recon = self.transformer.predict_start(log_x_t, cond_emb, t)[:, :-1]
if abs(guidance_scale - 1) < 1e-3:
return torch.cat((log_x_recon, self.transformer.zero_vector), dim=1)
cf_log_x_recon = self.transformer.predict_start(log_x_t, cf_cond_emb.type_as(cond_emb), t)[:, :-1]
log_new_x_recon = cf_log_x_recon + guidance_scale * (log_x_recon - cf_log_x_recon)
log_new_x_recon -= torch.logsumexp(log_new_x_recon, dim=1, keepdim=True)
log_new_x_recon = log_new_x_recon.clamp(-70, 0)
log_pred = torch.cat((log_new_x_recon, self.transformer.zero_vector), dim=1)
return log_pred
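        # Editorial note (not in the original file): cf_predict_start applies classifier-free
        # guidance in log space,
        #   log p_guided(x0) = log p_uncond(x0) + s * (log p_cond(x0) - log p_uncond(x0)),
        # with s = guidance_scale, followed by a logsumexp renormalization and a clamp to [-70, 0];
        # the unconditional branch uses cf_cond_emb as the null condition.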
if sample_type.split(',')[0][:3] == "top" and self.truncation_forward == False:
self.transformer.cf_predict_start = self.predict_start_with_truncation(cf_predict_start, sample_type.split(',')[0])
self.truncation_forward = True
trans_out = self.transformer.sample(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=filter_ratio,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=False,
print_log=False,
sample_type=sample_type)
content = self.content_codec.decode(trans_out['content_token']) #(8,1024)->(8,3,256,256)
self.train()
out = {
'content': content
}
return out
@torch.no_grad()
def reconstruct(
self,
input
):
if torch.is_tensor(input):
input = input.to(self.device)
cont = self.content_codec.get_tokens(input)
cont_ = {}
for k, v in cont.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cont_['content_' + k] = v
rec = self.content_codec.decode(cont_['content_token'])
return rec
@torch.no_grad()
def sample(
self,
batch,
clip = None,
temperature = 1.,
return_rec = True,
filter_ratio = [0, 0.5, 1.0],
content_ratio = [1], # the ratio to keep the encoded content tokens
return_att_weight=False,
return_logits=False,
sample_type="normal",
**kwargs,
):
self.eval()
condition = self.prepare_condition(batch)
content = self.prepare_content(batch)
content_samples = {'input_image': batch[self.content_info['key']]}
if return_rec:
content_samples['reconstruction_image'] = self.content_codec.decode(content['content_token'])
# import pdb; pdb.set_trace()
for fr in filter_ratio:
for cr in content_ratio:
num_content_tokens = int((content['content_token'].shape[1] * cr))
if num_content_tokens < 0:
continue
else:
content_token = content['content_token'][:, :num_content_tokens]
if sample_type == 'debug':
trans_out = self.transformer.sample_debug(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=fr,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=return_logits,
content_logits=content.get('content_logits', None),
sample_type=sample_type,
**kwargs)
else:
trans_out = self.transformer.sample(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=fr,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=return_logits,
content_logits=content.get('content_logits', None),
sample_type=sample_type,
**kwargs)
content_samples['cond1_cont{}_fr{}_image'.format(cr, fr)] = self.content_codec.decode(trans_out['content_token'])
if return_att_weight:
content_samples['cond1_cont{}_fr{}_image_condition_attention'.format(cr, fr)] = trans_out['condition_attention'] # B x Lt x Ld
content_att = trans_out['content_attention']
                    shape = *content_att.shape[:-1], self.content_codec.token_shape[0], self.content_codec.token_shape[1]
content_samples['cond1_cont{}_fr{}_image_content_attention'.format(cr, fr)] = content_att.view(*shape) # B x Lt x Lt -> B x Lt x H x W
if return_logits:
content_samples['logits'] = trans_out['logits']
self.train()
output = {'condition': batch[self.condition_info['key']]}
output.update(content_samples)
return output
def forward(
self,
batch,
name='none',
**kwargs
):
input = self.prepare_input(batch)
output = self.transformer(input, **kwargs)
return output
| 11,968 | 40.559028 | 154 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/unconditional_dalle.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import torch
import math
from torch import nn
from image_synthesis.utils.misc import instantiate_from_config
import time
import numpy as np
from PIL import Image
import os
from torch.cuda.amp import autocast
class UC_DALLE(nn.Module):
def __init__(
self,
*,
content_info={'key': 'image'},
content_codec_config,
diffusion_config
):
super().__init__()
self.content_info = content_info
self.content_codec = instantiate_from_config(content_codec_config)
self.transformer = instantiate_from_config(diffusion_config)
self.truncation_forward = False
def parameters(self, recurse=True, name=None):
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
names = name.split('+')
params = []
for n in names:
try: # the parameters() method is not overwritten for some classes
                    params += getattr(self, n).parameters(recurse=recurse, name=n)
                except:
                    params += getattr(self, n).parameters(recurse=recurse)
return params
@property
def device(self):
return self.transformer.device
def get_ema_model(self):
return self.transformer
@autocast(enabled=False)
@torch.no_grad()
def prepare_content(self, batch, with_mask=False):
cont_key = self.content_info['key']
cont = batch[cont_key]
if torch.is_tensor(cont):
cont = cont.to(self.device)
if not with_mask:
cont = self.content_codec.get_tokens(cont)
else:
            mask = batch['mask']
cont = self.content_codec.get_tokens(cont, mask, enc_with_mask=False)
cont_ = {}
for k, v in cont.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cont_['content_' + k] = v
return cont_
@torch.no_grad()
def prepare_input(self, batch):
input = self.prepare_content(batch)
return input
def predict_start_with_truncation(self, func, sample_type):
if sample_type[-1] == 'p':
truncation_k = int(sample_type[:-1].replace('top', ''))
content_codec = self.content_codec
save_path = self.this_save_path
def wrapper(*args, **kwards):
out = func(*args, **kwards)
val, ind = out.topk(k = truncation_k, dim=1)
probs = torch.full_like(out, -70)
probs.scatter_(1, ind, val)
return probs
return wrapper
elif sample_type[-1] == 'r':
truncation_r = float(sample_type[:-1].replace('top', ''))
def wrapper(*args, **kwards):
out = func(*args, **kwards)
temp, indices = torch.sort(out, 1, descending=True)
temp1 = torch.exp(temp)
temp2 = temp1.cumsum(dim=1)
temp3 = temp2 < truncation_r
new_temp = torch.full_like(temp3[:,0:1,:], True)
temp6 = torch.cat((new_temp, temp3), dim=1)
temp3 = temp6[:,:-1,:]
temp4 = temp3.gather(1, indices.argsort(1))
temp5 = temp4.float()*out+(1-temp4.float())*(-70)
probs = temp5
return probs
return wrapper
        else:
            raise ValueError("unknown sample type: {}".format(sample_type))
@torch.no_grad()
def generate_content(
self,
*,
batch,
filter_ratio = 0.5,
temperature = 1.0,
content_ratio = 0.0,
replicate=1,
return_att_weight=False,
sample_type="normal",
):
self.eval()
content_token = None
if sample_type.split(',')[0][:3] == "top" and self.truncation_forward == False:
self.transformer.predict_start = self.predict_start_with_truncation(self.transformer.predict_start, sample_type.split(',')[0])
self.truncation_forward = True
trans_out = self.transformer.sample(condition_token=None,
condition_mask=None,
condition_embed=None,
content_token=content_token,
filter_ratio=filter_ratio,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=False,
print_log=False,
sample_type=sample_type,
batch_size=replicate)
content = self.content_codec.decode(trans_out['content_token']) #(8,1024)->(8,3,256,256)
self.train()
out = {
'content': content
}
return out
@torch.no_grad()
def reconstruct(
self,
input
):
if torch.is_tensor(input):
input = input.to(self.device)
cont = self.content_codec.get_tokens(input)
cont_ = {}
for k, v in cont.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cont_['content_' + k] = v
rec = self.content_codec.decode(cont_['content_token'])
return rec
@torch.no_grad()
def sample(
self,
batch,
clip = None,
temperature = 1.,
return_rec = True,
filter_ratio = [0],
content_ratio = [1], # the ratio to keep the encoded content tokens
return_att_weight=False,
return_logits=False,
sample_type="normal",
**kwargs,
):
self.eval()
content = self.prepare_content(batch)
content_samples = {'input_image': batch[self.content_info['key']]}
if return_rec:
content_samples['reconstruction_image'] = self.content_codec.decode(content['content_token'])
# import pdb; pdb.set_trace()
for fr in filter_ratio:
for cr in content_ratio:
num_content_tokens = int((content['content_token'].shape[1] * cr))
if num_content_tokens < 0:
continue
else:
content_token = content['content_token'][:, :num_content_tokens]
trans_out = self.transformer.sample(condition_token=None,
condition_mask=None,
condition_embed=None,
content_token=content_token,
filter_ratio=fr,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=return_logits,
content_logits=content.get('content_logits', None),
sample_type=sample_type,
batch_size=batch[self.content_info['key']].shape[0],
**kwargs)
content_samples['cond1_cont{}_fr{}_image'.format(cr, fr)] = self.content_codec.decode(trans_out['content_token'])
if return_logits:
content_samples['logits'] = trans_out['logits']
self.train()
output = {}
output.update(content_samples)
return output
def forward(
self,
batch,
name='none',
**kwargs
):
input = self.prepare_input(batch)
output = self.transformer(input, **kwargs)
return output
| 8,216 | 35.52 | 138 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/models/dalle.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import torch
import math
from torch import nn
from image_synthesis.utils.misc import instantiate_from_config
import time
import numpy as np
from PIL import Image
import os
from torch.cuda.amp import autocast
class DALLE(nn.Module):
def __init__(
self,
*,
content_info={'key': 'image'},
condition_info={'key': 'text'},
learnable_cf=False,
content_codec_config,
condition_codec_config,
diffusion_config
):
super().__init__()
self.content_info = content_info
self.condition_info = condition_info
self.guidance_scale = 1.0
self.learnable_cf = learnable_cf
self.content_codec = instantiate_from_config(content_codec_config)
self.condition_codec = instantiate_from_config(condition_codec_config)
self.transformer = instantiate_from_config(diffusion_config)
self.truncation_forward = False
def parameters(self, recurse=True, name=None):
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
names = name.split('+')
params = []
for n in names:
try: # the parameters() method is not overwritten for some classes
                    params += getattr(self, n).parameters(recurse=recurse, name=n)
                except:
                    params += getattr(self, n).parameters(recurse=recurse)
return params
@property
def device(self):
return self.transformer.device
def get_ema_model(self):
return self.transformer
@torch.no_grad()
def prepare_condition(self, batch, condition=None):
cond_key = self.condition_info['key']
cond = batch[cond_key] if condition is None else condition
if torch.is_tensor(cond):
cond = cond.to(self.device)
cond = self.condition_codec.get_tokens(cond)
cond_ = {}
for k, v in cond.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cond_['condition_' + k] = v
return cond_
@autocast(enabled=False)
@torch.no_grad()
def prepare_content(self, batch, with_mask=False):
cont_key = self.content_info['key']
cont = batch[cont_key]
if torch.is_tensor(cont):
cont = cont.to(self.device)
if not with_mask:
cont = self.content_codec.get_tokens(cont)
else:
            mask = batch['mask']
cont = self.content_codec.get_tokens(cont, mask, enc_with_mask=False)
cont_ = {}
for k, v in cont.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cont_['content_' + k] = v
return cont_
@autocast(enabled=False)
@torch.no_grad()
def prepare_input(self, batch):
input = self.prepare_condition(batch)
input.update(self.prepare_content(batch))
return input
def p_sample_with_truncation(self, func, sample_type):
truncation_rate = float(sample_type.replace('q', ''))
def wrapper(*args, **kwards):
out = func(*args, **kwards)
import random
if random.random() < truncation_rate:
out = func(out, args[1], args[2], **kwards)
return out
return wrapper
def predict_start_with_truncation(self, func, sample_type):
if sample_type[-1] == 'p':
truncation_k = int(sample_type[:-1].replace('top', ''))
content_codec = self.content_codec
save_path = self.this_save_path
def wrapper(*args, **kwards):
out = func(*args, **kwards)
val, ind = out.topk(k = truncation_k, dim=1)
probs = torch.full_like(out, -70)
probs.scatter_(1, ind, val)
return probs
return wrapper
elif sample_type[-1] == 'r':
truncation_r = float(sample_type[:-1].replace('top', ''))
def wrapper(*args, **kwards):
out = func(*args, **kwards)
                # note: `out` is identical across the batch here, so this could equivalently be computed on out[0]
temp, indices = torch.sort(out, 1, descending=True)
temp1 = torch.exp(temp)
temp2 = temp1.cumsum(dim=1)
temp3 = temp2 < truncation_r
new_temp = torch.full_like(temp3[:,0:1,:], True)
temp6 = torch.cat((new_temp, temp3), dim=1)
temp3 = temp6[:,:-1,:]
temp4 = temp3.gather(1, indices.argsort(1))
temp5 = temp4.float()*out+(1-temp4.float())*(-70)
probs = temp5
return probs
return wrapper
        else:
            raise ValueError("unknown sample type: {}".format(sample_type))
@torch.no_grad()
def generate_content(
self,
*,
batch,
condition=None,
filter_ratio = 0.5,
temperature = 1.0,
content_ratio = 0.0,
replicate=1,
return_att_weight=False,
sample_type="top0.85r",
):
self.eval()
if condition is None:
condition = self.prepare_condition(batch=batch)
else:
condition = self.prepare_condition(batch=None, condition=condition)
batch_size = len(batch['text']) * replicate
if self.learnable_cf:
cf_cond_emb = self.transformer.empty_text_embed.unsqueeze(0).repeat(batch_size, 1, 1)
else:
batch['text'] = [''] * batch_size
cf_condition = self.prepare_condition(batch=batch)
cf_cond_emb = self.transformer.condition_emb(cf_condition['condition_token']).float()
def cf_predict_start(log_x_t, cond_emb, t):
log_x_recon = self.transformer.predict_start(log_x_t, cond_emb, t)[:, :-1]
if abs(self.guidance_scale - 1) < 1e-3:
return torch.cat((log_x_recon, self.transformer.zero_vector), dim=1)
cf_log_x_recon = self.transformer.predict_start(log_x_t, cf_cond_emb.type_as(cond_emb), t)[:, :-1]
log_new_x_recon = cf_log_x_recon + self.guidance_scale * (log_x_recon - cf_log_x_recon)
log_new_x_recon -= torch.logsumexp(log_new_x_recon, dim=1, keepdim=True)
log_new_x_recon = log_new_x_recon.clamp(-70, 0)
log_pred = torch.cat((log_new_x_recon, self.transformer.zero_vector), dim=1)
return log_pred
if replicate != 1:
for k in condition.keys():
if condition[k] is not None:
condition[k] = torch.cat([condition[k] for _ in range(replicate)], dim=0)
content_token = None
if len(sample_type.split(',')) > 1:
if sample_type.split(',')[1][:1]=='q':
self.transformer.p_sample = self.p_sample_with_truncation(self.transformer.p_sample, sample_type.split(',')[1])
if sample_type.split(',')[0][:3] == "top" and self.truncation_forward == False:
self.transformer.cf_predict_start = self.predict_start_with_truncation(cf_predict_start, sample_type.split(',')[0])
self.truncation_forward = True
if len(sample_type.split(',')) == 2 and sample_type.split(',')[1][:4]=='time' and int(float(sample_type.split(',')[1][4:])) >= 2:
trans_out = self.transformer.sample_fast(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=filter_ratio,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=False,
print_log=False,
sample_type=sample_type,
skip_step=int(float(sample_type.split(',')[1][4:])-1))
else:
if 'time' in sample_type and float(sample_type.split(',')[1][4:]) < 1:
self.transformer.prior_ps = int(1024 // self.transformer.num_timesteps * float(sample_type.split(',')[1][4:]))
if self.transformer.prior_rule == 0:
self.transformer.prior_rule = 1
self.transformer.update_n_sample()
trans_out = self.transformer.sample(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=filter_ratio,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=False,
print_log=False,
sample_type=sample_type)
content = self.content_codec.decode(trans_out['content_token']) #(8,1024)->(8,3,256,256)
self.train()
out = {
'content': content
}
return out
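    # Editorial note (not in the original file): sample_type is a small comma-separated spec, e.g.
    # "top0.85r" (nucleus-style truncation of predict_start), "top0.85r,q0.5" (additionally wrap
    # p_sample so it is re-applied with probability 0.5), or "top0.85r,timeK" with K >= 2, which
    # switches to sample_fast with skip_step = K - 1.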
@torch.no_grad()
def reconstruct(
self,
input
):
if torch.is_tensor(input):
input = input.to(self.device)
cont = self.content_codec.get_tokens(input)
cont_ = {}
for k, v in cont.items():
v = v.to(self.device) if torch.is_tensor(v) else v
cont_['content_' + k] = v
rec = self.content_codec.decode(cont_['content_token'])
return rec
@torch.no_grad()
def sample(
self,
batch,
clip = None,
temperature = 1.,
return_rec = True,
filter_ratio = [0, 0.5, 1.0],
content_ratio = [1], # the ratio to keep the encoded content tokens
return_att_weight=False,
return_logits=False,
sample_type="normal",
**kwargs,
):
self.eval()
condition = self.prepare_condition(batch)
content = self.prepare_content(batch)
content_samples = {'input_image': batch[self.content_info['key']]}
if return_rec:
content_samples['reconstruction_image'] = self.content_codec.decode(content['content_token'])
for fr in filter_ratio:
for cr in content_ratio:
num_content_tokens = int((content['content_token'].shape[1] * cr))
if num_content_tokens < 0:
continue
else:
content_token = content['content_token'][:, :num_content_tokens]
if sample_type == 'debug':
trans_out = self.transformer.sample_debug(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=fr,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=return_logits,
content_logits=content.get('content_logits', None),
sample_type=sample_type,
**kwargs)
else:
trans_out = self.transformer.sample(condition_token=condition['condition_token'],
condition_mask=condition.get('condition_mask', None),
condition_embed=condition.get('condition_embed_token', None),
content_token=content_token,
filter_ratio=fr,
temperature=temperature,
return_att_weight=return_att_weight,
return_logits=return_logits,
content_logits=content.get('content_logits', None),
sample_type=sample_type,
**kwargs)
content_samples['cond1_cont{}_fr{}_image'.format(cr, fr)] = self.content_codec.decode(trans_out['content_token'])
if return_att_weight:
content_samples['cond1_cont{}_fr{}_image_condition_attention'.format(cr, fr)] = trans_out['condition_attention'] # B x Lt x Ld
content_att = trans_out['content_attention']
                    shape = *content_att.shape[:-1], self.content_codec.token_shape[0], self.content_codec.token_shape[1]
content_samples['cond1_cont{}_fr{}_image_content_attention'.format(cr, fr)] = content_att.view(*shape) # B x Lt x Lt -> B x Lt x H x W
if return_logits:
content_samples['logits'] = trans_out['logits']
self.train()
output = {'condition': batch[self.condition_info['key']]}
output.update(content_samples)
return output
def forward(
self,
batch,
name='none',
**kwargs
):
input = self.prepare_input(batch)
output = self.transformer(input, **kwargs)
return output
| 14,512 | 43.246951 | 154 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/class_embedding.py | import torch
import torch.nn as nn
from .base_embedding import BaseEmbedding
class ClassEmbedding(BaseEmbedding):
def __init__(self,
num_embed=1000,
embed_dim=512,
identity=False,
trainable=True,
):
super().__init__()
self.identity = identity
self.trainable = trainable
self.num_embed = num_embed
self.embed_dim = embed_dim
if self.identity == False:
self.emb = nn.Embedding(self.num_embed, embed_dim)
self._set_trainable()
def forward(self, index, **kwargs):
"""
index: B x L, index
mask: B x L, bool type. The value of False indicating padded index
"""
if self.identity == True:
return index
else:
emb = self.emb(index).unsqueeze(1)
return emb
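# Editorial note (not part of the original file): with identity=False, a batch of class labels of
# shape (B,) is embedded to a (B, 1, embed_dim) tensor, i.e. a length-1 condition sequence; with
# identity=True the labels are passed through unchanged.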
| 899 | 26.272727 | 74 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/dalle_mask_image_embedding.py | import torch
import torch.nn as nn
from .base_embedding import BaseEmbedding
class DalleMaskImageEmbedding(BaseEmbedding):
def __init__(self,
num_embed=8192,
spatial_size=[32, 32], # height and with
embed_dim=3968,
trainable=True,
pos_emb_type='embedding'
):
super().__init__()
if isinstance(spatial_size, int):
spatial_size = [spatial_size, spatial_size]
self.spatial_size = spatial_size
self.num_embed = num_embed + 1
self.embed_dim = embed_dim
self.trainable = trainable
self.pos_emb_type = pos_emb_type
assert self.pos_emb_type in ['embedding', 'parameter']
self.emb = nn.Embedding(self.num_embed, embed_dim)
if self.pos_emb_type == 'embedding':
self.height_emb = nn.Embedding(self.spatial_size[0], embed_dim) # height
self.width_emb = nn.Embedding(self.spatial_size[1], embed_dim) # width
else:
self.height_emb = nn.Parameter(torch.zeros(1, self.spatial_size[0], embed_dim)) # height #32,1024
self.width_emb = nn.Parameter(torch.zeros(1, self.spatial_size[1], embed_dim)) # width #32,1024
self._set_trainable()
def forward(self, index, **kwargs):
assert index.dim() == 2 # B x L
try:
index[index < 0] = 0
emb = self.emb(index)
except:
raise RuntimeError('IndexError: index out of range in self, max index {}, num embed {}'.format(index.max(), self.num_embed))
# add col and row embedding
if emb.shape[1] > 0:
# if False:
if self.pos_emb_type == 'embedding':
height_emb = self.height_emb(torch.arange(self.spatial_size[0], device=index.device).view(1, self.spatial_size[0])).unsqueeze(2) # 1 x H x D -> 1 x H x 1 x D
width_emb = self.width_emb(torch.arange(self.spatial_size[1], device=index.device).view(1, self.spatial_size[1])).unsqueeze(1) # 1 x W x D -> 1 x 1 x W x D
else:
height_emb = self.height_emb.unsqueeze(2) # 1 x H x D -> 1 x H x 1 x D
width_emb = self.width_emb.unsqueeze(1) # 1 x W x D -> 1 x 1 x W x D
pos_emb = (height_emb + width_emb).view(1, self.spatial_size[0] * self.spatial_size[1], -1) # 1 x H x W x D -> 1 x L xD
emb = emb + pos_emb[:, :emb.shape[1], :]
return emb
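# Editorial note (not part of the original file): the broadcast sum of height_emb (1, H, 1, D) and
# width_emb (1, 1, W, D) gives a (1, H, W, D) positional grid, which is flattened to (1, H*W, D)
# and added to the token embeddings position by position.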
| 2,507 | 42.241379 | 173 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/base_embedding.py | import torch
from torch import nn
class BaseEmbedding(nn.Module):
def get_loss(self):
return None
def forward(self, **kwargs):
raise NotImplementedError
def train(self, mode=True):
self.training = mode
if self.trainable and mode:
super().train()
return self
def _set_trainable(self):
if not self.trainable:
for pn, p in self.named_parameters():
p.requires_grad = False
self.eval()
| 507 | 19.32 | 49 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/embeddings/clip_text_embedding.py | import torch
import torch.nn as nn
from image_synthesis.modeling.modules.clip import clip
from image_synthesis.modeling.modules.clip import model as clip_model
from .base_embedding import BaseEmbedding
class CLIPTextEmbedding(BaseEmbedding):
def __init__(self,
clip_name='ViT-B/32',
num_embed=49408,
normalize=True,
pick_last_embedding=True,
keep_seq_len_dim=False,
additional_last_embedding=False,
embed_dim=1024,
):
super().__init__()
self.num_embed = num_embed
self.clip_name = clip_name
self.normalize = normalize
self.pick_last_embedding = pick_last_embedding
self.keep_seq_len_dim = keep_seq_len_dim
self.additional_last_embedding = additional_last_embedding
model, _ = clip.load(clip_name, device='cpu',jit=False)
model = clip_model.build_model(model.state_dict())
self.token_embedding = model.token_embedding
self.positional_embedding = model.positional_embedding
self.transformer = model.transformer
self.ln_final = model.ln_final
self.text_projection = model.text_projection
if embed_dim == 1024:
self.embed_dim = self.text_projection.shape[1]*2 # to fit 1024 dimension of image embedding
else:
self.embed_dim = self.text_projection.shape[1] # original output, 512 dim
self.trainable = False
self._set_trainable()
@property
def dtype(self):
return self.transformer.resblocks[0].attn.in_proj_weight.dtype
def encode_text(self, text):
text[text < 0] = 0 # some padded text token maybe negative, so set them to 0
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
if self.pick_last_embedding:
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection # [batch_size, transformer.width]
if self.keep_seq_len_dim:
x = x.unsqueeze(dim=1) # [batch_size, 1, transformer.width]
return x
def forward(self, index, **kwargs):
"""
index: B x L, index
mask: B x L, bool type. The value of False indicating padded index
"""
assert index.dim() == 2 # B x L
text_feature = self.encode_text(index)
if self.embed_dim == 1024:
text_features = torch.cat((text_feature, text_feature), dim=2)
else:
text_features = text_feature
if self.normalize:
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
if self.additional_last_embedding == True:
last_feature = text_feature[torch.arange(text_feature.shape[0]), index.argmax(dim=-1)] @ self.text_projection
if self.keep_seq_len_dim:
last_feature = last_feature.unsqueeze(dim=1)
return text_features, last_feature
return text_features
| 3,423 | 37.47191 | 121 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/utils/misc.py | from numpy.core.fromnumeric import resize
from numpy.lib.function_base import kaiser
from numpy.lib.npyio import save
import torch
import random
import math
from image_synthesis.distributed.distributed import all_reduce, get_world_size
def logits_top_k(logits, filter_ratio = 0.5, minimum=1, pad_value=None):
logits = logits.contiguous()
if filter_ratio < 0:
filter_ratio = - filter_ratio
if filter_ratio >= 0 and filter_ratio <= 1.0:
num_logits = logits.shape[-1]
k = max(int((1 - filter_ratio) * num_logits), minimum)
else:
k = max(int(filter_ratio), minimum)
val, ind = torch.topk(input=logits, k=k, dim=-1)
if pad_value is None:
pad_value = float('-inf')
probs = torch.full_like(logits, pad_value)
# probs.scatter_(1, ind, val)
probs.scatter_(-1, ind, val)
return probs
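# Editorial worked example (not in the original file): with filter_ratio=0.5 and 4 logits,
# k = max(int((1 - 0.5) * 4), 1) = 2, so only the two largest logits are kept and the rest are
# set to pad_value (default -inf).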
def mask_with_top_k(x, k, largest=True, abs=True, pad_value=None):
"""
    Mask the input tensor along the last dimension.
    Values not in the top-k are set to `pad_value` (default -inf); pass pad_value=0 to just zero them.
"""
if abs:
x_ = x.abs()
else:
x_ = x
_, top_k_index = x_.topk(k=k, dim=-1, largest=largest) # BHW x K
mask = torch.zeros_like(x)
ones = torch.ones_like(x)
mask.scatter_(-1, index=top_k_index, src=ones)
x = x * mask
if pad_value is None or pad_value != 0:
if pad_value is None:
pad_value = float('-inf')
x[mask == 0] = x[mask == 0] + pad_value
return x
def sample_index_randomly(x, k, filter_ratio=0, largest=True):
"""
    x: should be a 2D tensor; randomly sample along the last dimension
    """
    assert x.dim() == 2, 'currently only two dimensional tensors are supported!'
if filter_ratio < 0:
filter_ratio = - filter_ratio
if filter_ratio >= 0 and filter_ratio <= 1.0:
num_logits = x.shape[-1]
topk = max(int((1 - filter_ratio) * num_logits), k)
else:
topk = max(int(filter_ratio), k)
_, top_k_index = x.topk(k=topk, dim=-1, largest=largest) # BHW x K
sampled = []
for i in range(x.shape[0]):
index = top_k_index[i]
sampled_ = torch.tensor(random.sample(index.tolist(), k)).to(index)
sampled.append(sampled_)
sampled = torch.stack(sampled, dim=0).to(top_k_index)
return sampled
def get_token_type(mask, token_shape):
"""
Get the token type according to the given mask and token_shape.
    Note that we divide tokens into 3 types:
        0: fully unmasked tokens
        1: fully masked tokens
        2: partially masked tokens
    Args:
        mask: 4D tensor, B x 1 x H x W, the mask of the original image. 1 denotes masked pixels
            and 0 denotes unmasked pixels.
        token_shape: [H/r, W/r], the shape of the token map
"""
mask_float = mask.float()
mask_unshuffle = pixel_unshuffle(mask_float, token_shape) # B x r^2 x H/r x W/r
scale_factor = mask_unshuffle.shape[1]
mask_unshuffle = mask_unshuffle.sum(dim=1, keepdim=True) # B x 1 x H/r x W/r
token_type = torch.zeros_like(mask_unshuffle).long() + 2
token_type[mask_unshuffle==0] = 0 # unmasked tokens
token_type[mask_unshuffle==scale_factor] = 1 # fully masked tokens
return token_type
def gen_attention_mask(H, W, type='full', causal=True, condition_seq_len=0, **kwargs):
content_seq_len = H * W
seq_len = content_seq_len + condition_seq_len
mask = torch.zeros(seq_len, seq_len)
mask[:, :condition_seq_len] = 1
if type == 'full':
mask += 1
elif type == 'dalle_row':
for idx in range(content_seq_len):
h = idx // W
w = idx % W
for w_ in range(w-W, w+1):
i = h * W + w_
mask[idx+condition_seq_len][i+condition_seq_len] = 1
elif type == 'dalle_col':
for idx in range(content_seq_len):
h = idx // W
w = idx % W
for h_ in range(h+1):
i = h_ * W + w
mask[idx+condition_seq_len][i+condition_seq_len] = 1
elif type == 'dalle_conv':
kernel_size = kwargs['kernel_size']
if isinstance(kernel_size, int):
kernel_size = [kernel_size, kernel_size]
k_h, k_w = kernel_size[0], kernel_size[1]
half_k_h = int(k_h/2)
half_k_w = int(k_w/2)
step_over_w = W - k_w
for idx in range(content_seq_len):
max_kernel_count = (half_k_h+1) * k_w
step_over_count = step_over_w * (half_k_h+1)
max_pre = max_kernel_count + step_over_count
max_pre = min(idx+1, max_pre)
for i in range(max_pre):
valid = False
a = i % W
if a > half_k_w and a <= half_k_w + step_over_w:
valid = False
else:
valid = True
if valid:
mask[idx+condition_seq_len][idx-i+condition_seq_len] = 1
else:
raise NotImplementedError('attention type {} not implemented!'.format(type))
if causal:
causal_mask = torch.tril(torch.ones(content_seq_len+condition_seq_len, content_seq_len+condition_seq_len))
mask *= causal_mask
return mask
| 5,282 | 32.01875 | 114 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/transformers/diffusion_transformer.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import math
import torch
from torch import nn
import torch.nn.functional as F
from image_synthesis.utils.misc import instantiate_from_config
import numpy as np
from einops import rearrange
from image_synthesis.distributed.distributed import is_primary, get_rank
from inspect import isfunction
from torch.cuda.amp import autocast
from image_synthesis.modeling.transformers.transformer_utils import Text2ImageTransformer
eps = 1e-8
def sum_except_batch(x, num_dims=1):
return x.reshape(*x.shape[:num_dims], -1).sum(-1)
def log_1_min_a(a):
return torch.log(1 - a.exp() + 1e-40)
def log_add_exp(a, b):
maximum = torch.max(a, b)
return maximum + torch.log(torch.exp(a - maximum) + torch.exp(b - maximum))
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def log_categorical(log_x_start, log_prob):
return (log_x_start.exp() * log_prob).sum(dim=1)
def index_to_log_onehot(x, num_classes):
assert x.max().item() < num_classes, \
f'Error: {x.max().item()} >= {num_classes}'
x_onehot = F.one_hot(x, num_classes)
permute_order = (0, -1) + tuple(range(1, len(x.size())))
x_onehot = x_onehot.permute(permute_order)
log_x = torch.log(x_onehot.float().clamp(min=1e-30))
return log_x
def log_onehot_to_index(log_x):
return log_x.argmax(1)
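# Editorial sketch (not in the original file): index_to_log_onehot and log_onehot_to_index are
# inverses up to the log/one-hot representation, e.g.
#   x = torch.tensor([[3, 1]])
#   assert (log_onehot_to_index(index_to_log_onehot(x, num_classes=5)) == x).all()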
def alpha_schedule(time_step, N=100, att_1 = 0.99999, att_T = 0.000009, ctt_1 = 0.000009, ctt_T = 0.99999):
att = np.arange(0, time_step)/(time_step-1)*(att_T - att_1) + att_1
att = np.concatenate(([1], att))
at = att[1:]/att[:-1]
ctt = np.arange(0, time_step)/(time_step-1)*(ctt_T - ctt_1) + ctt_1
ctt = np.concatenate(([0], ctt))
one_minus_ctt = 1 - ctt
one_minus_ct = one_minus_ctt[1:] / one_minus_ctt[:-1]
ct = 1-one_minus_ct
bt = (1-at-ct)/N
att = np.concatenate((att[1:], [1]))
ctt = np.concatenate((ctt[1:], [0]))
btt = (1-att-ctt)/N
return at, bt, ct, att, btt, ctt
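# Editorial note (not in the original file): alpha_schedule returns per-step (at, bt, ct) and
# cumulative (att, btt, ctt) transition weights with at + N*bt + ct == 1 and att + N*btt + ctt == 1:
# per step, a fraction at of the probability mass stays on the current token, a fraction N*bt is
# spread uniformly over all N ordinary tokens, and a fraction ct moves to the [MASK] token.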
class DiffusionTransformer(nn.Module):
def __init__(
self,
*,
content_emb_config=None,
condition_emb_config=None,
transformer_config=None,
diffusion_step=100,
alpha_init_type='cos',
auxiliary_loss_weight=0,
adaptive_auxiliary_loss=False,
mask_weight=[1,1],
learnable_cf=False,
):
super().__init__()
if condition_emb_config is None:
self.condition_emb = None
else:
# for condition and config, we learn a seperate embedding
self.condition_emb = instantiate_from_config(condition_emb_config)
self.condition_dim = self.condition_emb.embed_dim
transformer_config['params']['diffusion_step'] = diffusion_step
transformer_config['params']['content_emb_config'] = content_emb_config
self.transformer = instantiate_from_config(transformer_config)
self.content_seq_len = transformer_config['params']['content_seq_len']
self.amp = False
self.num_classes = self.transformer.content_emb.num_embed
self.loss_type = 'vb_stochastic'
self.shape = transformer_config['params']['content_seq_len']
self.num_timesteps = diffusion_step
self.parametrization = 'x0'
self.auxiliary_loss_weight = auxiliary_loss_weight
self.adaptive_auxiliary_loss = adaptive_auxiliary_loss
self.mask_weight = mask_weight
if alpha_init_type == "alpha1":
at, bt, ct, att, btt, ctt = alpha_schedule(self.num_timesteps, N=self.num_classes-1)
        else:
            raise ValueError("unsupported alpha_init_type: {}".format(alpha_init_type))
at = torch.tensor(at.astype('float64'))
bt = torch.tensor(bt.astype('float64'))
ct = torch.tensor(ct.astype('float64'))
log_at = torch.log(at)
log_bt = torch.log(bt)
log_ct = torch.log(ct)
att = torch.tensor(att.astype('float64'))
btt = torch.tensor(btt.astype('float64'))
ctt = torch.tensor(ctt.astype('float64'))
log_cumprod_at = torch.log(att)
log_cumprod_bt = torch.log(btt)
log_cumprod_ct = torch.log(ctt)
log_1_min_ct = log_1_min_a(log_ct)
log_1_min_cumprod_ct = log_1_min_a(log_cumprod_ct)
assert log_add_exp(log_ct, log_1_min_ct).abs().sum().item() < 1.e-5
assert log_add_exp(log_cumprod_ct, log_1_min_cumprod_ct).abs().sum().item() < 1.e-5
self.diffusion_acc_list = [0] * self.num_timesteps
self.diffusion_keep_list = [0] * self.num_timesteps
# Convert to float32 and register buffers.
self.register_buffer('log_at', log_at.float())
self.register_buffer('log_bt', log_bt.float())
self.register_buffer('log_ct', log_ct.float())
self.register_buffer('log_cumprod_at', log_cumprod_at.float())
self.register_buffer('log_cumprod_bt', log_cumprod_bt.float())
self.register_buffer('log_cumprod_ct', log_cumprod_ct.float())
self.register_buffer('log_1_min_ct', log_1_min_ct.float())
self.register_buffer('log_1_min_cumprod_ct', log_1_min_cumprod_ct.float())
self.register_buffer('Lt_history', torch.zeros(self.num_timesteps))
self.register_buffer('Lt_count', torch.zeros(self.num_timesteps))
self.zero_vector = None
if learnable_cf:
self.empty_text_embed = torch.nn.Parameter(torch.randn(size=(77, 512), requires_grad=True, dtype=torch.float64))
self.prior_rule = 0 # inference rule: 0 for VQ-Diffusion v1, 1 for only high-quality inference, 2 for purity prior
self.prior_ps = 1024 # max number to sample per step
self.prior_weight = 0 # probability adjust parameter, 'r' in Equation.11 of Improved VQ-Diffusion
self.update_n_sample()
self.learnable_cf = learnable_cf
def update_n_sample(self):
if self.num_timesteps == 100:
if self.prior_ps <= 10:
self.n_sample = [1, 6] + [11, 10, 10] * 32 + [11, 15]
else:
self.n_sample = [1, 10] + [11, 10, 10] * 32 + [11, 11]
elif self.num_timesteps == 50:
self.n_sample = [10] + [21, 20] * 24 + [30]
elif self.num_timesteps == 25:
self.n_sample = [21] + [41] * 23 + [60]
elif self.num_timesteps == 10:
self.n_sample = [69] + [102] * 8 + [139]
def multinomial_kl(self, log_prob1, log_prob2): # compute KL loss on log_prob
kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1)
return kl
def q_pred_one_timestep(self, log_x_t, t): # q(xt|xt_1)
log_at = extract(self.log_at, t, log_x_t.shape) # at
log_bt = extract(self.log_bt, t, log_x_t.shape) # bt
log_ct = extract(self.log_ct, t, log_x_t.shape) # ct
log_1_min_ct = extract(self.log_1_min_ct, t, log_x_t.shape) # 1-ct
log_probs = torch.cat(
[
log_add_exp(log_x_t[:,:-1,:]+log_at, log_bt),
log_add_exp(log_x_t[:, -1:, :] + log_1_min_ct, log_ct)
],
dim=1
)
return log_probs
def q_pred(self, log_x_start, t): # q(xt|x0)
# log_x_start can be onehot or not
t = (t + (self.num_timesteps + 1))%(self.num_timesteps + 1)
log_cumprod_at = extract(self.log_cumprod_at, t, log_x_start.shape) # at~
log_cumprod_bt = extract(self.log_cumprod_bt, t, log_x_start.shape) # bt~
log_cumprod_ct = extract(self.log_cumprod_ct, t, log_x_start.shape) # ct~
log_1_min_cumprod_ct = extract(self.log_1_min_cumprod_ct, t, log_x_start.shape) # 1-ct~
log_probs = torch.cat(
[
log_add_exp(log_x_start[:,:-1,:]+log_cumprod_at, log_cumprod_bt),
log_add_exp(log_x_start[:,-1:,:]+log_1_min_cumprod_ct, log_cumprod_ct)
],
dim=1
)
return log_probs
def predict_start(self, log_x_t, cond_emb, t): # p(x0|xt)
x_t = log_onehot_to_index(log_x_t)
if self.amp == True:
with autocast():
out = self.transformer(x_t, cond_emb, t)
else:
out = self.transformer(x_t, cond_emb, t)
assert out.size(0) == x_t.size(0)
assert out.size(1) == self.num_classes-1
assert out.size()[2:] == x_t.size()[1:]
log_pred = F.log_softmax(out.double(), dim=1).float()
batch_size = log_x_t.size()[0]
if self.zero_vector is None or self.zero_vector.shape[0] != batch_size:
self.zero_vector = torch.zeros(batch_size, 1, self.content_seq_len).type_as(log_x_t)- 70
log_pred = torch.cat((log_pred, self.zero_vector), dim=1)
log_pred = torch.clamp(log_pred, -70, 0)
return log_pred
def cf_predict_start(self, log_x_t, cond_emb, t):
return self.predict_start(log_x_t, cond_emb, t)
def q_posterior(self, log_x_start, log_x_t, t): # p_theta(xt_1|xt) = sum(q(xt-1|xt,x0')*p(x0'))
# notice that log_x_t is onehot
assert t.min().item() >= 0 and t.max().item() < self.num_timesteps
batch_size = log_x_start.size()[0]
onehot_x_t = log_onehot_to_index(log_x_t)
mask = (onehot_x_t == self.num_classes-1).unsqueeze(1)
log_one_vector = torch.zeros(batch_size, 1, 1).type_as(log_x_t)
log_zero_vector = torch.log(log_one_vector+1.0e-30).expand(-1, -1, self.content_seq_len)
log_qt = self.q_pred(log_x_t, t) # q(xt|x0)
# log_qt = torch.cat((log_qt[:,:-1,:], log_zero_vector), dim=1)
log_qt = log_qt[:,:-1,:]
log_cumprod_ct = extract(self.log_cumprod_ct, t, log_x_start.shape) # ct~
ct_cumprod_vector = log_cumprod_ct.expand(-1, self.num_classes-1, -1)
# ct_cumprod_vector = torch.cat((ct_cumprod_vector, log_one_vector), dim=1)
log_qt = (~mask)*log_qt + mask*ct_cumprod_vector
log_qt_one_timestep = self.q_pred_one_timestep(log_x_t, t) # q(xt|xt_1)
log_qt_one_timestep = torch.cat((log_qt_one_timestep[:,:-1,:], log_zero_vector), dim=1)
log_ct = extract(self.log_ct, t, log_x_start.shape) # ct
ct_vector = log_ct.expand(-1, self.num_classes-1, -1)
ct_vector = torch.cat((ct_vector, log_one_vector), dim=1)
log_qt_one_timestep = (~mask)*log_qt_one_timestep + mask*ct_vector
# log_x_start = torch.cat((log_x_start, log_zero_vector), dim=1)
# q = log_x_start - log_qt
q = log_x_start[:,:-1,:] - log_qt
q = torch.cat((q, log_zero_vector), dim=1)
q_log_sum_exp = torch.logsumexp(q, dim=1, keepdim=True)
q = q - q_log_sum_exp
log_EV_xtmin_given_xt_given_xstart = self.q_pred(q, t-1) + log_qt_one_timestep + q_log_sum_exp
return torch.clamp(log_EV_xtmin_given_xt_given_xstart, -70, 0)
    def p_pred(self, log_x, cond_emb, t):             # if x0, first p(x0|xt), then sum(q(xt-1|xt,x0)*p(x0|xt))
if self.parametrization == 'x0':
log_x_recon = self.cf_predict_start(log_x, cond_emb, t)
log_model_pred = self.q_posterior(
log_x_start=log_x_recon, log_x_t=log_x, t=t)
elif self.parametrization == 'direct':
log_model_pred = self.predict_start(log_x, cond_emb, t)
else:
raise ValueError
return log_model_pred, log_x_recon
@torch.no_grad()
def p_sample(self, log_x, cond_emb, t, sampled=None, to_sample=None): # sample q(xt-1) for next step from xt, actually is p(xt-1|xt)
model_log_prob, log_x_recon = self.p_pred(log_x, cond_emb, t)
max_sample_per_step = self.prior_ps # max number to sample per step
if t[0] > 0 and self.prior_rule > 0 and to_sample is not None: # prior_rule: 0 for VQ-Diffusion v1, 1 for only high-quality inference, 2 for purity prior
log_x_idx = log_onehot_to_index(log_x)
if self.prior_rule == 1:
score = torch.ones((log_x.shape[0], log_x.shape[2])).to(log_x.device)
elif self.prior_rule == 2:
score = torch.exp(log_x_recon).max(dim=1).values.clamp(0, 1)
score /= (score.max(dim=1, keepdim=True).values + 1e-10)
if self.prior_rule != 1 and self.prior_weight > 0:
# probability adjust parameter, prior_weight: 'r' in Equation.11 of Improved VQ-Diffusion
prob = ((1 + score * self.prior_weight).unsqueeze(1) * log_x_recon).softmax(dim=1)
prob = prob.log().clamp(-70, 0)
else:
prob = log_x_recon
out = self.log_sample_categorical(prob)
out_idx = log_onehot_to_index(out)
out2_idx = log_x_idx.clone()
_score = score.clone()
if _score.sum() < 1e-6:
_score += 1
_score[log_x_idx != self.num_classes - 1] = 0
for i in range(log_x.shape[0]):
n_sample = min(to_sample - sampled[i], max_sample_per_step)
if to_sample - sampled[i] - n_sample == 1:
n_sample = to_sample - sampled[i]
if n_sample <= 0:
continue
sel = torch.multinomial(_score[i], n_sample)
out2_idx[i][sel] = out_idx[i][sel]
sampled[i] += ((out2_idx[i] != self.num_classes - 1).sum() - (log_x_idx[i] != self.num_classes - 1).sum()).item()
out = index_to_log_onehot(out2_idx, self.num_classes)
else:
# Gumbel sample
out = self.log_sample_categorical(model_log_prob)
sampled = [1024] * log_x.shape[0]
if to_sample is not None:
return out, sampled
else:
return out
def log_sample_categorical(self, logits): # use gumbel to sample onehot vector from log probability
uniform = torch.rand_like(logits)
gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)
sample = (gumbel_noise + logits).argmax(dim=1)
log_sample = index_to_log_onehot(sample, self.num_classes)
return log_sample
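    # --- illustrative sketch (not part of the original class) ---
    # The Gumbel-max trick used above: adding Gumbel(0, 1) noise to log-probabilities and
    # taking the argmax draws a sample from the corresponding categorical distribution.
    # Standalone frequency check with made-up probabilities; it does not touch the model.
    def _demo_gumbel_max_trick(self, num_draws=20000):
        log_probs = torch.log(torch.tensor([0.1, 0.2, 0.3, 0.4]))
        uniform = torch.rand(num_draws, 4)
        gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30)
        samples = (gumbel_noise + log_probs).argmax(dim=-1)
        freqs = torch.bincount(samples, minlength=4).float() / num_draws
        assert torch.allclose(freqs, log_probs.exp(), atol=0.02)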
def q_sample(self, log_x_start, t): # diffusion step, q(xt|x0) and sample xt
log_EV_qxt_x0 = self.q_pred(log_x_start, t)
log_sample = self.log_sample_categorical(log_EV_qxt_x0)
return log_sample
def sample_time(self, b, device, method='uniform'):
if method == 'importance':
if not (self.Lt_count > 10).all():
return self.sample_time(b, device, method='uniform')
Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001
Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1.
pt_all = Lt_sqrt / Lt_sqrt.sum()
t = torch.multinomial(pt_all, num_samples=b, replacement=True)
pt = pt_all.gather(dim=0, index=t)
return t, pt
elif method == 'uniform':
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
pt = torch.ones_like(t).float() / self.num_timesteps
return t, pt
else:
raise ValueError
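    # --- illustrative sketch (not part of the original class) ---
    # How the 'importance' branch above biases the timestep distribution: steps whose
    # running squared loss (Lt_history) is larger are drawn more often, in proportion to
    # sqrt(E[L_t^2]). The history below is made up purely for illustration.
    def _demo_importance_time_sampling(self, num_timesteps=10, batch=4):
        Lt_history = torch.linspace(0.1, 1.0, num_timesteps)   # hypothetical running losses
        Lt_sqrt = torch.sqrt(Lt_history + 1e-10) + 0.0001
        Lt_sqrt[0] = Lt_sqrt[1]                                 # decoder term reuses L1
        pt_all = Lt_sqrt / Lt_sqrt.sum()
        t = torch.multinomial(pt_all, num_samples=batch, replacement=True)
        pt = pt_all.gather(dim=0, index=t)
        return t, pt                                            # sampled steps and their probabilities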
def _train_loss(self, x, cond_emb, is_train=True): # get the KL loss
b, device = x.size(0), x.device
assert self.loss_type == 'vb_stochastic'
x_start = x
t, pt = self.sample_time(b, device, 'importance')
log_x_start = index_to_log_onehot(x_start, self.num_classes)
log_xt = self.q_sample(log_x_start=log_x_start, t=t)
xt = log_onehot_to_index(log_xt)
############### go to p_theta function ###############
log_x0_recon = self.predict_start(log_xt, cond_emb, t=t) # P_theta(x0|xt)
log_model_prob = self.q_posterior(log_x_start=log_x0_recon, log_x_t=log_xt, t=t) # go through q(xt_1|xt,x0)
################## compute acc list ################
x0_recon = log_onehot_to_index(log_x0_recon)
x0_real = x_start
xt_1_recon = log_onehot_to_index(log_model_prob)
xt_recon = log_onehot_to_index(log_xt)
for index in range(t.size()[0]):
this_t = t[index].item()
same_rate = (x0_recon[index] == x0_real[index]).sum().cpu()/x0_real.size()[1]
self.diffusion_acc_list[this_t] = same_rate.item()*0.1 + self.diffusion_acc_list[this_t]*0.9
same_rate = (xt_1_recon[index] == xt_recon[index]).sum().cpu()/xt_recon.size()[1]
self.diffusion_keep_list[this_t] = same_rate.item()*0.1 + self.diffusion_keep_list[this_t]*0.9
# compute log_true_prob now
log_true_prob = self.q_posterior(log_x_start=log_x_start, log_x_t=log_xt, t=t)
kl = self.multinomial_kl(log_true_prob, log_model_prob)
mask_region = (xt == self.num_classes-1).float()
mask_weight = mask_region * self.mask_weight[0] + (1. - mask_region) * self.mask_weight[1]
kl = kl * mask_weight
kl = sum_except_batch(kl)
decoder_nll = -log_categorical(log_x_start, log_model_prob)
decoder_nll = sum_except_batch(decoder_nll)
mask = (t == torch.zeros_like(t)).float()
kl_loss = mask * decoder_nll + (1. - mask) * kl
Lt2 = kl_loss.pow(2)
Lt2_prev = self.Lt_history.gather(dim=0, index=t)
new_Lt_history = (0.1 * Lt2 + 0.9 * Lt2_prev).detach()
self.Lt_history.scatter_(dim=0, index=t, src=new_Lt_history)
self.Lt_count.scatter_add_(dim=0, index=t, src=torch.ones_like(Lt2))
        # up-weight the kl loss term by 1/pt (importance-sampling correction)
# vb_loss = kl_loss / pt + kl_prior
loss1 = kl_loss / pt
vb_loss = loss1
if self.auxiliary_loss_weight != 0 and is_train==True:
kl_aux = self.multinomial_kl(log_x_start[:,:-1,:], log_x0_recon[:,:-1,:])
kl_aux = kl_aux * mask_weight
kl_aux = sum_except_batch(kl_aux)
kl_aux_loss = mask * decoder_nll + (1. - mask) * kl_aux
if self.adaptive_auxiliary_loss == True:
addition_loss_weight = (1-t/self.num_timesteps) + 1.0
else:
addition_loss_weight = 1.0
loss2 = addition_loss_weight * self.auxiliary_loss_weight * kl_aux_loss / pt
vb_loss += loss2
return log_model_prob, vb_loss
@property
def device(self):
return self.transformer.to_logits[-1].weight.device
def parameters(self, recurse=True, name=None):
"""
Following minGPT:
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the parameter groups that will be passed to the PyTorch optimizer.
"""
# return super().parameters(recurse=True)
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
# separate out all parameters to those that will and won't experience regularizing weight decay
print("GPTLikeTransformer: get parameters by the overwrite method!")
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter as not decayed
module_name = ['condition_emb', 'content_emb']
pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb']
for mn in module_name:
if hasattr(self, mn) and getattr(self, mn) is not None:
for pn in pos_emb_name:
if hasattr(getattr(self, mn), pn):
if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter):
no_decay.add('{}.{}'.format(mn, pn))
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.transformer.named_parameters()}# if p.requires_grad}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
return optim_groups
def forward(
self,
input,
return_loss=False,
return_logits=True,
return_att_weight=False,
is_train=True,
**kwargs):
if kwargs.get('autocast') == True:
self.amp = True
batch_size = input['content_token'].shape[0]
device = input['content_token'].device
        # 1) get embedding for condition and content, prepare input
sample_image = input['content_token'].type_as(input['content_token'])
# cont_emb = self.content_emb(sample_image)
if self.condition_emb is not None:
with autocast(enabled=False):
with torch.no_grad():
cond_emb = self.condition_emb(input['condition_token']) # B x Ld x D #256*1024
if self.learnable_cf:
is_empty_text = torch.logical_not(input['condition_mask'][:, 2]).unsqueeze(1).unsqueeze(2).repeat(1, 77, 512)
cond_emb = torch.where(is_empty_text, self.empty_text_embed.unsqueeze(0).repeat(cond_emb.shape[0], 1, 1), cond_emb.type_as(self.empty_text_embed))
cond_emb = cond_emb.float()
        else: # share condition embedding with content
            if input.get('condition_embed_token') is None:
                cond_emb = None
            else:
                cond_emb = input['condition_embed_token'].float()
# now we get cond_emb and sample_image
if is_train == True:
log_model_prob, loss = self._train_loss(sample_image, cond_emb)
loss = loss.sum()/(sample_image.size()[0] * sample_image.size()[1])
# 4) get output, especially loss
out = {}
if return_logits:
out['logits'] = torch.exp(log_model_prob)
if return_loss:
out['loss'] = loss
self.amp = False
return out
def sample(
self,
condition_token,
condition_mask,
condition_embed,
content_token = None,
filter_ratio = 0.5,
temperature = 1.0,
return_att_weight = False,
return_logits = False,
content_logits = None,
print_log = True,
**kwargs):
input = {'condition_token': condition_token,
'content_token': content_token,
'condition_mask': condition_mask,
'condition_embed_token': condition_embed,
'content_logits': content_logits,
}
        if input['condition_token'] is not None:
batch_size = input['condition_token'].shape[0]
else:
batch_size = kwargs['batch_size']
device = self.log_at.device
start_step = int(self.num_timesteps * filter_ratio)
# get cont_emb and cond_emb
        if content_token is not None:
sample_image = input['content_token'].type_as(input['content_token'])
if self.condition_emb is not None: # do this
with torch.no_grad():
cond_emb = self.condition_emb(input['condition_token']) # B x Ld x D #256*1024
cond_emb = cond_emb.float()
        else: # share condition embedding with content
            if input.get('condition_embed_token', None) is not None:
                cond_emb = input['condition_embed_token'].float()
            else:
                cond_emb = None
if start_step == 0:
# use full mask sample
zero_logits = torch.zeros((batch_size, self.num_classes-1, self.shape),device=device)
one_logits = torch.ones((batch_size, 1, self.shape),device=device)
mask_logits = torch.cat((zero_logits, one_logits), dim=1)
log_z = torch.log(mask_logits)
start_step = self.num_timesteps
with torch.no_grad():
for diffusion_index in range(start_step-1, -1, -1):
t = torch.full((batch_size,), diffusion_index, device=device, dtype=torch.long)
sampled = [0] * log_z.shape[0]
while min(sampled) < self.n_sample[diffusion_index]:
log_z, sampled = self.p_sample(log_z, cond_emb, t, sampled, self.n_sample[diffusion_index]) # log_z is log_onehot
else:
t = torch.full((batch_size,), start_step-1, device=device, dtype=torch.long)
log_x_start = index_to_log_onehot(sample_image, self.num_classes)
log_xt = self.q_sample(log_x_start=log_x_start, t=t)
log_z = log_xt
with torch.no_grad():
for diffusion_index in range(start_step-1, -1, -1):
t = torch.full((batch_size,), diffusion_index, device=device, dtype=torch.long)
log_z = self.p_sample(log_z, cond_emb, t) # log_z is log_onehot
content_token = log_onehot_to_index(log_z)
output = {'content_token': content_token}
if return_logits:
output['logits'] = torch.exp(log_z)
return output
def sample_fast(
self,
condition_token,
condition_mask,
condition_embed,
content_token = None,
filter_ratio = 0.5,
temperature = 1.0,
return_att_weight = False,
return_logits = False,
content_logits = None,
print_log = True,
skip_step = 1,
**kwargs):
input = {'condition_token': condition_token,
'content_token': content_token,
'condition_mask': condition_mask,
'condition_embed_token': condition_embed,
'content_logits': content_logits,
}
batch_size = input['condition_token'].shape[0]
device = self.log_at.device
start_step = int(self.num_timesteps * filter_ratio)
# get cont_emb and cond_emb
        if content_token is not None:
sample_image = input['content_token'].type_as(input['content_token'])
if self.condition_emb is not None:
with torch.no_grad():
cond_emb = self.condition_emb(input['condition_token']) # B x Ld x D #256*1024
cond_emb = cond_emb.float()
        else: # share condition embedding with content
cond_emb = input['condition_embed_token'].float()
assert start_step == 0
zero_logits = torch.zeros((batch_size, self.num_classes-1, self.shape),device=device)
one_logits = torch.ones((batch_size, 1, self.shape),device=device)
mask_logits = torch.cat((zero_logits, one_logits), dim=1)
log_z = torch.log(mask_logits)
start_step = self.num_timesteps
with torch.no_grad():
# skip_step = 1
diffusion_list = [index for index in range(start_step-1, -1, -1-skip_step)]
if diffusion_list[-1] != 0:
diffusion_list.append(0)
# for diffusion_index in range(start_step-1, -1, -1):
for diffusion_index in diffusion_list:
t = torch.full((batch_size,), diffusion_index, device=device, dtype=torch.long)
log_x_recon = self.cf_predict_start(log_z, cond_emb, t)
if diffusion_index > skip_step:
model_log_prob = self.q_posterior(log_x_start=log_x_recon, log_x_t=log_z, t=t-skip_step)
else:
model_log_prob = self.q_posterior(log_x_start=log_x_recon, log_x_t=log_z, t=t)
log_z = self.log_sample_categorical(model_log_prob)
content_token = log_onehot_to_index(log_z)
output = {'content_token': content_token}
if return_logits:
output['logits'] = torch.exp(log_z)
return output
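# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# The timestep schedule used by sample_fast above: the reverse chain is walked with a
# stride of (skip_step + 1) and always finishes at t = 0. Standalone reproduction of
# that list; the argument values are illustrative only.
def _demo_fast_sampling_schedule(num_timesteps=100, skip_step=9):
    diffusion_list = [index for index in range(num_timesteps - 1, -1, -1 - skip_step)]
    if diffusion_list[-1] != 0:
        diffusion_list.append(0)
    return diffusion_list                                       # e.g. [99, 89, ..., 9, 0]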
| 29,919 | 42.678832 | 166 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/modeling/transformers/transformer_utils.py | # ------------------------------------------
# VQ-Diffusion
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# written By Shuyang Gu
# ------------------------------------------
import math
import torch
from torch import nn
import torch.nn.functional as F
from image_synthesis.utils.misc import instantiate_from_config
import numpy as np
from einops import rearrange
from image_synthesis.distributed.distributed import is_primary, get_rank
from inspect import isfunction
from torch.cuda.amp import autocast
from torch.utils.checkpoint import checkpoint
class FullAttention(nn.Module):
def __init__(self,
n_embd, # the embed dim
n_head, # the number of heads
seq_len=None, # the max length of sequence
attn_pdrop=0.1, # attention dropout prob
resid_pdrop=0.1, # residual attention dropout prob
causal=True,
):
super().__init__()
assert n_embd % n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(n_embd, n_embd)
self.query = nn.Linear(n_embd, n_embd)
self.value = nn.Linear(n_embd, n_embd)
# regularization
self.attn_drop = nn.Dropout(attn_pdrop)
self.resid_drop = nn.Dropout(resid_pdrop)
# output projection
self.proj = nn.Linear(n_embd, n_embd)
self.n_head = n_head
self.causal = causal
def forward(self, x, encoder_output, mask=None):
B, T, C = x.size()
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) # (B, nh, T, T)
att = F.softmax(att, dim=-1) # (B, nh, T, T)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side, (B, T, C)
att = att.mean(dim=1, keepdim=False) # (B, T, T)
# output projection
y = self.resid_drop(self.proj(y))
return y, att
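# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# Shape check for FullAttention, using the torch import above: it returns the attended
# features and the head-averaged attention map. encoder_output is accepted for interface
# compatibility but unused in self-attention. Sizes are arbitrary illustration values.
def _demo_full_attention_shapes():
    attn = FullAttention(n_embd=64, n_head=4, seq_len=16, attn_pdrop=0.0, resid_pdrop=0.0)
    x = torch.randn(2, 16, 64)
    y, att = attn(x, encoder_output=None)
    assert y.shape == (2, 16, 64)
    assert att.shape == (2, 16, 16)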
class CrossAttention(nn.Module):
def __init__(self,
condition_seq_len,
n_embd, # the embed dim
condition_embd, # condition dim
n_head, # the number of heads
seq_len=None, # the max length of sequence
attn_pdrop=0.1, # attention dropout prob
resid_pdrop=0.1, # residual attention dropout prob
causal=True,
):
super().__init__()
assert n_embd % n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(condition_embd, n_embd)
self.query = nn.Linear(n_embd, n_embd)
self.value = nn.Linear(condition_embd, n_embd)
# regularization
self.attn_drop = nn.Dropout(attn_pdrop)
self.resid_drop = nn.Dropout(resid_pdrop)
# output projection
self.proj = nn.Linear(n_embd, n_embd)
self.n_head = n_head
self.causal = causal
# causal mask to ensure that attention is only applied to the left in the input sequence
if self.causal:
self.register_buffer("mask", torch.tril(torch.ones(seq_len, seq_len))
.view(1, 1, seq_len, seq_len))
def forward(self, x, encoder_output, mask=None):
B, T, C = x.size()
B, T_E, _ = encoder_output.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(encoder_output).view(B, T_E, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(encoder_output).view(B, T_E, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1))) # (B, nh, T, T)
att = F.softmax(att, dim=-1) # (B, nh, T, T)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side, (B, T, C)
att = att.mean(dim=1, keepdim=False) # (B, T, T)
# output projection
y = self.resid_drop(self.proj(y))
return y, att
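# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# Shape check for CrossAttention: queries come from the content sequence, keys/values
# from the (differently sized) condition sequence, so the attention map is T x T_E.
def _demo_cross_attention_shapes():
    attn = CrossAttention(condition_seq_len=77, n_embd=64, condition_embd=32,
                          n_head=4, seq_len=16, attn_pdrop=0.0, resid_pdrop=0.0)
    x = torch.randn(2, 16, 64)                     # content tokens
    cond = torch.randn(2, 77, 32)                  # condition (e.g. text) embeddings
    y, att = attn(x, cond)
    assert y.shape == (2, 16, 64)
    assert att.shape == (2, 16, 77)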
class GELU2(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
        return x * torch.sigmoid(1.702 * x)
class SinusoidalPosEmb(nn.Module):
def __init__(self, num_steps, dim, rescale_steps=4000):
super().__init__()
self.dim = dim
self.num_steps = float(num_steps)
self.rescale_steps = float(rescale_steps)
def forward(self, x):
x = x / self.num_steps * self.rescale_steps
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
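# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# The timestep embedding above rescales integer diffusion steps and maps them to
# sin/cos features of dimension `dim`.
def _demo_sinusoidal_pos_emb():
    emb = SinusoidalPosEmb(num_steps=100, dim=64)
    t = torch.arange(4)                            # four diffusion timesteps
    assert emb(t).shape == (4, 64)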
class AdaLayerNorm(nn.Module):
def __init__(self, n_embd, diffusion_step, emb_type="adalayernorm_abs"):
super().__init__()
if "abs" in emb_type:
self.emb = SinusoidalPosEmb(diffusion_step, n_embd)
else:
self.emb = nn.Embedding(diffusion_step, n_embd)
self.silu = nn.SiLU()
self.linear = nn.Linear(n_embd, n_embd*2)
self.layernorm = nn.LayerNorm(n_embd, elementwise_affine=False)
self.diff_step = diffusion_step
def forward(self, x, timestep):
if timestep[0] >= self.diff_step:
_emb = self.emb.weight.mean(dim=0, keepdim=True).repeat(len(timestep), 1)
emb = self.linear(self.silu(_emb)).unsqueeze(1)
else:
emb = self.linear(self.silu(self.emb(timestep))).unsqueeze(1)
scale, shift = torch.chunk(emb, 2, dim=2)
x = self.layernorm(x) * (1 + scale) + shift
return x
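# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# AdaLayerNorm modulates a non-affine LayerNorm with a scale and shift predicted from
# the timestep embedding, so the output keeps the input shape.
def _demo_ada_layer_norm():
    norm = AdaLayerNorm(n_embd=64, diffusion_step=100, emb_type="adalayernorm_abs")
    x = torch.randn(2, 16, 64)
    t = torch.full((2,), 5, dtype=torch.long)
    assert norm(x, t).shape == (2, 16, 64)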
class AdaInsNorm(nn.Module):
def __init__(self, n_embd, diffusion_step, emb_type="adainsnorm_abs"):
super().__init__()
if "abs" in emb_type:
self.emb = SinusoidalPosEmb(diffusion_step, n_embd)
else:
self.emb = nn.Embedding(diffusion_step, n_embd)
self.silu = nn.SiLU()
self.linear = nn.Linear(n_embd, n_embd*2)
self.instancenorm = nn.InstanceNorm1d(n_embd)
def forward(self, x, timestep):
emb = self.linear(self.silu(self.emb(timestep))).unsqueeze(1)
scale, shift = torch.chunk(emb, 2, dim=2)
x = self.instancenorm(x.transpose(-1, -2)).transpose(-1,-2) * (1 + scale) + shift
return x
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self,
class_type='adalayernorm',
class_number=1000,
condition_seq_len=77,
n_embd=1024,
n_head=16,
seq_len=256,
attn_pdrop=0.1,
resid_pdrop=0.1,
mlp_hidden_times=4,
activate='GELU',
attn_type='full',
if_upsample=False,
upsample_type='bilinear',
upsample_pre_channel=0,
content_spatial_size=None, # H , W
conv_attn_kernel_size=None, # only need for dalle_conv attention
condition_dim=1024,
diffusion_step=100,
timestep_type='adalayernorm',
window_size = 8,
mlp_type = 'fc',
):
super().__init__()
self.if_upsample = if_upsample
self.attn_type = attn_type
if attn_type in ['selfcross', 'selfcondition', 'self']:
if 'adalayernorm' in timestep_type:
self.ln1 = AdaLayerNorm(n_embd, diffusion_step, timestep_type)
else:
print("timestep_type wrong")
else:
self.ln1 = nn.LayerNorm(n_embd)
self.ln2 = nn.LayerNorm(n_embd)
# self.if_selfcross = False
if attn_type in ['self', 'selfcondition']:
self.attn = FullAttention(
n_embd=n_embd,
n_head=n_head,
seq_len=seq_len,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
)
if attn_type == 'selfcondition':
if 'adalayernorm' in class_type:
self.ln2 = AdaLayerNorm(n_embd, class_number, class_type)
else:
self.ln2 = AdaInsNorm(n_embd, class_number, class_type)
elif attn_type == 'selfcross':
self.attn1 = FullAttention(
n_embd=n_embd,
n_head=n_head,
seq_len=seq_len,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
)
self.attn2 = CrossAttention(
condition_seq_len,
n_embd=n_embd,
condition_embd=condition_dim,
n_head=n_head,
seq_len=seq_len,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
)
if 'adalayernorm' in timestep_type:
self.ln1_1 = AdaLayerNorm(n_embd, diffusion_step, timestep_type)
else:
print("timestep_type wrong")
else:
print("attn_type error")
assert activate in ['GELU', 'GELU2']
act = nn.GELU() if activate == 'GELU' else GELU2()
if mlp_type == 'conv_mlp':
self.mlp = Conv_MLP(n_embd, mlp_hidden_times, act, resid_pdrop)
else:
self.mlp = nn.Sequential(
nn.Linear(n_embd, mlp_hidden_times * n_embd),
act,
nn.Linear(mlp_hidden_times * n_embd, n_embd),
nn.Dropout(resid_pdrop),
)
    def forward(self, x, encoder_output, timestep, mask=None):
        if self.attn_type == "selfcross":
            a, att = self.attn1(self.ln1(x, timestep), encoder_output, mask=mask)
            x = x + a
            a, att = self.attn2(self.ln1_1(x, timestep), encoder_output, mask=mask)
            x = x + a
        elif self.attn_type == "selfcondition":
            a, att = self.attn(self.ln1(x, timestep), encoder_output, mask=mask)
            x = x + a
            x = x + self.mlp(self.ln2(x, encoder_output.long())) # only the selfcondition branch uses encoder_output here
            return x, att
        else: # 'self'
            a, att = self.attn(self.ln1(x, timestep), encoder_output, mask=mask)
            x = x + a
        # shared MLP + residual for the 'selfcross' and 'self' branches
        x = x + self.mlp(self.ln2(x))
        return x, att
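# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# A small 'selfcross' Block as used by Text2ImageTransformer: timestep-conditioned
# self-attention over content tokens, cross-attention to condition tokens, then an MLP.
# All sizes are arbitrary illustration values.
def _demo_selfcross_block():
    block = Block(attn_type='selfcross', n_embd=64, n_head=4, seq_len=16,
                  condition_seq_len=8, condition_dim=32, diffusion_step=100,
                  timestep_type='adalayernorm', activate='GELU', mlp_type='fc',
                  attn_pdrop=0.0, resid_pdrop=0.0)
    x = torch.randn(2, 16, 64)
    cond = torch.randn(2, 8, 32)
    t = torch.zeros(2, dtype=torch.long)
    out, att = block(x, cond, t)
    assert out.shape == (2, 16, 64) and att.shape == (2, 16, 8)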
class Conv_MLP(nn.Module):
def __init__(self, n_embd, mlp_hidden_times, act, resid_pdrop):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=n_embd, out_channels=int(mlp_hidden_times * n_embd), kernel_size=3, stride=1, padding=1)
self.act = act
self.conv2 = nn.Conv2d(in_channels=int(mlp_hidden_times * n_embd), out_channels=n_embd, kernel_size=3, stride=1, padding=1)
self.dropout = nn.Dropout(resid_pdrop)
def forward(self, x):
n = x.size()[1]
x = rearrange(x, 'b (h w) c -> b c h w', h=int(math.sqrt(n)))
x = self.conv2(self.act(self.conv1(x)))
x = rearrange(x, 'b c h w -> b (h w) c')
return self.dropout(x)
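# --- illustrative sketch (not part of the original VQ-Diffusion code) ---
# Conv_MLP treats the token sequence as a square feature map and applies two 3x3
# convolutions, so the sequence length must be a perfect square (16 -> 4x4 below).
def _demo_conv_mlp():
    mlp = Conv_MLP(n_embd=8, mlp_hidden_times=2, act=nn.GELU(), resid_pdrop=0.0)
    x = torch.randn(2, 16, 8)
    assert mlp(x).shape == (2, 16, 8)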
class Text2ImageTransformer(nn.Module):
def __init__(
self,
condition_seq_len=77,
n_layer=14,
n_embd=1024,
n_head=16,
content_seq_len=1024,
attn_pdrop=0,
resid_pdrop=0,
mlp_hidden_times=4,
block_activate=None,
attn_type='selfcross',
content_spatial_size=[32,32], # H , W
condition_dim=512,
diffusion_step=1000,
timestep_type='adalayernorm',
content_emb_config=None,
mlp_type='fc',
checkpoint=False,
):
super().__init__()
self.use_checkpoint = checkpoint
self.content_emb = instantiate_from_config(content_emb_config)
# transformer
assert attn_type == 'selfcross'
all_attn_type = [attn_type] * n_layer
if content_spatial_size is None:
s = int(math.sqrt(content_seq_len))
assert s * s == content_seq_len
content_spatial_size = (s, s)
self.blocks = nn.Sequential(*[Block(
            condition_seq_len=condition_seq_len, # keyword arg: Block's first positional parameter is class_type, not condition_seq_len
n_embd=n_embd,
n_head=n_head,
seq_len=content_seq_len,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
mlp_hidden_times=mlp_hidden_times,
activate=block_activate,
attn_type=all_attn_type[n],
content_spatial_size=content_spatial_size, # H , W
condition_dim = condition_dim,
diffusion_step = diffusion_step,
timestep_type = timestep_type,
mlp_type = mlp_type,
) for n in range(n_layer)])
# final prediction head
out_cls = self.content_emb.num_embed-1
self.to_logits = nn.Sequential(
nn.LayerNorm(n_embd),
nn.Linear(n_embd, out_cls),
)
self.condition_seq_len = condition_seq_len
self.content_seq_len = content_seq_len
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
if module.elementwise_affine == True:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def parameters(self, recurse=True, name=None):
"""
Following minGPT:
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the parameter groups that will be passed to the PyTorch optimizer.
"""
# return super().parameters(recurse=True)
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
# separate out all parameters to those that will and won't experience regularizing weight decay
print("GPTLikeTransformer: get parameters by the overwrite method!")
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter as not decayed
module_name = ['condition_emb', 'content_emb']
pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb']
for mn in module_name:
if hasattr(self, mn) and getattr(self, mn) is not None:
for pn in pos_emb_name:
if hasattr(getattr(self, mn), pn):
if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter):
no_decay.add('{}.{}'.format(mn, pn))
# validate that we considered every parameter
            param_dict = {pn: p for pn, p in self.named_parameters()} # this module has no .transformer attribute; collect its own parameters
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
return optim_groups
def forward(
self,
input,
cond_emb,
t):
cont_emb = self.content_emb(input)
emb = cont_emb
for block_idx in range(len(self.blocks)):
if self.use_checkpoint == False:
emb, att_weight = self.blocks[block_idx](emb, cond_emb, t.cuda()) # B x (Ld+Lt) x D, B x (Ld+Lt) x (Ld+Lt)
else:
emb, att_weight = checkpoint(self.blocks[block_idx], emb, cond_emb, t.cuda())
logits = self.to_logits(emb) # B x (Ld+Lt) x n
out = rearrange(logits, 'b l c -> b c l')
return out
class Condition2ImageTransformer(nn.Module):
def __init__(
self,
class_type='adalayernorm',
class_number=1000,
n_layer=24,
n_embd=1024,
n_head=16,
content_seq_len=1024,
attn_pdrop=0,
resid_pdrop=0,
mlp_hidden_times=4,
block_activate=None,
attn_type='selfcondition',
content_spatial_size=[32,32], # H , W
diffusion_step=100,
timestep_type='adalayernorm',
content_emb_config=None,
mlp_type="conv_mlp",
):
super().__init__()
self.content_emb = instantiate_from_config(content_emb_config)
# transformer
assert attn_type == 'selfcondition'
all_attn_type = [attn_type] * n_layer
if content_spatial_size is None:
s = int(math.sqrt(content_seq_len))
assert s * s == content_seq_len
content_spatial_size = (s, s)
self.blocks = nn.Sequential(*[Block(
class_type=class_type,
class_number=class_number,
n_embd=n_embd,
n_head=n_head,
seq_len=content_seq_len,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
mlp_hidden_times=mlp_hidden_times,
activate=block_activate,
attn_type=all_attn_type[n],
content_spatial_size=content_spatial_size, # H , W
diffusion_step = diffusion_step,
timestep_type = timestep_type,
mlp_type = mlp_type,
) for n in range(n_layer)])
# final prediction head
out_cls = self.content_emb.num_embed-1
self.to_logits = nn.Sequential(
nn.LayerNorm(n_embd),
nn.Linear(n_embd, out_cls),
)
self.content_seq_len = content_seq_len
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
if module.elementwise_affine == True:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def parameters(self, recurse=True, name=None):
"""
Following minGPT:
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the parameter groups that will be passed to the PyTorch optimizer.
"""
# return super().parameters(recurse=True)
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
# separate out all parameters to those that will and won't experience regularizing weight decay
print("GPTLikeTransformer: get parameters by the overwrite method!")
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter as not decayed
module_name = ['condition_emb', 'content_emb']
pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb']
for mn in module_name:
if hasattr(self, mn) and getattr(self, mn) is not None:
for pn in pos_emb_name:
if hasattr(getattr(self, mn), pn):
if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter):
no_decay.add('{}.{}'.format(mn, pn))
# validate that we considered every parameter
            param_dict = {pn: p for pn, p in self.named_parameters()} # this module has no .transformer attribute; collect its own parameters
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
return optim_groups
def forward(
self,
input,
cond_emb,
t):
cont_emb = self.content_emb(input)
emb = cont_emb
for block_idx in range(len(self.blocks)):
emb, att_weight = self.blocks[block_idx](emb, cond_emb, t.cuda()) # B x (Ld+Lt) x D, B x (Ld+Lt) x (Ld+Lt)
logits = self.to_logits(emb) # B x (Ld+Lt) x n
out = rearrange(logits, 'b l c -> b c l')
return out
class UnCondition2ImageTransformer(nn.Module):
def __init__(
self,
class_type='adalayernorm',
n_layer=24,
n_embd=512,
n_head=16,
content_seq_len=256,
attn_pdrop=0,
resid_pdrop=0,
mlp_hidden_times=4,
block_activate=None,
attn_type='self',
content_spatial_size=[16,16], # H , W
diffusion_step=100,
timestep_type='adalayernorm',
content_emb_config=None,
mlp_type="conv_mlp",
):
super().__init__()
self.content_emb = instantiate_from_config(content_emb_config)
# transformer
assert attn_type == 'self'
all_attn_type = [attn_type] * n_layer
if content_spatial_size is None:
s = int(math.sqrt(content_seq_len))
assert s * s == content_seq_len
content_spatial_size = (s, s)
self.blocks = nn.Sequential(*[Block(
n_embd=n_embd,
n_head=n_head,
seq_len=content_seq_len,
attn_pdrop=attn_pdrop,
resid_pdrop=resid_pdrop,
mlp_hidden_times=mlp_hidden_times,
activate=block_activate,
attn_type=all_attn_type[n],
content_spatial_size=content_spatial_size, # H , W
diffusion_step = diffusion_step,
timestep_type = timestep_type,
mlp_type = mlp_type,
) for n in range(n_layer)])
# final prediction head
out_cls = self.content_emb.num_embed-1
self.to_logits = nn.Sequential(
nn.LayerNorm(n_embd),
nn.Linear(n_embd, out_cls),
)
self.content_seq_len = content_seq_len
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
if module.elementwise_affine == True:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def parameters(self, recurse=True, name=None):
"""
Following minGPT:
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the parameter groups that will be passed to the PyTorch optimizer.
"""
# return super().parameters(recurse=True)
if name is None or name == 'none':
return super().parameters(recurse=recurse)
else:
# separate out all parameters to those that will and won't experience regularizing weight decay
print("GPTLikeTransformer: get parameters by the overwrite method!")
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter as not decayed
module_name = ['condition_emb', 'content_emb']
pos_emb_name = ['pos_emb', 'width_emb', 'height_emb', 'pad_emb', 'token_type_emb']
for mn in module_name:
if hasattr(self, mn) and getattr(self, mn) is not None:
for pn in pos_emb_name:
if hasattr(getattr(self, mn), pn):
if isinstance(getattr(getattr(self, mn), pn), torch.nn.Parameter):
no_decay.add('{}.{}'.format(mn, pn))
# validate that we considered every parameter
            param_dict = {pn: p for pn, p in self.named_parameters()} # this module has no .transformer attribute; collect its own parameters
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
return optim_groups
def forward(
self,
input,
cond_emb,
t):
cont_emb = self.content_emb(input)
emb = cont_emb
for block_idx in range(len(self.blocks)):
emb, att_weight = self.blocks[block_idx](emb, cond_emb, t.cuda()) # B x (Ld+Lt) x D, B x (Ld+Lt) x (Ld+Lt)
logits = self.to_logits(emb) # B x (Ld+Lt) x n
out = rearrange(logits, 'b l c -> b c l')
return out
| 30,407 | 41 | 131 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/util.py | import torch
import torch.nn as nn
def count_params(model):
total_params = sum(p.numel() for p in model.parameters())
return total_params
class ActNorm(nn.Module):
def __init__(self, num_features, logdet=False, affine=True,
allow_reverse_init=False):
assert affine
super().__init__()
self.logdet = logdet
self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
self.allow_reverse_init = allow_reverse_init
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
def initialize(self, input):
with torch.no_grad():
flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
mean = (
flatten.mean(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
std = (
flatten.std(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
self.loc.data.copy_(-mean)
self.scale.data.copy_(1 / (std + 1e-6))
def forward(self, input, reverse=False):
if reverse:
return self.reverse(input)
if len(input.shape) == 2:
input = input[:,:,None,None]
squeeze = True
else:
squeeze = False
_, _, height, width = input.shape
if self.training and self.initialized.item() == 0:
self.initialize(input)
self.initialized.fill_(1)
h = self.scale * (input + self.loc)
if squeeze:
h = h.squeeze(-1).squeeze(-1)
if self.logdet:
log_abs = torch.log(torch.abs(self.scale))
logdet = height*width*torch.sum(log_abs)
logdet = logdet * torch.ones(input.shape[0]).to(input)
return h, logdet
return h
def reverse(self, output):
if self.training and self.initialized.item() == 0:
if not self.allow_reverse_init:
raise RuntimeError(
"Initializing ActNorm in reverse direction is "
"disabled by default. Use allow_reverse_init=True to enable."
)
else:
self.initialize(output)
self.initialized.fill_(1)
if len(output.shape) == 2:
output = output[:,:,None,None]
squeeze = True
else:
squeeze = False
h = output / self.scale - self.loc
if squeeze:
h = h.squeeze(-1).squeeze(-1)
return h
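# --- illustrative sketch (not part of the original taming code) ---
# ActNorm is data-initialized on its first training batch; afterwards forward followed
# by reverse recovers the input up to numerical error.
def _demo_actnorm_roundtrip():
    norm = ActNorm(num_features=3)
    x = torch.randn(4, 3, 8, 8)
    norm.train()
    h = norm(x)                                    # first call sets loc/scale from the data
    x_rec = norm.reverse(h)
    assert torch.allclose(x, x_rec, atol=1e-4)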
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class Labelator(AbstractEncoder):
"""Net2Net Interface for Class-Conditional Model"""
def __init__(self, n_classes, quantize_interface=True):
super().__init__()
self.n_classes = n_classes
self.quantize_interface = quantize_interface
def encode(self, c):
c = c[:,None]
if self.quantize_interface:
return c, None, [None, None, c.long()]
return c
class SOSProvider(AbstractEncoder):
# for unconditional training
def __init__(self, sos_token, quantize_interface=True):
super().__init__()
self.sos_token = sos_token
self.quantize_interface = quantize_interface
def encode(self, x):
# get batch size from data and replicate sos_token
c = torch.ones(x.shape[0], 1)*self.sos_token
c = c.long().to(x.device)
if self.quantize_interface:
return c, None, [None, None, c]
return c
| 3,847 | 28.374046 | 85 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/vqvae/quantize.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch import einsum
from einops import rearrange
class VectorQuantizer(nn.Module):
"""
see https://github.com/MishaLaskin/vqvae/blob/d761a999e2267766400dc646d82d3ac3657771d4/models/quantizer.py
____________________________________________
Discretization bottleneck part of the VQ-VAE.
Inputs:
- n_e : number of embeddings
- e_dim : dimension of embedding
- beta : commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
_____________________________________________
"""
# NOTE: this class contains a bug regarding beta; see VectorQuantizer2 for
# a fix and use legacy=False to apply that fix. VectorQuantizer2 can be
# used wherever VectorQuantizer has been used before and is additionally
# more efficient.
def __init__(self, n_e, e_dim, beta):
super(VectorQuantizer, self).__init__()
self.n_e = n_e
self.e_dim = e_dim
self.beta = beta
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
def forward(self, z):
"""
Inputs the output of the encoder network z and maps it to a discrete
one-hot vector that is the index of the closest embedding vector e_j
z (continuous) -> z_q (discrete)
z.shape = (batch, channel, height, width)
quantization pipeline:
1. get encoder input (B,C,H,W)
2. flatten input to (B*H*W,C)
"""
# reshape z -> (batch, height, width, channel) and flatten
z = z.permute(0, 2, 3, 1).contiguous()
z_flattened = z.view(-1, self.e_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
torch.sum(self.embedding.weight**2, dim=1) - 2 * \
torch.matmul(z_flattened, self.embedding.weight.t())
## could possible replace this here
# #\start...
# find closest encodings
min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
min_encodings = torch.zeros(
min_encoding_indices.shape[0], self.n_e).to(z)
min_encodings.scatter_(1, min_encoding_indices, 1)
# dtype min encodings: torch.float32
# min_encodings shape: torch.Size([2048, 512])
# min_encoding_indices.shape: torch.Size([2048, 1])
# get quantized latent vectors
z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
#.........\end
# with:
# .........\start
#min_encoding_indices = torch.argmin(d, dim=1)
#z_q = self.embedding(min_encoding_indices)
# ......\end......... (TODO)
# compute loss for embedding
loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q = z + (z_q - z).detach()
# perplexity
e_mean = torch.mean(min_encodings, dim=0)
perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry(self, indices, shape):
# shape specifying (batch, height, width, channel)
# TODO: check for more easy handling with nn.Embedding
min_encodings = torch.zeros(indices.shape[0], self.n_e).to(indices)
min_encodings.scatter_(1, indices[:,None], 1)
# get quantized latent vectors
z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
if shape is not None:
z_q = z_q.view(shape)
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
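# --- illustrative sketch (not part of the original taming code) ---
# Minimal usage of VectorQuantizer: continuous (B, C, H, W) features go in; quantized
# features of the same shape come out, together with the commitment loss and one
# codebook index per spatial position. Sizes are arbitrary illustration values.
def _demo_vector_quantizer():
    vq = VectorQuantizer(n_e=16, e_dim=4, beta=0.25)
    z = torch.randn(2, 4, 8, 8)
    z_q, loss, (perplexity, min_encodings, indices) = vq(z)
    assert z_q.shape == z.shape
    assert indices.shape == (2 * 8 * 8, 1)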
class GumbelQuantize(nn.Module):
"""
credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
Gumbel Softmax trick quantizer
Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
https://arxiv.org/abs/1611.01144
"""
def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,
kl_weight=5e-4, temp_init=1.0, use_vqinterface=True,
remap=None, unknown_index="random"):
super().__init__()
self.embedding_dim = embedding_dim
self.n_embed = n_embed
self.straight_through = straight_through
self.temperature = temp_init
self.kl_weight = kl_weight
self.proj = nn.Conv2d(num_hiddens, n_embed, 1)
self.embed = nn.Embedding(n_embed, embedding_dim)
self.use_vqinterface = use_vqinterface
self.remap = remap
if self.remap is not None:
self.register_buffer("used", torch.tensor(np.load(self.remap)))
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed+1
print(f"Remapping {self.n_embed} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices.")
else:
self.re_embed = n_embed
def remap_to_used(self, inds):
ishape = inds.shape
assert len(ishape)>1
inds = inds.reshape(ishape[0],-1)
used = self.used.to(inds)
match = (inds[:,:,None]==used[None,None,...]).long()
new = match.argmax(-1)
unknown = match.sum(2)<1
if self.unknown_index == "random":
new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
else:
new[unknown] = self.unknown_index
return new.reshape(ishape)
def unmap_to_all(self, inds):
ishape = inds.shape
assert len(ishape)>1
inds = inds.reshape(ishape[0],-1)
used = self.used.to(inds)
if self.re_embed > self.used.shape[0]: # extra token
inds[inds>=self.used.shape[0]] = 0 # simply set to zero
back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
return back.reshape(ishape)
def forward(self, z, temp=None, return_logits=False):
# force hard = True when we are in eval mode, as we must quantize. actually, always true seems to work
hard = self.straight_through if self.training else True
temp = self.temperature if temp is None else temp
logits = self.proj(z)
if self.remap is not None:
# continue only with used logits
full_zeros = torch.zeros_like(logits)
logits = logits[:,self.used,...]
soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=hard)
if self.remap is not None:
# go back to all entries but unused set to zero
full_zeros[:,self.used,...] = soft_one_hot
soft_one_hot = full_zeros
z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)
# + kl divergence to the prior loss
qy = F.softmax(logits, dim=1)
diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.n_embed + 1e-10), dim=1).mean()
ind = soft_one_hot.argmax(dim=1)
if self.remap is not None:
ind = self.remap_to_used(ind)
if self.use_vqinterface:
if return_logits:
return z_q, diff, (None, None, ind), logits
return z_q, diff, (None, None, ind)
return z_q, diff, ind
def get_codebook_entry(self, indices, shape):
b, h, w, c = shape
assert b*h*w == indices.shape[0]
indices = rearrange(indices, '(b h w) -> b h w', b=b, h=h, w=w)
if self.remap is not None:
indices = self.unmap_to_all(indices)
one_hot = F.one_hot(indices, num_classes=self.n_embed).permute(0, 3, 1, 2).float()
z_q = einsum('b n h w, n d -> b d h w', one_hot, self.embed.weight)
return z_q
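# --- illustrative sketch (not part of the original taming code) ---
# GumbelQuantize in its default configuration: the projection predicts logits over the
# codebook, Gumbel-softmax selects code vectors (straight-through in training), and the
# KL term to the uniform prior is returned as `diff`.
def _demo_gumbel_quantize():
    gq = GumbelQuantize(num_hiddens=8, embedding_dim=4, n_embed=16)
    gq.train()                                     # eval mode forces hard one-hot samples
    z = torch.randn(2, 8, 8, 8)
    z_q, diff, (_, _, ind) = gq(z)
    assert z_q.shape == (2, 4, 8, 8)
    assert ind.shape == (2, 8, 8)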
class VectorQuantizer2(nn.Module):
"""
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
avoids costly matrix multiplications and allows for post-hoc remapping of indices.
"""
# NOTE: due to a bug the beta term was applied to the wrong term. for
# backwards compatibility we use the buggy version by default, but you can
# specify legacy=False to fix it.
def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
sane_index_shape=False, legacy=True):
super().__init__()
self.n_e = n_e
self.e_dim = e_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
self.remap = remap
if self.remap is not None:
self.register_buffer("used", torch.tensor(np.load(self.remap)))
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed+1
print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices.")
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def remap_to_used(self, inds):
ishape = inds.shape
assert len(ishape)>1
inds = inds.reshape(ishape[0],-1)
used = self.used.to(inds)
match = (inds[:,:,None]==used[None,None,...]).long()
new = match.argmax(-1)
unknown = match.sum(2)<1
if self.unknown_index == "random":
new[unknown]=torch.randint(0,self.re_embed,size=new[unknown].shape).to(device=new.device)
else:
new[unknown] = self.unknown_index
return new.reshape(ishape)
def unmap_to_all(self, inds):
ishape = inds.shape
assert len(ishape)>1
inds = inds.reshape(ishape[0],-1)
used = self.used.to(inds)
if self.re_embed > self.used.shape[0]: # extra token
inds[inds>=self.used.shape[0]] = 0 # simply set to zero
back=torch.gather(used[None,:][inds.shape[0]*[0],:], 1, inds)
return back.reshape(ishape)
def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
assert temp is None or temp==1.0, "Only for interface compatible with Gumbel"
assert rescale_logits==False, "Only for interface compatible with Gumbel"
assert return_logits==False, "Only for interface compatible with Gumbel"
# reshape z -> (batch, height, width, channel) and flatten
z = rearrange(z, 'b c h w -> b h w c').contiguous()
z_flattened = z.view(-1, self.e_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
torch.sum(self.embedding.weight**2, dim=1) - 2 * \
torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
min_encoding_indices = torch.argmin(d, dim=1)
z_q = self.embedding(min_encoding_indices).view(z.shape)
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach()-z)**2) + \
torch.mean((z_q - z.detach()) ** 2)
else:
loss = torch.mean((z_q.detach()-z)**2) + self.beta * \
torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q = z + (z_q - z).detach()
# reshape back to match original input shape
z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0],-1) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices)
min_encoding_indices = min_encoding_indices.reshape(-1,1) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(
z_q.shape[0], z_q.shape[2], z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry(self, indices, shape):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
indices = indices.reshape(shape[0],-1) # add batch axis
indices = self.unmap_to_all(indices)
indices = indices.reshape(-1) # flatten again
# get quantized latent vectors
z_q = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
| 13,259 | 39.181818 | 110 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/discriminator/model.py | import functools
import torch.nn as nn
from image_synthesis.taming.modules.util import ActNorm
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator as in Pix2Pix
--> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
"""
def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
            use_actnorm (bool) -- use ActNorm instead of BatchNorm as the normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if not use_actnorm:
norm_layer = nn.BatchNorm2d
else:
norm_layer = ActNorm
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [
nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.main = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.main(input)
| 2,566 | 36.75 | 116 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/misc/coord.py | import torch
class CoordStage(object):
def __init__(self, n_embed, down_factor):
self.n_embed = n_embed
self.down_factor = down_factor
def eval(self):
return self
def encode(self, c):
"""fake vqmodel interface"""
assert 0.0 <= c.min() and c.max() <= 1.0
b,ch,h,w = c.shape
assert ch == 1
c = torch.nn.functional.interpolate(c, scale_factor=1/self.down_factor,
mode="area")
c = c.clamp(0.0, 1.0)
c = self.n_embed*c
c_quant = c.round()
c_ind = c_quant.to(dtype=torch.long)
info = None, None, c_ind
return c_quant, None, info
def decode(self, c):
c = c/self.n_embed
c = torch.nn.functional.interpolate(c, scale_factor=self.down_factor,
mode="nearest")
return c
| 904 | 27.28125 | 79 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/diffusionmodules/model.py | # pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
def get_timestep_embedding(timesteps, embedding_dim):
"""
    Build sinusoidal timestep embeddings, as in Denoising Diffusion Probabilistic
    Models (ported from Fairseq). This matches the implementation in tensor2tensor,
    but differs slightly from the description in Section 3.5 of
    "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0,1,0,0))
return emb
def nonlinearity(x):
# swish
return x*torch.sigmoid(x)
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
if self.with_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=2,
padding=0)
def forward(self, x):
if self.with_conv:
pad = (0,1,0,1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
return x
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
dropout, temb_channels=512):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if temb_channels > 0:
self.temb_proj = torch.nn.Linear(temb_channels,
out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
else:
self.nin_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x, temb):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
if temb is not None:
h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x+h
class AttnBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
# compute attention
b,c,h,w = q.shape
q = q.reshape(b,c,h*w)
q = q.permute(0,2,1) # b,hw,c
k = k.reshape(b,c,h*w) # b,c,hw
w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
w_ = w_ * (int(c)**(-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
v = v.reshape(b,c,h*w)
w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
h_ = h_.reshape(b,c,h,w)
h_ = self.proj_out(h_)
return x+h_
class Model(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, use_timestep=True):
super().__init__()
self.ch = ch
self.temb_ch = self.ch*4
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.use_timestep = use_timestep
if self.use_timestep:
# timestep embedding
self.temb = nn.Module()
self.temb.dense = nn.ModuleList([
torch.nn.Linear(self.ch,
self.temb_ch),
torch.nn.Linear(self.temb_ch,
self.temb_ch),
])
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
skip_in = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
if i_block == self.num_res_blocks:
skip_in = ch*in_ch_mult[i_level]
block.append(ResnetBlock(in_channels=block_in+skip_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x, t=None):
#assert x.shape[2] == x.shape[3] == self.resolution
if self.use_timestep:
# timestep embedding
assert t is not None
temb = get_timestep_embedding(t, self.ch)
temb = self.temb.dense[0](temb)
temb = nonlinearity(temb)
temb = self.temb.dense[1](temb)
else:
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](
torch.cat([h, hs.pop()], dim=1), temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=True, **ignore_kwargs):
super().__init__()
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
#assert x.shape[2] == x.shape[3] == self.resolution, "{}, {}, {}".format(x.shape[2], x.shape[3], self.resolution)
# timestep embedding
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, **ignorekwargs):
super().__init__()
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, z):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
# timestep embedding
temb = None
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](h, temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
if self.give_pre_end:
return h
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
class VUNet(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True,
in_channels, c_channels,
resolution, z_channels, use_timestep=False, **ignore_kwargs):
super().__init__()
self.ch = ch
self.temb_ch = self.ch*4
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.use_timestep = use_timestep
if self.use_timestep:
# timestep embedding
self.temb = nn.Module()
self.temb.dense = nn.ModuleList([
torch.nn.Linear(self.ch,
self.temb_ch),
torch.nn.Linear(self.temb_ch,
self.temb_ch),
])
# downsampling
self.conv_in = torch.nn.Conv2d(c_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
self.z_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=1,
stride=1,
padding=0)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=2*block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = AttnBlock(block_in)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
skip_in = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
if i_block == self.num_res_blocks:
skip_in = ch*in_ch_mult[i_level]
block.append(ResnetBlock(in_channels=block_in+skip_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(AttnBlock(block_in))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
    def forward(self, x, z, t=None):
#assert x.shape[2] == x.shape[3] == self.resolution
if self.use_timestep:
# timestep embedding
assert t is not None
temb = get_timestep_embedding(t, self.ch)
temb = self.temb.dense[0](temb)
temb = nonlinearity(temb)
temb = self.temb.dense[1](temb)
else:
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
z = self.z_in(z)
h = torch.cat((h,z),dim=1)
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](
torch.cat([h, hs.pop()], dim=1), temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
class SimpleDecoder(nn.Module):
def __init__(self, in_channels, out_channels, *args, **kwargs):
super().__init__()
self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
ResnetBlock(in_channels=in_channels,
out_channels=2 * in_channels,
temb_channels=0, dropout=0.0),
ResnetBlock(in_channels=2 * in_channels,
out_channels=4 * in_channels,
temb_channels=0, dropout=0.0),
ResnetBlock(in_channels=4 * in_channels,
out_channels=2 * in_channels,
temb_channels=0, dropout=0.0),
nn.Conv2d(2*in_channels, in_channels, 1),
Upsample(in_channels, with_conv=True)])
# end
self.norm_out = Normalize(in_channels)
self.conv_out = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
for i, layer in enumerate(self.model):
if i in [1,2,3]:
x = layer(x, None)
else:
x = layer(x)
h = self.norm_out(x)
h = nonlinearity(h)
x = self.conv_out(h)
return x
class UpsampleDecoder(nn.Module):
def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
ch_mult=(2,2), dropout=0.0):
super().__init__()
# upsampling
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
block_in = in_channels
curr_res = resolution // 2 ** (self.num_resolutions - 1)
self.res_blocks = nn.ModuleList()
self.upsample_blocks = nn.ModuleList()
for i_level in range(self.num_resolutions):
res_block = []
block_out = ch * ch_mult[i_level]
for i_block in range(self.num_res_blocks + 1):
res_block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
self.res_blocks.append(nn.ModuleList(res_block))
if i_level != self.num_resolutions - 1:
self.upsample_blocks.append(Upsample(block_in, True))
curr_res = curr_res * 2
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
# upsampling
h = x
for k, i_level in enumerate(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = self.res_blocks[i_level][i_block](h, None)
if i_level != self.num_resolutions - 1:
h = self.upsample_blocks[k](h)
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
| 30,221 | 37.895753 | 121 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/transformer/mingpt.py | """
taken from: https://github.com/karpathy/minGPT/
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k,v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here, but an explicit
    implementation is included to show that there is nothing too scary going on.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
mask = torch.tril(torch.ones(config.block_size,
config.block_size))
if hasattr(config, "n_unmasked"):
mask[:config.n_unmasked, :config.n_unmasked] = 1
self.register_buffer("mask", mask.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
present = torch.stack((k, v))
if layer_past is not None:
past_key, past_value = layer_past
k = torch.cat((past_key, k), dim=-2)
v = torch.cat((past_value, v), dim=-2)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
if layer_past is None:
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y, present # TODO: check that this does not break anything
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(), # nice
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, layer_past=None, return_present=False):
# TODO: check that training still works
if return_present: assert not self.training
# layer past: tuple of length two with B, nh, T, hs
attn, present = self.attn(self.ln1(x), layer_past=layer_past)
x = x + attn
x = x + self.mlp(self.ln2(x))
if layer_past is not None or return_present:
return x, present
return x
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256,
embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
super().__init__()
config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
n_layer=n_layer, n_head=n_head, n_embd=n_embd,
n_unmasked=n_unmasked)
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
self.config = config
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, idx, embeddings=None, targets=None):
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
if embeddings is not None: # prepend explicit embeddings
token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
t = token_embeddings.shape[1]
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss
def forward_with_past(self, idx, embeddings=None, targets=None, past=None, past_length=None):
# inference only
assert not self.training
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
if embeddings is not None: # prepend explicit embeddings
token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
if past is not None:
assert past_length is not None
past = torch.cat(past, dim=-2) # n_layer, 2, b, nh, len_past, dim_head
past_shape = list(past.shape)
expected_shape = [self.config.n_layer, 2, idx.shape[0], self.config.n_head, past_length, self.config.n_embd//self.config.n_head]
assert past_shape == expected_shape, f"{past_shape} =/= {expected_shape}"
position_embeddings = self.pos_emb[:, past_length, :] # each position maps to a (learnable) vector
else:
position_embeddings = self.pos_emb[:, :token_embeddings.shape[1], :]
x = self.drop(token_embeddings + position_embeddings)
presents = [] # accumulate over layers
for i, block in enumerate(self.blocks):
x, present = block(x, layer_past=past[i, ...] if past is not None else None, return_present=True)
presents.append(present)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss, torch.stack(presents) # _, _, n_layer, 2, b, nh, 1, dim_head
class DummyGPT(nn.Module):
# for debugging
def __init__(self, add_value=1):
super().__init__()
self.add_value = add_value
def forward(self, idx):
return idx + self.add_value, None
class CodeGPT(nn.Module):
"""Takes in semi-embeddings"""
def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256,
embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
super().__init__()
config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
n_layer=n_layer, n_head=n_head, n_embd=n_embd,
n_unmasked=n_unmasked)
# input embedding stem
self.tok_emb = nn.Linear(in_channels, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
self.config = config
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, idx, embeddings=None, targets=None):
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
if embeddings is not None: # prepend explicit embeddings
token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
t = token_embeddings.shape[1]
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
        x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss
#### sampling utils
def top_k_logits(logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float('Inf')
return out
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
for k in range(steps):
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x
#### clustering utils
class KMeans(nn.Module):
def __init__(self, ncluster=512, nc=3, niter=10):
super().__init__()
self.ncluster = ncluster
self.nc = nc
self.niter = niter
self.shape = (3,32,32)
self.register_buffer("C", torch.zeros(self.ncluster,nc))
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
def is_initialized(self):
return self.initialized.item() == 1
@torch.no_grad()
def initialize(self, x):
N, D = x.shape
assert D == self.nc, D
c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random
for i in range(self.niter):
# assign all pixels to the closest codebook element
a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1)
# move each codebook element to be the mean of the pixels that assigned to it
c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)])
# re-assign any poorly positioned codebook elements
nanix = torch.any(torch.isnan(c), dim=1)
ndead = nanix.sum().item()
print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead))
c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters
self.C.copy_(c)
self.initialized.fill_(1)
def forward(self, x, reverse=False, shape=None):
if not reverse:
# flatten
bs,c,h,w = x.shape
assert c == self.nc
x = x.reshape(bs,c,h*w,1)
C = self.C.permute(1,0)
C = C.reshape(1,c,1,self.ncluster)
a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices
return a
else:
# flatten
bs, HW = x.shape
"""
c = self.C.reshape( 1, self.nc, 1, self.ncluster)
c = c[bs*[0],:,:,:]
c = c[:,:,HW*[0],:]
x = x.reshape(bs, 1, HW, 1)
x = x[:,3*[0],:,:]
x = torch.gather(c, dim=3, index=x)
"""
x = self.C[x]
x = x.permute(0,2,1)
shape = shape if shape is not None else self.shape
x = x.reshape(bs, *shape)
return x
| 15,743 | 40.10705 | 140 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/transformer/permuter.py | import torch
import torch.nn as nn
import numpy as np
class AbstractPermuter(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, reverse=False):
raise NotImplementedError
class Identity(AbstractPermuter):
def __init__(self):
super().__init__()
def forward(self, x, reverse=False):
return x
class Subsample(AbstractPermuter):
def __init__(self, H, W):
super().__init__()
C = 1
indices = np.arange(H*W).reshape(C,H,W)
while min(H, W) > 1:
indices = indices.reshape(C,H//2,2,W//2,2)
indices = indices.transpose(0,2,4,1,3)
indices = indices.reshape(C*4,H//2, W//2)
H = H//2
W = W//2
C = C*4
assert H == W == 1
idx = torch.tensor(indices.ravel())
self.register_buffer('forward_shuffle_idx',
nn.Parameter(idx, requires_grad=False))
self.register_buffer('backward_shuffle_idx',
nn.Parameter(torch.argsort(idx), requires_grad=False))
def forward(self, x, reverse=False):
if not reverse:
return x[:, self.forward_shuffle_idx]
else:
return x[:, self.backward_shuffle_idx]
def mortonify(i, j):
"""(i,j) index to linear morton code"""
i = np.uint64(i)
j = np.uint64(j)
z = np.uint(0)
for pos in range(32):
z = (z |
((j & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos)) |
((i & (np.uint64(1) << np.uint64(pos))) << np.uint64(pos+1))
)
return z
class ZCurve(AbstractPermuter):
def __init__(self, H, W):
super().__init__()
reverseidx = [np.int64(mortonify(i,j)) for i in range(H) for j in range(W)]
idx = np.argsort(reverseidx)
idx = torch.tensor(idx)
reverseidx = torch.tensor(reverseidx)
self.register_buffer('forward_shuffle_idx',
idx)
self.register_buffer('backward_shuffle_idx',
reverseidx)
def forward(self, x, reverse=False):
if not reverse:
return x[:, self.forward_shuffle_idx]
else:
return x[:, self.backward_shuffle_idx]
class SpiralOut(AbstractPermuter):
def __init__(self, H, W):
super().__init__()
assert H == W
size = W
indices = np.arange(size*size).reshape(size,size)
i0 = size//2
j0 = size//2-1
i = i0
j = j0
idx = [indices[i0, j0]]
step_mult = 0
for c in range(1, size//2+1):
step_mult += 1
# steps left
for k in range(step_mult):
i = i - 1
j = j
idx.append(indices[i, j])
# step down
for k in range(step_mult):
i = i
j = j + 1
idx.append(indices[i, j])
step_mult += 1
if c < size//2:
# step right
for k in range(step_mult):
i = i + 1
j = j
idx.append(indices[i, j])
# step up
for k in range(step_mult):
i = i
j = j - 1
idx.append(indices[i, j])
else:
# end reached
for k in range(step_mult-1):
i = i + 1
idx.append(indices[i, j])
assert len(idx) == size*size
idx = torch.tensor(idx)
self.register_buffer('forward_shuffle_idx', idx)
self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
def forward(self, x, reverse=False):
if not reverse:
return x[:, self.forward_shuffle_idx]
else:
return x[:, self.backward_shuffle_idx]
class SpiralIn(AbstractPermuter):
def __init__(self, H, W):
super().__init__()
assert H == W
size = W
indices = np.arange(size*size).reshape(size,size)
i0 = size//2
j0 = size//2-1
i = i0
j = j0
idx = [indices[i0, j0]]
step_mult = 0
for c in range(1, size//2+1):
step_mult += 1
# steps left
for k in range(step_mult):
i = i - 1
j = j
idx.append(indices[i, j])
# step down
for k in range(step_mult):
i = i
j = j + 1
idx.append(indices[i, j])
step_mult += 1
if c < size//2:
# step right
for k in range(step_mult):
i = i + 1
j = j
idx.append(indices[i, j])
# step up
for k in range(step_mult):
i = i
j = j - 1
idx.append(indices[i, j])
else:
# end reached
for k in range(step_mult-1):
i = i + 1
idx.append(indices[i, j])
assert len(idx) == size*size
idx = idx[::-1]
idx = torch.tensor(idx)
self.register_buffer('forward_shuffle_idx', idx)
self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
def forward(self, x, reverse=False):
if not reverse:
return x[:, self.forward_shuffle_idx]
else:
return x[:, self.backward_shuffle_idx]
class Random(nn.Module):
def __init__(self, H, W):
super().__init__()
indices = np.random.RandomState(1).permutation(H*W)
idx = torch.tensor(indices.ravel())
self.register_buffer('forward_shuffle_idx', idx)
self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
def forward(self, x, reverse=False):
if not reverse:
return x[:, self.forward_shuffle_idx]
else:
return x[:, self.backward_shuffle_idx]
class AlternateParsing(AbstractPermuter):
def __init__(self, H, W):
super().__init__()
indices = np.arange(W*H).reshape(H,W)
for i in range(1, H, 2):
indices[i, :] = indices[i, ::-1]
idx = indices.flatten()
assert len(idx) == H*W
idx = torch.tensor(idx)
self.register_buffer('forward_shuffle_idx', idx)
self.register_buffer('backward_shuffle_idx', torch.argsort(idx))
def forward(self, x, reverse=False):
if not reverse:
return x[:, self.forward_shuffle_idx]
else:
return x[:, self.backward_shuffle_idx]
if __name__ == "__main__":
p0 = AlternateParsing(16, 16)
print(p0.forward_shuffle_idx)
print(p0.backward_shuffle_idx)
x = torch.randint(0, 768, size=(11, 256))
y = p0(x)
xre = p0(y, reverse=True)
assert torch.equal(x, xre)
p1 = SpiralOut(2, 2)
print(p1.forward_shuffle_idx)
print(p1.backward_shuffle_idx)
| 7,093 | 27.48996 | 83 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/lpips.py | """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
import torch
import torch.nn as nn
from torchvision import models
from collections import namedtuple
from image_synthesis.taming.util import get_ckpt_path
class LPIPS(nn.Module):
# Learned perceptual metric
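    # Usage sketch (inputs are assumed to be (B, 3, H, W) tensors scaled to [-1, 1]):
    #   lpips = LPIPS().eval()
    #   d = lpips(x, x_rec)   # -> per-image perceptual distance of shape (B, 1, 1, 1)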
def __init__(self, use_dropout=True):
super().__init__()
self.scaling_layer = ScalingLayer()
        self.chns = [64, 128, 256, 512, 512]  # vgg16 features
self.net = vgg16(pretrained=True, requires_grad=False)
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.load_from_pretrained()
for param in self.parameters():
param.requires_grad = False
def load_from_pretrained(self, name="vgg_lpips"):
ckpt = get_ckpt_path(name, "taming/modules/autoencoder/lpips")
self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
print("loaded pretrained LPIPS loss from {}".format(ckpt))
@classmethod
def from_pretrained(cls, name="vgg_lpips"):
model = cls()
ckpt = get_ckpt_path(name)
model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
return model
def forward(self, input, target):
in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
outs0, outs1 = self.net(in0_input), self.net(in1_input)
feats0, feats1, diffs = {}, {}, {}
lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
for kk in range(len(self.chns)):
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
val = res[0]
for l in range(1, len(self.chns)):
val += res[l]
return val
class ScalingLayer(nn.Module):
def __init__(self):
super(ScalingLayer, self).__init__()
self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])
def forward(self, inp):
return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
""" A single linear layer which does a 1x1 conv """
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = [nn.Dropout(), ] if (use_dropout) else []
layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
self.model = nn.Sequential(*layers)
class vgg16(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super(vgg16, self).__init__()
vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
return out
def normalize_tensor(x,eps=1e-10):
norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True))
return x/(norm_factor+eps)
def spatial_average(x, keepdim=True):
return x.mean([2,3],keepdim=keepdim)
| 4,778 | 38.172131 | 104 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/segmentation.py | import torch.nn as nn
import torch.nn.functional as F
class BCELoss(nn.Module):
def forward(self, prediction, target):
loss = F.binary_cross_entropy_with_logits(prediction,target)
return loss, {}
class BCELossWithQuant(nn.Module):
def __init__(self, codebook_weight=1.):
super().__init__()
self.codebook_weight = codebook_weight
def forward(self, qloss, target, prediction, split):
bce_loss = F.binary_cross_entropy_with_logits(prediction,target)
loss = bce_loss + self.codebook_weight*qloss
return loss, {"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/bce_loss".format(split): bce_loss.detach().mean(),
"{}/quant_loss".format(split): qloss.detach().mean()
}
| 816 | 34.521739 | 82 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/modules/losses/vqperceptual.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from image_synthesis.taming.modules.losses.lpips import LPIPS
from image_synthesis.taming.modules.discriminator.model import NLayerDiscriminator, weights_init
class DummyLoss(nn.Module):
def __init__(self):
super().__init__()
def adopt_weight(weight, global_step, threshold=0, value=0.):
if global_step < threshold:
weight = value
return weight
def hinge_d_loss(logits_real, logits_fake):
loss_real = torch.mean(F.relu(1. - logits_real))
loss_fake = torch.mean(F.relu(1. + logits_fake))
d_loss = 0.5 * (loss_real + loss_fake)
return d_loss
def vanilla_d_loss(logits_real, logits_fake):
d_loss = 0.5 * (
torch.mean(torch.nn.functional.softplus(-logits_real)) +
torch.mean(torch.nn.functional.softplus(logits_fake)))
return d_loss
class VQLPIPSWithDiscriminator(nn.Module):
def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
disc_ndf=64, disc_loss="hinge"):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
self.codebook_weight = codebook_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPIPS().eval()
self.perceptual_weight = perceptual_weight
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
n_layers=disc_num_layers,
use_actnorm=use_actnorm,
ndf=disc_ndf
).apply(weights_init)
self.discriminator_iter_start = disc_start
if disc_loss == "hinge":
self.disc_loss = hinge_d_loss
elif disc_loss == "vanilla":
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
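        # Sketch of the adaptive weight computed below (the VQGAN-style balancing term):
        #   d_weight = ||grad_last(nll_loss)|| / (||grad_last(g_loss)|| + 1e-4),
        #   clamped to [0, 1e4], detached, then scaled by self.discriminator_weight.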
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
global_step, last_layer=None, cond=None, split="train"):
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
rec_loss = rec_loss + self.perceptual_weight * p_loss
else:
p_loss = torch.tensor([0.0])
nll_loss = rec_loss
#nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
nll_loss = torch.mean(nll_loss)
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
g_loss = -torch.mean(logits_fake)
try:
d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
except RuntimeError:
assert not self.training
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()
log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/quant_loss".format(split): codebook_loss.detach().mean(),
"{}/nll_loss".format(split): nll_loss.detach().mean(),
"{}/rec_loss".format(split): rec_loss.detach().mean(),
"{}/p_loss".format(split): p_loss.detach().mean(),
"{}/d_weight".format(split): d_weight.detach(),
"{}/disc_factor".format(split): torch.tensor(disc_factor),
"{}/g_loss".format(split): g_loss.detach().mean(),
}
return loss, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
"{}/logits_real".format(split): logits_real.detach().mean(),
"{}/logits_fake".format(split): logits_fake.detach().mean()
}
return d_loss, log
| 6,211 | 44.343066 | 113 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/models/vqgan.py | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.taming.modules.diffusionmodules.model import Encoder, Decoder
from image_synthesis.taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from image_synthesis.taming.modules.vqvae.quantize import GumbelQuantize
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
remap=remap, sane_index_shape=sane_index_shape)
self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
self.image_key = image_key
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
def encode(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
quant, emb_loss, info = self.quantize(h)
return quant, emb_loss, info
def decode(self, quant):
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
return dec
def decode_code(self, code_b):
quant_b = self.quantize.embed_code(code_b)
dec = self.decode(quant_b)
return dec
def forward(self, input):
quant, diff, _ = self.encode(input)
dec = self.decode(quant)
return dec, diff
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
return x.float()
def training_step(self, batch, batch_idx, optimizer_idx):
x = self.get_input(batch, self.image_key)
xrec, qloss = self(x)
if optimizer_idx == 0:
# autoencode
aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("train/aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return aeloss
if optimizer_idx == 1:
# discriminator
discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("train/discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return discloss
def validation_step(self, batch, batch_idx):
x = self.get_input(batch, self.image_key)
xrec, qloss = self(x)
aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
last_layer=self.get_last_layer(), split="val")
discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
last_layer=self.get_last_layer(), split="val")
rec_loss = log_dict_ae["val/rec_loss"]
self.log("val/rec_loss", rec_loss,
prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
self.log("val/aeloss", aeloss,
prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
list(self.decoder.parameters())+
list(self.quantize.parameters())+
list(self.quant_conv.parameters())+
list(self.post_quant_conv.parameters()),
lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr, betas=(0.5, 0.9))
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
def log_images(self, batch, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
xrec, _ = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["inputs"] = x
log["reconstructions"] = xrec
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
class GumbelVQ(VQModel):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
temperature_scheduler_config,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
kl_weight=1e-8,
remap=None,
):
z_channels = ddconfig["z_channels"]
super().__init__(ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=ignore_keys,
image_key=image_key,
colorize_nlabels=colorize_nlabels,
monitor=monitor,
)
self.loss.n_classes = n_embed
self.vocab_size = n_embed
self.quantize = GumbelQuantize(z_channels, embed_dim,
n_embed=n_embed,
kl_weight=kl_weight, temp_init=1.0,
remap=remap)
self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config) # annealing of temp
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
def temperature_scheduling(self):
self.quantize.temperature = self.temperature_scheduler(self.global_step)
def encode_to_prequant(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
return h
def decode_code(self, code_b):
raise NotImplementedError
def training_step(self, batch, batch_idx, optimizer_idx):
self.temperature_scheduling()
x = self.get_input(batch, self.image_key)
xrec, qloss = self(x)
if optimizer_idx == 0:
# autoencode
aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
self.log("temperature", self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return aeloss
if optimizer_idx == 1:
# discriminator
discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return discloss
def validation_step(self, batch, batch_idx):
x = self.get_input(batch, self.image_key)
xrec, qloss = self(x, return_pred_indices=True)
aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,
last_layer=self.get_last_layer(), split="val")
discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,
last_layer=self.get_last_layer(), split="val")
rec_loss = log_dict_ae["val/rec_loss"]
self.log("val/rec_loss", rec_loss,
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log("val/aeloss", aeloss,
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def log_images(self, batch, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
# encode
h = self.encoder(x)
h = self.quant_conv(h)
quant, _, _ = self.quantize(h)
# decode
x_rec = self.decode(quant)
log["inputs"] = x
log["reconstructions"] = x_rec
return log
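

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original taming/VQ-Diffusion code): the
# two optimizers returned by `configure_optimizers` are stepped in alternation
# by PyTorch Lightning, one `optimizer_idx` branch of `training_step` per
# optimizer and batch. Written as a plain training loop it looks roughly like
# this; `model`, `opt_ae`, `opt_disc` and `x` are placeholders.
def _alternating_gan_step_sketch(model, opt_ae, opt_disc, x):
    # (0) autoencoder / generator update
    xrec, qloss = model(x)
    aeloss, _ = model.loss(qloss, x, xrec, 0, model.global_step,
                           last_layer=model.get_last_layer(), split="train")
    opt_ae.zero_grad()
    aeloss.backward()
    opt_ae.step()
    # (1) discriminator update on the same batch
    xrec, qloss = model(x)
    discloss, _ = model.loss(qloss, x, xrec, 1, model.global_step,
                             last_layer=model.get_last_layer(), split="train")
    opt_disc.zero_grad()
    discloss.backward()
    opt_disc.step()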
| 10,554 | 39.28626 | 120 | py |
VQ-Diffusion | VQ-Diffusion-main/image_synthesis/taming/models/cond_transformer.py | import os, math
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from image_synthesis.utils.misc import instantiate_from_config
from image_synthesis.taming.modules.util import SOSProvider
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
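
# Illustrative usage sketch (not part of the original file): `disabled_train`
# is meant to be patched onto a frozen sub-model so that later .train()/.eval()
# calls become no-ops, mirroring `init_first_stage_from_ckpt` below.
# `sub_model` is a placeholder for any nn.Module-like object.
def _freeze_in_eval_mode_sketch(sub_model):
    sub_model = sub_model.eval()      # put the module in eval mode once
    sub_model.train = disabled_train  # subsequent mode switches are ignored
    return sub_model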
class Net2NetTransformer(pl.LightningModule):
def __init__(self,
transformer_config,
first_stage_config,
cond_stage_config,
permuter_config=None,
ckpt_path=None,
ignore_keys=[],
first_stage_key="image",
cond_stage_key="depth",
downsample_cond_size=-1,
pkeep=1.0,
sos_token=0,
unconditional=False,
):
super().__init__()
self.be_unconditional = unconditional
self.sos_token = sos_token
self.first_stage_key = first_stage_key
self.cond_stage_key = cond_stage_key
self.init_first_stage_from_ckpt(first_stage_config)
self.init_cond_stage_from_ckpt(cond_stage_config)
if permuter_config is None:
permuter_config = {"target": "image_synthesis.taming.modules.transformer.permuter.Identity"}
self.permuter = instantiate_from_config(config=permuter_config)
self.transformer = instantiate_from_config(config=transformer_config)
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
self.downsample_cond_size = downsample_cond_size
self.pkeep = pkeep
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
        for k in list(sd.keys()):  # iterate over a copy so keys can be deleted below
for ik in ignore_keys:
if k.startswith(ik):
self.print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
def init_first_stage_from_ckpt(self, config):
model = instantiate_from_config(config)
model = model.eval()
model.train = disabled_train
self.first_stage_model = model
def init_cond_stage_from_ckpt(self, config):
if config == "__is_first_stage__":
print("Using first stage also as cond stage.")
self.cond_stage_model = self.first_stage_model
elif config == "__is_unconditional__" or self.be_unconditional:
print(f"Using no cond stage. Assuming the training is intended to be unconditional. "
f"Prepending {self.sos_token} as a sos token.")
self.be_unconditional = True
self.cond_stage_key = self.first_stage_key
self.cond_stage_model = SOSProvider(self.sos_token)
else:
model = instantiate_from_config(config)
model = model.eval()
model.train = disabled_train
self.cond_stage_model = model
def forward(self, x, c):
# one step to produce the logits
_, z_indices = self.encode_to_z(x)
_, c_indices = self.encode_to_c(c)
if self.training and self.pkeep < 1.0:
mask = torch.bernoulli(self.pkeep*torch.ones(z_indices.shape,
device=z_indices.device))
mask = mask.round().to(dtype=torch.int64)
r_indices = torch.randint_like(z_indices, self.transformer.config.vocab_size)
a_indices = mask*z_indices+(1-mask)*r_indices
else:
a_indices = z_indices
cz_indices = torch.cat((c_indices, a_indices), dim=1)
# target includes all sequence elements (no need to handle first one
# differently because we are conditioning)
target = z_indices
# make the prediction
logits, _ = self.transformer(cz_indices[:, :-1])
# cut off conditioning outputs - output i corresponds to p(z_i | z_{<i}, c)
logits = logits[:, c_indices.shape[1]-1:]
return logits, target
def top_k_logits(self, logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[..., [-1]]] = -float('Inf')
return out
@torch.no_grad()
def sample(self, x, c, steps, temperature=1.0, sample=False, top_k=None,
callback=lambda k: None):
x = torch.cat((c,x),dim=1)
block_size = self.transformer.get_block_size()
assert not self.transformer.training
if self.pkeep <= 0.0:
# one pass suffices since input is pure noise anyway
assert len(x.shape)==2
noise_shape = (x.shape[0], steps-1)
#noise = torch.randint(self.transformer.config.vocab_size, noise_shape).to(x)
noise = c.clone()[:,x.shape[1]-c.shape[1]:-1]
x = torch.cat((x,noise),dim=1)
logits, _ = self.transformer(x)
# take all logits for now and scale by temp
logits = logits / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = self.top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
shape = probs.shape
probs = probs.reshape(shape[0]*shape[1],shape[2])
ix = torch.multinomial(probs, num_samples=1)
probs = probs.reshape(shape[0],shape[1],shape[2])
ix = ix.reshape(shape[0],shape[1])
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# cut off conditioning
x = ix[:, c.shape[1]-1:]
else:
for k in range(steps):
callback(k)
assert x.size(1) <= block_size # make sure model can see conditioning
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = self.transformer(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = self.top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
# cut off conditioning
x = x[:, c.shape[1]:]
return x
@torch.no_grad()
def encode_to_z(self, x):
quant_z, _, info = self.first_stage_model.encode(x)
indices = info[2].view(quant_z.shape[0], -1)
indices = self.permuter(indices)
return quant_z, indices
@torch.no_grad()
def encode_to_c(self, c):
if self.downsample_cond_size > -1:
c = F.interpolate(c, size=(self.downsample_cond_size, self.downsample_cond_size))
quant_c, _, [_,_,indices] = self.cond_stage_model.encode(c)
if len(indices.shape) > 2:
indices = indices.view(c.shape[0], -1)
return quant_c, indices
@torch.no_grad()
def decode_to_img(self, index, zshape):
index = self.permuter(index, reverse=True)
bhwc = (zshape[0],zshape[2],zshape[3],zshape[1])
quant_z = self.first_stage_model.quantize.get_codebook_entry(
index.reshape(-1), shape=bhwc)
x = self.first_stage_model.decode(quant_z)
return x
@torch.no_grad()
def log_images(self, batch, temperature=None, top_k=None, callback=None, lr_interface=False, **kwargs):
log = dict()
N = 4
if lr_interface:
x, c = self.get_xc(batch, N, diffuse=False, upsample_factor=8)
else:
x, c = self.get_xc(batch, N)
x = x.to(device=self.device)
c = c.to(device=self.device)
quant_z, z_indices = self.encode_to_z(x)
quant_c, c_indices = self.encode_to_c(c)
        # create a "half" sample
z_start_indices = z_indices[:,:z_indices.shape[1]//2]
index_sample = self.sample(z_start_indices, c_indices,
steps=z_indices.shape[1]-z_start_indices.shape[1],
temperature=temperature if temperature is not None else 1.0,
sample=True,
top_k=top_k if top_k is not None else 100,
callback=callback if callback is not None else lambda k: None)
x_sample = self.decode_to_img(index_sample, quant_z.shape)
# sample
z_start_indices = z_indices[:, :0]
index_sample = self.sample(z_start_indices, c_indices,
steps=z_indices.shape[1],
temperature=temperature if temperature is not None else 1.0,
sample=True,
top_k=top_k if top_k is not None else 100,
callback=callback if callback is not None else lambda k: None)
x_sample_nopix = self.decode_to_img(index_sample, quant_z.shape)
# det sample
z_start_indices = z_indices[:, :0]
index_sample = self.sample(z_start_indices, c_indices,
steps=z_indices.shape[1],
sample=False,
callback=callback if callback is not None else lambda k: None)
x_sample_det = self.decode_to_img(index_sample, quant_z.shape)
# reconstruction
x_rec = self.decode_to_img(z_indices, quant_z.shape)
log["inputs"] = x
log["reconstructions"] = x_rec
if self.cond_stage_key != "image":
cond_rec = self.cond_stage_model.decode(quant_c)
if self.cond_stage_key == "segmentation":
# get image from segmentation mask
num_classes = cond_rec.shape[1]
c = torch.argmax(c, dim=1, keepdim=True)
c = F.one_hot(c, num_classes=num_classes)
c = c.squeeze(1).permute(0, 3, 1, 2).float()
c = self.cond_stage_model.to_rgb(c)
cond_rec = torch.argmax(cond_rec, dim=1, keepdim=True)
cond_rec = F.one_hot(cond_rec, num_classes=num_classes)
cond_rec = cond_rec.squeeze(1).permute(0, 3, 1, 2).float()
cond_rec = self.cond_stage_model.to_rgb(cond_rec)
log["conditioning_rec"] = cond_rec
log["conditioning"] = c
log["samples_half"] = x_sample
log["samples_nopix"] = x_sample_nopix
log["samples_det"] = x_sample_det
return log
def get_input(self, key, batch):
x = batch[key]
if len(x.shape) == 3:
x = x[..., None]
if len(x.shape) == 4:
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
if x.dtype == torch.double:
x = x.float()
return x
def get_xc(self, batch, N=None):
x = self.get_input(self.first_stage_key, batch)
c = self.get_input(self.cond_stage_key, batch)
if N is not None:
x = x[:N]
c = c[:N]
return x, c
def shared_step(self, batch, batch_idx):
x, c = self.get_xc(batch)
logits, target = self(x, c)
loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
return loss
def training_step(self, batch, batch_idx):
loss = self.shared_step(batch, batch_idx)
self.log("train/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
loss = self.shared_step(batch, batch_idx)
self.log("val/loss", loss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
return loss
def configure_optimizers(self):
"""
Following minGPT:
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.transformer.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.transformer.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
% (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=(0.9, 0.95))
return optimizer
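
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the decay/no-decay
# bucketing documented in `configure_optimizers` above, applied to a tiny toy
# module. The module, sizes and learning rate are made up for the example.
def _weight_decay_grouping_sketch():
    import torch
    toy = torch.nn.Sequential(
        torch.nn.Linear(8, 8),   # .weight -> decay, .bias -> no decay
        torch.nn.LayerNorm(8),   # .weight and .bias -> no decay
    )
    decay, no_decay = [], []
    for _, mod in toy.named_modules():
        for param_name, param in mod.named_parameters(recurse=False):
            if param_name.endswith("bias") or isinstance(mod, torch.nn.LayerNorm):
                no_decay.append(param)
            elif param_name.endswith("weight") and isinstance(mod, torch.nn.Linear):
                decay.append(param)
    optim_groups = [
        {"params": decay, "weight_decay": 0.01},
        {"params": no_decay, "weight_decay": 0.0},
    ]
    return torch.optim.AdamW(optim_groups, lr=1e-4, betas=(0.9, 0.95))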
| 15,049 | 42.75 | 127 | py |
Reflect | Reflect-master/distill/distill_util.py | import tensorflow as tf
from tf2_models.metrics import distill_loss, sequence_distill_loss
@tf.function(experimental_relax_shapes=True)
def get_topk_mask(inputs, k):
inputs_shape = tf.shape(inputs)
inputs_shape = tf.cast(inputs_shape, dtype=tf.int64)
values, indices = tf.nn.top_k(inputs, k=k, sorted=False)
indices = tf.cast(indices, dtype=tf.int64)
k = tf.cast(k, dtype=tf.int64)
temp_indices = tf.meshgrid(*[tf.range(d, dtype=tf.int64) for d in (tf.unstack(
inputs_shape[:(inputs.get_shape().ndims - 1)]) + [k])], indexing='ij')
temp_indices = tf.stack(temp_indices[:-1] + [indices], axis=-1)
full_indices = tf.reshape(temp_indices, [-1, inputs.get_shape().ndims])
values = tf.reshape(values, [-1])
mask_vals = tf.ones_like(values, dtype=tf.int64)
full_indices = tf.cast(
full_indices, dtype=tf.int64)
mask_st = tf.SparseTensor(indices=full_indices, values=mask_vals, dense_shape=inputs_shape)
mask = tf.sparse.to_dense(tf.sparse.reorder(mask_st))
return mask
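
# Illustrative example (not part of the original file): with k=2 the two
# largest logits per row are marked with 1 and everything else with 0
# (dtype int64). The input values are arbitrary.
def _topk_mask_example():
  logits = tf.constant([[0.1, 0.9, 0.5, 0.2]])
  mask = get_topk_mask(logits, k=2)
  # mask is expected to equal [[0, 1, 1, 0]]
  return mask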
@tf.function(experimental_relax_shapes=True)
def get_topk_masked_probs(logits, labels, temperature, k=100, padding_symbol=0):
  """Tempered softmax restricted to the top-k teacher logits; padded positions are replaced by a one-hot on index 0."""
topk_mask = (1 - tf.cast(get_topk_mask(logits, k), dtype=tf.float32)) * -10e8
teacher_probs = tf.nn.softmax((logits + topk_mask) / temperature, axis=-1)
sequence_mask = tf.cast(labels != padding_symbol, dtype=tf.float32)
masked_teacher_probs = teacher_probs * sequence_mask[..., None] + tf.eye(tf.shape(teacher_probs)[-1])[0] * (
1 - sequence_mask[..., None])
return masked_teacher_probs
@tf.function(experimental_relax_shapes=True)
def get_masked_probs(logits, labels, temperature, padding_symbol=0):
teacher_probs = tf.nn.softmax(logits / temperature, axis=-1)
sequence_mask = tf.cast(labels != padding_symbol, dtype=tf.float32)
masked_teacher_probs = teacher_probs * sequence_mask[..., None] + tf.eye(tf.shape(teacher_probs)[-1])[0] * (
1 - sequence_mask[..., None])
return masked_teacher_probs
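
# Illustrative example (not part of the original file): `get_masked_probs`
# softens the teacher logits with a temperature and, for padded positions
# (label == padding_symbol), replaces the distribution with a one-hot on
# index 0. Shapes and values below are arbitrary.
def _masked_probs_example():
  logits = tf.random.normal((1, 3, 5))                # (batch, time, vocab)
  labels = tf.constant([[4, 2, 0]], dtype=tf.int64)   # last position is padding
  probs = get_masked_probs(logits, labels, temperature=tf.constant(2.0))
  # probs[0, :2] are tempered softmax rows; probs[0, 2] equals [1, 0, 0, 0, 0]
  return probs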
@tf.function(experimental_relax_shapes=True)
def get_probs(logits, labels, temperature):
teacher_probs = tf.nn.softmax(logits / temperature, axis=-1)
return teacher_probs
class DistillLoss(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0, tmp=1.0,
**kwargs):
super(DistillLoss, self).__init__(**kwargs)
self.tmp = tf.Variable(tmp, dtype=tf.float32, name="temp")
self.padding_symbol = tf.Variable(padding_symbol, dtype=tf.int64, name="padding_symbol")
def call(self, y_true, y_pred):
return distill_loss(y_true, y_pred, self.tmp)
class SequenceDistillLoss(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0, tmp=1.0,
**kwargs):
super(SequenceDistillLoss, self).__init__(**kwargs)
self.tmp = tf.Variable(tmp, dtype=tf.float32, name="tmp")
self.padding_symbol = tf.Variable(padding_symbol, dtype=tf.int64, name="padding_symbol")
def call(self, y_true, y_pred):
return sequence_distill_loss(y_true, y_pred, self.padding_symbol, self.tmp)
def get_distill_scheduler(schedule, min=0.0, max=1.0, decay_steps=10000):
  """Returns a callable mapping the training step to the distillation rate ('exp', 'crs', 'lnr', 'stp', or constant)."""
  if schedule == "exp":  # use ==, not `is`, to compare strings
    scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
      max,
      decay_steps=1000,
      decay_rate=0.96,
      staircase=True)
  elif schedule == 'crs':
    scheduler = tf.keras.experimental.CosineDecayRestarts(
      max,
      decay_steps,
      t_mul=2.0,
      m_mul=0.9,
      alpha=0.001,
    )
  elif schedule == 'lnr':
    a = (max - min) / decay_steps
    scheduler = lambda x: max - a*x
  elif schedule == 'stp':
    scheduler = lambda x: max if x < decay_steps else min
  else:
    scheduler = lambda x: max
return scheduler | 3,653 | 35.54 | 110 | py |
Reflect | Reflect-master/distill/distiller.py | import tensorflow as tf
import os
from distill.distill_util import get_distill_scheduler
from tf2_models.train_utils import ExponentialDecayWithWarmpUp
from tf2_models.trainer import OPTIMIZER_DIC
import numpy as np
class Distiller(object):
''' Pipeline for offline distillation.
'''
def __init__(self, hparams, distill_params, teacher_model, student_model, task,
teacher_log_dir, student_log_dir, teacher_ckpt_dir, student_ckpt_dir):
self.teacher_model = teacher_model
self.student_model = student_model
self.task = task
self.distill_params = distill_params
self.temperature = tf.convert_to_tensor(distill_params.distill_temp)
self.distill_loss = self.task.get_distill_loss_fn(self.distill_params)
self.task_loss = self.task.get_loss_fn()
self.metrics = self.task.metrics()
self.task_probs_fn = self.task.get_probs_fn()
self.hparams = hparams
self.create_student_optimizer()
self.setup_ckp_and_summary(student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir)
self.setup_models(distill_params, task)
self.distillrate_scheduler = get_distill_scheduler(distill_params.distill_schedule,
min=distill_params.distill_min_rate,
max=distill_params.student_distill_rate)
def create_student_optimizer(self):
student_initial_learning_rate = self.distill_params.student_learning_rate
if 'crs' in self.distill_params.schedule:
lr_schedule = (
tf.keras.experimental.CosineDecayRestarts(
student_initial_learning_rate,
first_decay_steps=self.distill_params.student_decay_steps,
t_mul=5.0, #0.2
m_mul=self.distill_params.student_decay_rate,
alpha=0.001,
))
else:
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=student_initial_learning_rate,
decay_steps=self.distill_params.student_decay_steps,
decay_rate=self.distill_params.student_decay_rate,
warmup_steps=self.distill_params.student_warmup_steps,
hold_base_rate_steps=self.distill_params.student_hold_base_rate_steps)
self.student_optimizer = OPTIMIZER_DIC[self.distill_params.student_optimizer](
learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
def setup_ckp_and_summary(self, student_ckpt_dir, student_log_dir, teacher_ckpt_dir, teacher_log_dir):
# Init checkpoints
self.teacher_ckpt = tf.train.Checkpoint(net=self.teacher_model)
self.teacher_manager = tf.train.CheckpointManager(self.teacher_ckpt, teacher_ckpt_dir, max_to_keep=self.hparams.max_checkpoints)
self.student_ckpt = tf.train.Checkpoint(step=tf.Variable(1),
optimizer=self.student_optimizer,
net=self.student_model)
self.student_manager = tf.train.CheckpointManager(self.student_ckpt, student_ckpt_dir,
keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours,
max_to_keep=self.hparams.max_checkpoints)
# Init summary
student_summary_dir = os.path.join(student_log_dir, 'summaries')
tf.io.gfile.makedirs(student_log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(student_summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.student_optimizer.iterations)
def setup_models(self, distill_params, task):
x, y = iter(self.task.valid_dataset).next()
self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
self.student_model.summary()
self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
self.teacher_model.summary()
self.student_model.compile(
optimizer=self.student_optimizer,
loss=self.task_loss,
metrics=[self.metrics])
self.teacher_model.compile(
loss=self.task_loss,
metrics=[self.metrics])
def restore_teacher(self):
''' Restore the teacher model from its checkpoint.
'''
self.teacher_ckpt.restore(self.teacher_manager.latest_checkpoint)
if self.teacher_manager.latest_checkpoint:
print("Restored teacher from {}".format(self.teacher_manager.latest_checkpoint))
else:
print("Initializing teacher from scratch.")
def restore_student(self):
''' Restore the student model from its checkpoint.
'''
self.student_ckpt.restore(self.student_manager.latest_checkpoint)
if self.student_manager.latest_checkpoint:
print("Restored student from {}".format(self.student_manager.latest_checkpoint))
else:
print("Initializing student from scratch.")
def save_student(self):
self.student_ckpt.step.assign_add(1)
save_path = self.student_manager.save()
tf.print("Saved student checkpoint", save_path)
def distill_loop(self):
''' Offline Distillation main loop.
'''
# logging.info('Distribute strategy: mirrored.')
# strategy = tf.distribute.MirroredStrategy()
# train_dataset = strategy.experimental_distribute_dataset(self.task.train_dataset)
# valid_dataset = strategy.experimental_distribute_dataset(self.task.valid_dataset)
@tf.function(experimental_relax_shapes=True)
def student_train_step(x, teacher_y, y_true):
''' Training step for the student model (this is the only training step for offline distillation).
:param x: input
      :param teacher_y: output of the teacher model, used to compute the distill loss
:param y_true: actual outputs, used to compute actual loss
:return:
distill_loss
actual_loss
'''
student_distill_rate = self.distillrate_scheduler(self.student_optimizer.iterations)
student_gold_rate = 1 - student_distill_rate
with tf.GradientTape() as tape:
logits = self.student_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
distill_loss = self.distill_loss(y_pred=logits, y_true=teacher_y)
reg_loss = tf.math.add_n(self.student_model.losses)
actual_loss = self.task_loss(y_pred=logits, y_true=y_true)
final_loss = student_distill_rate * distill_loss + \
student_gold_rate * actual_loss + reg_loss
grads = tape.gradient(final_loss, self.student_model.trainable_weights)
self.student_model.optimizer.apply_gradients(zip(grads, self.student_model.trainable_weights),
name="student_optimizer")
return distill_loss, actual_loss, student_distill_rate
@tf.function
def epoch_loop():
step = 0
for x,y in self.task.train_dataset:
teacher_logits = self.teacher_model(x, padding_symbol=self.task.input_padding_symbol, training=True)
teacher_probs = self.task_probs_fn(logits=teacher_logits, labels=y, temperature=self.temperature)
distill_loss, actual_loss, student_distill_rate = student_train_step(x=x, teacher_y=teacher_probs, y_true=y)
# Log every 200 batches.
if step % 200 == 0:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('student_learning_rate',
self.student_model.optimizer.learning_rate(self.student_model.optimizer.iterations),
)
tf.summary.scalar('fine_distill_loss',
distill_loss)
tf.summary.scalar('student_distill_rate',
student_distill_rate)
step += 1
# Stop at the end of the epoch
if (step % self.task.n_train_batches) == 0:
with tf.summary.experimental.summary_scope("student_train"):
tf.summary.scalar('distill_loss', distill_loss)
tf.summary.scalar('actual_loss', actual_loss)
break
@tf.function
def summarize(teacher_eval_results, student_eval_results):
with tf.summary.experimental.summary_scope("eval_teacher"):
for i, m_name in enumerate(self.teacher_model.metrics_names):
tf.summary.scalar(m_name, teacher_eval_results[i])
with tf.summary.experimental.summary_scope("eval_student"):
for i, m_name in enumerate(self.student_model.metrics_names):
tf.summary.scalar(m_name, student_eval_results[i])
with self.summary_writer.as_default():
for epoch in np.arange(self.distill_params.n_epochs):
epoch_loop()
# Evaluate Teacher
teacher_eval_results = self.teacher_model.evaluate(self.task.valid_dataset,
steps=self.task.n_valid_batches)
# Evaluate Student
student_eval_results = self.student_model.evaluate(self.task.valid_dataset,
steps=self.task.n_valid_batches)
summarize(teacher_eval_results, student_eval_results)
        pow2 = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512]  # epochs at which a checkpoint is kept when keep_some_checkpoints is enabled
if self.hparams.keep_some_checkpoints:
if (epoch in pow2) or (epoch == (self.distill_params.n_epochs - 1)):
self.save_student()
else:
self.save_student()
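

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how the student loss is
# mixed in `student_train_step` above. With the linear ('lnr') schedule the
# distillation weight decays from `max_rate` toward `min_rate` over
# `decay_steps`, and the gold (hard-label) weight is its complement.
# All numbers are arbitrary toy values.
def _loss_mixing_example(step=5000, decay_steps=10000, max_rate=0.9, min_rate=0.1):
  distill_rate = max_rate - (max_rate - min_rate) / decay_steps * step
  gold_rate = 1.0 - distill_rate
  distill_loss, actual_loss, reg_loss = 2.0, 1.5, 0.01
  final_loss = distill_rate * distill_loss + gold_rate * actual_loss + reg_loss
  return final_loss  # 0.5 * 2.0 + 0.5 * 1.5 + 0.01 = 1.76 at step 5000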
| 9,284 | 44.292683 | 132 | py |
Reflect | Reflect-master/tf2_models/embedding.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
class SharedEmbeddings(tf.keras.layers.Layer):
"""Construct shared token embeddings.
"""
def __init__(self, vocab_size, hidden_size, initializer_range=None, regularizer=None, **kwargs):
super(SharedEmbeddings, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range
self.regularizer = regularizer
def build(self, input_shape):
"""Build shared word embedding layer
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
self.weight = self.add_weight(
"weight",
shape=[self.vocab_size, self.hidden_size],
initializer=get_initializer(self.initializer_range),
regularizer=self.regularizer)
super(SharedEmbeddings, self).build(input_shape)
def call(self, inputs, mode="embedding"):
"""Get token embeddings of inputs.
Args:
inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: (1) If mode == "embedding", output embedding tensor, float32 with
shape [batch_size, length, embedding_size]; (2) mode == "linear", output
linear tensor, float32 with shape [batch_size, length, vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, input_ids):
"""Applies embedding based on inputs tensor."""
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
"""Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size]) | 2,633 | 38.313433 | 137 | py |
Reflect | Reflect-master/tf2_models/lm_transformer.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
from tf2_models.embedding import SharedEmbeddings
from tf2_models.transformer_layers import Block
from tf2_models.transformers import *
class LmGPT2(tf.keras.Model):
def __init__(self, hparams, scope='lm_gpt2', *inputs, **kwargs):
del kwargs['cl_token']
super(LmGPT2, self).__init__(hparams, *inputs, **kwargs)
self.scope = scope
self.rep_index = 1
self.rep_layer = None
self.model_name = '_'.join([self.scope,
'h-'+str(hparams.embedding_dim),
'd-'+str(hparams.depth),
'rdrop-'+str(hparams.resid_pdrop),
'adrop-' + str(hparams.attn_pdrop),
'indrop-'+str(hparams.embd_pdrop)])
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.transformer = GPT2(hparams, name='transformer')
def call(self, inputs, padding_symbol=None, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
#outputs = (lm_logits,) + transformer_outputs[1:]
return lm_logits # lm_logits, presents, (all hidden_states), (attentions)
def detailed_call(self, inputs, padding_symbol=None, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs
return outputs # lm_logits, presents, (all hidden_states), (attentions)
class LmGPT2SharedWeights(LmGPT2):
def __init__(self, hparams, scope='lm_gpt2_shared_weights', *inputs, **kwargs):
super(LmGPT2SharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs)
@tf.function
def create_vars(self, hparams):
self.transformer = GPT2SharedWeights(hparams, name='shared_transformer')
def call(self, inputs, padding_symbol=None, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.wte(hidden_states, mode="linear")
#outputs = (lm_logits,) + transformer_outputs[1:]
return lm_logits # lm_logits, presents, (all hidden_states), (attentions)
class ClassifierGPT2(tf.keras.Model):
def __init__(self, hparams, scope='cl_gpt2',*inputs, **kwargs):
self.cl_token = kwargs['cl_token']
del kwargs['cl_token']
super(ClassifierGPT2, self).__init__(hparams, *inputs, **kwargs)
self.rep_index = 2
self.rep_layer = None
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-'+str(hparams.embedding_dim),
'd-'+str(hparams.depth),
'rdrop-'+str(hparams.resid_pdrop),
'adrop-' + str(hparams.attn_pdrop),
'indrop-'+str(hparams.embd_pdrop)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(**kwargs)
#@tf.function
def create_vars(self,**kwargs):
self.transformer = GPT2(self.hparams, name='transformer',
**kwargs)
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
def call(self, inputs, padding_symbol=None, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
mask = tf.cast(inputs != 0, dtype=tf.int32)
inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1
batch_indices = tf.range(batch_size)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
hidden_states = tf.gather_nd(transformer_outputs[0], indices)
cl_logits = self.e2c(hidden_states)
return cl_logits
# Add CL token:
batch_size = tf.shape(inputs)[0]
#cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
#cl_tokens = tf.tile(cl_token, (batch_size, 1))
#inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits = _call(batch_size, inputs, transformer_outputs)
return cl_logits
def detailed_call(self, inputs, padding_symbol=None, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
mask = tf.cast(inputs != 0, dtype=tf.int32)
inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1
batch_indices = tf.range(batch_size)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
hidden_states = tf.gather_nd(transformer_outputs[0], indices)
cl_logits = self.e2c(hidden_states)
return cl_logits, hidden_states
# Add CL token:
batch_size = tf.shape(inputs)[0]
#cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
#cl_tokens = tf.tile(cl_token, (batch_size, 1))
#inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits, hidden_states = _call(batch_size, inputs, transformer_outputs)
outputs = (cl_logits, hidden_states) + transformer_outputs
return outputs
class ClassifierGPT2SharedWeights(ClassifierGPT2):
def __init__(self, hparams, scope='cl_gpt2_shared_weights', *inputs, **kwargs):
super(ClassifierGPT2SharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs)
@tf.function
def create_vars(self):
self.transformer = GPT2SharedWeights(self.hparams, name='shared_transformer')
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
class ClassifierBERT(tf.keras.Model):
def __init__(self, hparams, scope='cl_bert',*inputs, **kwargs):
self.cl_token = kwargs['cl_token']
del kwargs['cl_token']
super(ClassifierBERT, self).__init__(hparams, *inputs, **kwargs)
self.scope = scope
self.hparams = hparams
self.rep_index = 2
self.rep_layer = None
self.model_name = '_'.join([self.scope,
'h-'+str(hparams.embedding_dim),
'd-'+str(hparams.depth),
'rdrop-'+str(hparams.resid_pdrop),
'adrop-' + str(hparams.attn_pdrop),
'indrop-'+str(hparams.embd_pdrop)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(**kwargs)
#@tf.function
def create_vars(self,**kwargs):
self.transformer = Bert(self.hparams, name='transformer',
**kwargs)
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
def call(self, inputs, padding_symbol=None, add_cls=True, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
#mask = tf.cast(inputs != 0, dtype=tf.int32)
#inputs_lengths = tf.reduce_sum(mask, axis=-1) - 1
#batch_indices = tf.range(batch_size)
#indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
hidden_states = transformer_outputs[0][:,0]#tf.gather_nd(transformer_outputs[0], indices)
cl_logits = self.e2c(hidden_states, **kwargs)
return cl_logits
# Add CL token:
batch_size = tf.shape(inputs)[0]
if add_cls:
cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
cl_tokens = tf.tile(cl_token, (batch_size, 1))
inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits = _call(batch_size, inputs, transformer_outputs)
return cl_logits
def detailed_call(self, inputs, padding_symbol=None, add_cls=True, **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(batch_size, inputs, transformer_outputs):
hidden_states = transformer_outputs[0][:, 0]
cl_logits = self.e2c(hidden_states)
return cl_logits, hidden_states
# Add CL token:
batch_size = tf.shape(inputs)[0]
if add_cls:
cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1,1))
cl_tokens = tf.tile(cl_token, (batch_size, 1))
inputs = tf.concat([cl_tokens, inputs], axis=-1)
transformer_outputs = self.transformer(inputs, **kwargs)
cl_logits, hidden_states = _call(batch_size, inputs, transformer_outputs)
reps_start_index = 1 if add_cls else 0
outputs = (cl_logits, hidden_states, transformer_outputs[0][:,reps_start_index:,:]) + transformer_outputs
return outputs
def get_input_embeddings(self, inputs, add_cls=True, **kwargs):
# Add CL token:
batch_size = tf.shape(inputs)[0]
if add_cls:
cl_token = tf.reshape(tf.convert_to_tensor(self.cl_token[0], dtype=tf.int64)[None], (-1, 1))
cl_tokens = tf.tile(cl_token, (batch_size, 1))
inputs = tf.concat([cl_tokens, inputs], axis=-1)
outputs = self.transformer.get_input_embeddings(inputs, **kwargs)
return outputs
def call_with_embeddings(self, input_embeddings, input_shape, padding_mask, past , **kwargs):
transformer_outputs = self.transformer.call_with_embeddings(input_embeddings=input_embeddings,
input_shape=input_shape, padding_mask=padding_mask,
past=past, **kwargs)
hidden_states = transformer_outputs[0][:, 0]
cl_logits = self.e2c(hidden_states)
return cl_logits, hidden_states
class ClassifierBERTSharedWeights(ClassifierBERT):
def __init__(self, hparams, scope='cl_bert_shared', *inputs, **kwargs):
super(ClassifierBERTSharedWeights, self).__init__(hparams, scope=scope, *inputs, **kwargs)
# @tf.function
def create_vars(self, **kwargs):
self.transformer = BertSharedWeights(self.hparams, name='transformer',
**kwargs)
self.e2c = tf.keras.layers.Dense(units=self.hparams.num_labels,
kernel_initializer=get_initializer(self.hparams.initializer_range),
name='e2c')
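

# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the classification
# readout used by the classifiers above -- prepend a [CLS]-style token to each
# sequence, take the transformer's hidden state at position 0 and project it
# to the label space. The shapes, the `cl_token_id` value and the stand-in
# hidden states are made up.
def _cls_readout_sketch():
  batch_size, seq_len, hidden, num_labels = 2, 5, 8, 3
  cl_token_id = tf.constant(1, dtype=tf.int64)
  inputs = tf.random.uniform((batch_size, seq_len), maxval=10, dtype=tf.int64)
  cls_column = tf.fill((batch_size, 1), cl_token_id)
  inputs = tf.concat([cls_column, inputs], axis=-1)      # prepend the [CLS] token
  # stand-in for `self.transformer(inputs)[0]`:
  hidden_states = tf.random.normal((batch_size, seq_len + 1, hidden))
  cls_state = hidden_states[:, 0]                        # position 0 == [CLS]
  logits = tf.keras.layers.Dense(num_labels, name="e2c")(cls_state)
  return logits  # shape (batch_size, num_labels)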
| 10,814 | 39.965909 | 109 | py |
Reflect | Reflect-master/tf2_models/ff.py | import tensorflow as tf
import numpy as np
class VanillaFF(tf.keras.models.Sequential):
def __init__(self, hparams, scope="cl_vff", *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(VanillaFF, self).__init__()
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-' + '.'.join([str(x) for x in self.hparams.hidden_dim]),
'd-' + str(self.hparams.depth),
'hdrop-' + str(self.hparams.hidden_dropout_rate),
'indrop-' + str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00001,
l2=0.00001)
self.create_vars()
self.rep_index = 1
self.rep_layer = -1
def create_vars(self):
self.flat = tf.keras.layers.Flatten()
# self.batch_norm = tf.keras.layers.BatchNormalization()
# self.batch_norm.trainable = True
self.indrop = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.activation = tf.keras.layers.Activation('relu')
self.hidden_layers = []
self.hidden_batch_norms = []
self.hidden_dropouts = []
for i in np.arange(self.hparams.depth):
self.hidden_layers.append(tf.keras.layers.Dense(self.hparams.hidden_dim[i],
activation=None, #'relu',
kernel_regularizer=self.regularizer))
self.hidden_batch_norms.append(tf.keras.layers.BatchNormalization())
self.hidden_batch_norms[i].trainable = True
self.hidden_dropouts.append(tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate))
self.final_dense = tf.keras.layers.Dense(self.hparams.output_dim,
kernel_regularizer=self.regularizer)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = self.flat(inputs, **kwargs)
# x = self.batch_norm(x, training=training, **kwargs)
x = self.indrop(x, training=training, **kwargs)
for i in np.arange(self.hparams.depth):
x = self.hidden_layers[i](x, training=training, **kwargs)
x = self.activation(x)
x = self.hidden_batch_norms[i](x, training=training, **kwargs)
x = self.hidden_dropouts[i](x, training=training, **kwargs)
logits = self.final_dense(x, training=training, **kwargs)
return logits
def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs):
layer_activations = []
x = self.flat(inputs, **kwargs)
x = self.indrop(x, training=None, **kwargs)
layer_activations.append(x)
for i in np.arange(self.hparams.depth):
x = self.hidden_layers[i](x, training=training, **kwargs)
x = self.activation(x)
x = self.hidden_batch_norms[i](x, training=training, **kwargs)
x = self.hidden_dropouts[i](x, training=training, **kwargs)
layer_activations.append(x)
    pnltimt = x  # penultimate activations (input to the final dense layer)
logits = self.final_dense(x, training=None, **kwargs)
return logits, pnltimt, layer_activations
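

# Illustrative usage sketch (not part of the original file): VanillaFF expects
# `hparams.hidden_dim` to be a list with one width per hidden layer (indexed up
# to `depth`). All values below are made up for the example.
def _vanilla_ff_usage_sketch():
  class ToyHparams:
    hidden_dim = [64, 32]
    depth = 2
    hidden_dropout_rate = 0.1
    input_dropout_rate = 0.1
    output_dim = 10
  model = VanillaFF(ToyHparams)
  logits = model(np.zeros((4, 28, 28), dtype=np.float32), training=False)
  return logits  # shape (4, 10)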
| 3,116 | 36.107143 | 92 | py |
Reflect | Reflect-master/tf2_models/common_layers.py | import tensorflow as tf
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import nest
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
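
# Illustrative check (not part of the original file): the tanh approximation
# above closely tracks the exact GELU, x * Phi(x). Approximate values:
#   gelu(-1.0) ~ -0.159, gelu(0.0) = 0.0, gelu(1.0) ~ 0.841, gelu(2.0) ~ 1.955
def _gelu_example():
  return gelu(tf.constant([-1.0, 0.0, 1.0, 2.0]))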
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
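
# Illustrative sketch (not part of the original file): `shape_list` mixes
# static dimensions (plain Python ints) with dynamic ones (scalar tensors), so
# the result works for reshaping even when the batch size is unknown at trace
# time. The shapes below are arbitrary.
def _shape_list_example():
  @tf.function(input_signature=[tf.TensorSpec([None, 7], tf.float32)])
  def flatten(x):
    dims = shape_list(x)  # [<dynamic batch-size tensor>, 7]
    return tf.reshape(x, [dims[0] * dims[1]])
  return flatten(tf.zeros((3, 7)))  # shape (21,)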
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def make_variable_state_initializer(**kwargs):
def variable_state_initializer(shape, batch_size, dtype, index):
args = kwargs.copy()
if args.get('name'):
args['name'] = args['name'] + '_' + str(index)
else:
args['name'] = 'init_state_' + str(index)
args['shape'] = shape
args['dtype'] = dtype
    var = tf.compat.v1.get_variable(**args)  # tf.get_variable is TF1-only; use the compat endpoint in TF2
    var = tf.expand_dims(var, 0)
    var = tf.tile(var, tf.stack([batch_size] + [1] * len(shape)))  # tf.pack was renamed to tf.stack
    var.set_shape([None] + tensor_shape.as_shape(shape).as_list())  # prepend the batch dimension
return var
return variable_state_initializer
def get_initial_cell_state(cell, initializer, batch_size, dtype):
"""Return state tensor(s), initialized with initializer.
Args:
cell: RNNCell.
batch_size: int, float, or unit Tensor representing the batch size.
initializer: function with two arguments, shape and dtype, that
determines how the state is initialized.
dtype: the data type to use for the state.
Returns:
If `state_size` is an int or TensorShape, then the return value is a
`N-D` tensor of shape `[batch_size x state_size]` initialized
according to the initializer.
If `state_size` is a nested list or tuple, then the return value is
a nested list or tuple (of the same structure) of `2-D` tensors with
the shapes `[batch_size x s]` for each s in `state_size`.
"""
state_size = cell.state_size
if nest.is_sequence(state_size):
state_size_flat = nest.flatten(state_size)
init_state_flat = [
initializer(s, batch_size, dtype, i)
for i, s in enumerate(state_size_flat)]
init_state = nest.pack_sequence_as(structure=state_size,
flat_sequence=init_state_flat)
else:
init_state_size = state_size
init_state = initializer(init_state_size, batch_size, dtype, None)
return init_state
def _generate_variable_state(batch_size_tensor, state_size, dtype):
"""Generate a variable tensor with shape [batch_size, state_size]."""
def create_variable(unnested_state_size):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
    return tf.Variable(tf.zeros(init_state_size, dtype=dtype))  # zero-filled variable of shape [batch_size] + state_size
if nest.is_sequence(state_size):
return nest.map_structure(create_variable, state_size)
else:
return create_variable(state_size)
| 3,398 | 34.041237 | 72 | py |
Reflect | Reflect-master/tf2_models/lm_lstm.py | import absl
import tensorflow as tf
import numpy as np
from tensorboard.compat.tensorflow_stub import tensor_shape
from tensorflow.python.util import nest
from tf2_models.common_layers import get_initializer
from tf2_models.embedding import SharedEmbeddings
from tf2_models.utils import create_init_var
class LmLSTM(tf.keras.Model):
def __init__(self, hparams, scope="lm_lstm",*inputs, **kwargs):
del kwargs['cl_token']
super(LmLSTM, self).__init__(*inputs, **kwargs)
self.hparams = hparams
self.scope = scope
self.rep_index = 2
self.rep_layer = -1
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.00001)
self.create_vars()
@tf.function
def create_vars(self):
self.input_embedding = tf.compat.v2.keras.layers.Embedding(input_dim=self.hparams.input_dim,
output_dim=self.hparams.embedding_dim,
input_shape=(None, None),
mask_zero=True,
embeddings_regularizer=self.regularizer,
name='input_embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.output_embedding = tf.compat.v2.keras.layers.Dense(units=self.hparams.output_dim,
kernel_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
name='output_projection')
self.stacked_rnns = []
for _ in np.arange(self.hparams.depth):
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
))
@tf.function(experimental_relax_shapes=True)
def call(self, inputs, **kwargs):
if 'training' in kwargs:
training = kwargs['training']
else:
training = False
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs),training=training)
rnn_outputs = embedded_input
input_mask = self.input_embedding.compute_mask(inputs)
float_input_mask = tf.cast(input_mask, dtype=tf.float32)
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training)
rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training)
logits = self.output_embedding(rnn_outputs)
logits = logits * float_input_mask[...,None] + tf.eye(self.hparams.output_dim)[0] * (1 - float_input_mask[...,None])
return logits
class ClassifierLSTM(tf.keras.Model):
def __init__(self, hparams, scope="cl_lstm", *inputs, **kwargs):
del kwargs['cl_token']
super(ClassifierLSTM, self).__init__(*inputs, **kwargs)
self.hparams = hparams
self.scope = scope
self.rep_index = 2
self.rep_layer = -1
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.00001)
self.create_vars()
@tf.function
def create_vars(self):
self.input_embedding = tf.compat.v2.keras.layers.Embedding(input_dim=self.hparams.input_dim,
output_dim=self.hparams.embedding_dim,
input_shape=(None, None),
mask_zero=True,
embeddings_regularizer=self.regularizer,
name='input_embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.output_embedding = tf.compat.v2.keras.layers.Dense(units=self.hparams.output_dim,
kernel_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
name='output_projection')
self.stacked_rnns = []
for _ in np.arange(self.hparams.depth):
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
))
def call(self, inputs, **kwargs):
if 'training' in kwargs:
training = kwargs['training']
else:
training = False
@tf.function(experimental_relax_shapes=True)
def _call(inputs, training):
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs),training=training)
rnn_outputs = embedded_input
input_mask = self.input_embedding.compute_mask(inputs)
inputs_length = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1)
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training)
rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training)
batch_size = tf.shape(rnn_outputs)[0]
bach_indices = tf.expand_dims(tf.range(batch_size), 1)
final_indexes = tf.concat([bach_indices, tf.expand_dims(tf.cast(inputs_length - 1, dtype=tf.int32), 1)], axis=-1)
final_rnn_outputs = tf.gather_nd(rnn_outputs, final_indexes)
logits = self.output_embedding(final_rnn_outputs)
return logits
return _call(inputs, training)
#@tf.function(experimental_relax_shapes=True)
def detailed_call(self, inputs, **kwargs):
if 'training' in kwargs:
training = kwargs['training']
else:
training = False
@tf.function(experimental_relax_shapes=True)
def _call(inputs, training):
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs), training=training)
rnn_outputs = embedded_input
input_mask = self.input_embedding.compute_mask(inputs)
inputs_length = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1)
hidden_activation = [embedded_input]
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask, training=training)
hidden_activation.append(rnn_outputs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs, training=training)
batch_size = tf.shape(rnn_outputs)[0]
bach_indices = tf.expand_dims(tf.range(batch_size), 1)
final_indexes = tf.concat([bach_indices, tf.expand_dims(tf.cast(inputs_length - 1, dtype=tf.int32), 1)], axis=-1)
final_rnn_outputs = tf.gather_nd(rnn_outputs, final_indexes)
logits = self.output_embedding(final_rnn_outputs)
out = logits
out = (out, final_rnn_outputs, hidden_activation)
return out
return _call(inputs, training)
class LmLSTMSharedEmb(tf.keras.Model):
def __init__(self, hparams, scope="lm_lstm_shared_emb",*inputs, **kwargs):
del kwargs['cl_token']
super(LmLSTMSharedEmb, self).__init__()
self.rep_index = 3
self.rep_layer = -1
self.hparams = hparams
self.scope = scope
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0000)
self.create_vars()
def create_vars(self):
@tf.function
def _create_vars():
self.input_embedding = SharedEmbeddings(vocab_size=self.hparams.input_dim,
hidden_size=self.hparams.embedding_dim,
initializer_range=self.hparams.initializer_range,
regularizer=self.regularizer,
name='embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
initializer_range = self.hparams.embedding_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.output_projection = tf.keras.layers.Dense(units=self.hparams.embedding_dim,
kernel_initializer=get_initializer(initializer_range))
self.stacked_rnns = []
self.rnn_initial_states = []
for _ in np.arange(self.hparams.depth):
initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
kernel_initializer=get_initializer(initializer_range),
recurrent_initializer=get_initializer(initializer_range)
))
_create_vars()
initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
for i in np.arange(self.hparams.depth):
state_size = self.stacked_rnns[i].cell.state_size
if nest.is_sequence(state_size):
init_state = nest.map_structure(lambda x: create_init_var(x, i, initializer_range), state_size)
else:
init_state = create_init_var(state_size, i, initializer_range)
self.rnn_initial_states.append(init_state)
def call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
for i in np.arange(self.hparams.depth):
batch_size_tensor = tf.shape(rnn_outputs)[0]
absl.logging.info(self.rnn_initial_states[i])
def tile_init(unnested_init_state):
return tf.tile(unnested_init_state, (batch_size_tensor, 1))
init_state = self.rnn_initial_states[i]
if nest.is_sequence(init_state):
init_for_batch = nest.map_structure(tile_init, init_state)
else:
init_for_batch = tile_init(init_state)
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
initial_state=init_for_batch,
**kwargs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
logits = self.input_embedding(rnn_outputs, mode='linear')
return logits
return _call(inputs, padding_symbol, **kwargs)
@tf.function(experimental_relax_shapes=True)
def detailed_call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
hidden_activation = [embedded_input]
for i in np.arange(self.hparams.depth):
batch_size_tensor = tf.shape(rnn_outputs)[0]
absl.logging.info(self.rnn_initial_states[i])
def tile_init(unnested_init_state):
return tf.tile(unnested_init_state, (batch_size_tensor, 1))
init_state = self.rnn_initial_states[i]
if nest.is_sequence(init_state):
init_for_batch = nest.map_structure(tile_init, init_state)
else:
init_for_batch = tile_init(init_state)
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
initial_state=init_for_batch,
**kwargs)
hidden_activation.append(rnn_outputs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
      inputs_lengths = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) - 1  # cast: reduce_sum is undefined on bool tensors
batch_indices = tf.range(batch_size_tensor)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
sentence_rep = tf.gather_nd(rnn_outputs, indices)
logits = self.input_embedding(rnn_outputs, mode='linear')
out = logits
out = (out,rnn_outputs, sentence_rep, hidden_activation)
return out
return _call(inputs, padding_symbol, **kwargs)
class LmLSTMSharedEmbV2(tf.keras.Model):
def __init__(self, hparams, scope="lm_lstm_shared_emb",*inputs, **kwargs):
del kwargs['cl_token']
super(LmLSTMSharedEmbV2, self).__init__()
self.rep_index = 3
self.rep_layer = -1
self.hparams = hparams
self.scope = scope
self.model_name = '_'.join([self.scope,
'em-'+str(self.hparams.embedding_dim),
'h-'+str(self.hparams.hidden_dim),
'd-'+str(self.hparams.depth),
'hdrop-'+str(self.hparams.hidden_dropout_rate),
'indrop-'+str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0000)
self.create_vars()
@tf.function
def create_vars(self):
self.input_embedding = SharedEmbeddings(vocab_size=self.hparams.input_dim,
hidden_size=self.hparams.embedding_dim,
initializer_range=self.hparams.initializer_range,
regularizer=self.regularizer,
name='embedding')
self.input_embedding_dropout = tf.keras.layers.Dropout(self.hparams.input_dropout_rate)
self.output_embedding_dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
initializer_range = self.hparams.embedding_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.output_projection = tf.keras.layers.Dense(units=self.hparams.embedding_dim,
kernel_initializer=get_initializer(initializer_range))
self.stacked_rnns = []
self.rnn_initial_states = []
for _ in np.arange(self.hparams.depth):
initializer_range = self.hparams.hidden_dim ** -0.5 if self.hparams.initializer_range is None else self.hparams.initializer_range
self.stacked_rnns.append(tf.keras.layers.LSTM(units=self.hparams.hidden_dim,
return_sequences=True,
return_state=True,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
recurrent_dropout=self.hparams.hidden_dropout_rate,
dropout=self.hparams.hidden_dropout_rate,
kernel_regularizer=self.regularizer,
recurrent_regularizer=self.regularizer,
bias_regularizer=self.regularizer,
kernel_initializer=get_initializer(initializer_range),
recurrent_initializer=get_initializer(initializer_range)
))
def call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
**kwargs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
rnn_outputs = self.output_embedding_dropout(rnn_outputs,**kwargs)
logits = self.input_embedding(rnn_outputs, mode='linear')
return logits
return _call(inputs, padding_symbol, **kwargs)
@tf.function(experimental_relax_shapes=True)
def detailed_call(self, inputs, padding_symbol=tf.constant(0, dtype=tf.int64), **kwargs):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, padding_symbol, **kwargs):
input_mask = tf.cast(inputs != padding_symbol, dtype=tf.bool)
embedded_input = self.input_embedding_dropout(self.input_embedding(inputs, mode='embedding'),
**kwargs)
rnn_outputs = embedded_input
hidden_activation = [embedded_input]
for i in np.arange(self.hparams.depth):
rnn_outputs, state_h, state_c = self.stacked_rnns[i](rnn_outputs, mask=input_mask,
**kwargs)
hidden_activation.append(rnn_outputs)
rnn_outputs = self.output_projection(rnn_outputs, **kwargs)
      rnn_outputs = self.output_embedding_dropout(rnn_outputs, **kwargs)
batch_size_tensor = tf.shape(rnn_outputs)[0]
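      # Index of the last non-padding token in each sequence; its hidden state
      # is gathered below and used as the sentence representation.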
inputs_lengths = tf.reduce_sum(tf.cast(input_mask, dtype=tf.int32), axis=-1) - 1
batch_indices = tf.range(batch_size_tensor)
indices = tf.concat([batch_indices[..., None], inputs_lengths[..., None]], -1)
sentence_rep = tf.gather_nd(rnn_outputs, indices)
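      # Weight tying: the shared input embedding matrix doubles as the output
      # projection via mode='linear'.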
logits = self.input_embedding(rnn_outputs, mode='linear')
      out = (logits, rnn_outputs, sentence_rep, hidden_activation)
return out
return _call(inputs, padding_symbol, **kwargs)
if __name__ == '__main__':
class hparams(object):
hidden_dim=8
input_dim=4
output_dim=4
depth=2
hidden_dropout_rate=0.1
lm_lstm = LmLSTM(hparams=hparams)
inputs = np.int64(np.flip(np.sort(np.random.uniform(0,3,size=(2,5)))))
inputs_mask = tf.equal(inputs, 0)
print(inputs_mask)
lm_lstm.build(input_shape=(None,None))
lm_lstm.summary()
print(inputs)
print(lm_lstm(inputs))
| 23,117 | 47.364017 | 138 | py |
Reflect | Reflect-master/tf2_models/transformers.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list
from tf2_models.embedding import SharedEmbeddings
from tf2_models.transformer_layers import Block
class GPT2(tf.keras.layers.Layer):
def __init__(self, hparams, *inputs, **kwargs):
super(GPT2, self).__init__(hparams, *inputs, **kwargs)
self.output_hidden_states = hparams.output_hidden_states
self.output_attentions = hparams.output_attentions
self.output_embeddings = hparams.output_embeddings
self.num_hidden_layers = hparams.depth
self.vocab_size = hparams.vocab_size
self.embedding_dim = hparams.embedding_dim
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
self.h = [Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
name='h_._{}'.format(i)) for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
training=False):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, past, attention_mask, token_type_ids, position_ids,
training):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
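      # Build the attention bias: an explicit attention_mask is broadcast to
      # [batch, 1, 1, seq] and turned into an additive penalty of -10000 on
      # disallowed positions; a 0/1 padding mask (token id 0 = padding) is then
      # folded in.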
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:,tf.newaxis,:,tf.newaxis],
dtype=tf.float32)
if attention_mask is None:
attention_mask = padding_mask
else:
attention_mask = attention_mask*padding_mask
input_shape = shape_list(inputs)
input_ids = tf.reshape(inputs, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
if self.output_embeddings:
outputs = outputs + (inputs_embeds,)
return outputs # last hidden state, presents, (all hidden_states), (attentions)
return _call(inputs, past, attention_mask, token_type_ids, position_ids,
training)
class GPT2SharedWeights(GPT2):
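  # Same as GPT2, but a single Block instance is reused for every layer, so all
  # transformer layers share their weights.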
def __init__(self, hparams, *inputs, **kwargs):
super(GPT2SharedWeights, self).__init__(hparams, *inputs, **kwargs)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size ,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
attention_block = Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
name='h')
self.h = [attention_block for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
class Bert(tf.keras.layers.Layer):
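  # Same architecture as GPT2 above, but the blocks are built with
  # casual_masking=False, so self-attention is bidirectional (BERT-style).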
def __init__(self, hparams, *inputs, **kwargs):
super(Bert, self).__init__(hparams, *inputs, **kwargs)
self.output_hidden_states = hparams.output_hidden_states
self.output_attentions = hparams.output_attentions
self.output_embeddings = hparams.output_embeddings
self.num_hidden_layers = hparams.depth
self.vocab_size = hparams.vocab_size
self.embedding_dim = hparams.embedding_dim
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
self.h = [Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
casual_masking=False,
name='h_._{}'.format(i)) for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
def get_input_embeddings(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
training=False):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
input_shape = shape_list(inputs)
input_ids = tf.reshape(inputs, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
input_embeddings = inputs_embeds + position_embeds + token_type_embeds
padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:, tf.newaxis, :, tf.newaxis],
dtype=tf.float32)
return input_embeddings, input_shape, padding_mask, past
def call_with_embeddings(self, input_embeddings, input_shape, padding_mask, past, attention_mask=None,
training=False):
@tf.function(experimental_relax_shapes=True)
def _call(input_embeddings, input_shape, padding_mask, past, attention_mask,
training):
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
if attention_mask is None:
attention_mask = padding_mask
else:
attention_mask = attention_mask*padding_mask
hidden_states = input_embeddings
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
      # Input embeddings are produced by `get_input_embeddings` and passed in,
      # so nothing extra is appended here even when output_embeddings is set.
      return outputs  # last hidden state, presents, (all hidden_states), (attentions)
return _call(input_embeddings, input_shape, padding_mask, past, attention_mask,
training)
def call(self, inputs, past=None, attention_mask=None, token_type_ids=None, position_ids=None,
training=False):
@tf.function(experimental_relax_shapes=True)
def _call(inputs, past, attention_mask, token_type_ids, position_ids,
training):
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, shape_list(inputs)[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
padding_mask = tf.cast(tf.not_equal(inputs, tf.zeros_like(inputs))[:,tf.newaxis,:,tf.newaxis],
dtype=tf.float32)
if attention_mask is None:
attention_mask = padding_mask
else:
attention_mask = attention_mask*padding_mask
input_shape = shape_list(inputs)
input_ids = tf.reshape(inputs, [-1, input_shape[-1]])
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
inputs_embeds = self.wte(input_ids, mode='embedding')
position_embeds = self.wpe(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_attentions = []
all_hidden_states = ()
for i, (block, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, layer_past, attention_mask], training=training)
hidden_states, present = outputs[:2]
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states, presents)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
if self.output_embeddings:
outputs = outputs + (inputs_embeds,)
return outputs # last hidden state, presents, (all hidden_states), (attentions), input_embedding
return _call(inputs, past, attention_mask, token_type_ids, position_ids,
training)
class BertSharedWeights(Bert):
def __init__(self, hparams, *inputs, **kwargs):
super(BertSharedWeights, self).__init__(hparams, *inputs, **kwargs)
self.output_hidden_states = hparams.output_hidden_states
self.output_attentions = hparams.output_attentions
self.output_embeddings = hparams.output_embeddings
self.num_hidden_layers = hparams.depth
self.vocab_size = hparams.vocab_size
self.embedding_dim = hparams.embedding_dim
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.0001)
self.create_vars(hparams)
@tf.function
def create_vars(self, hparams):
self.wte = SharedEmbeddings(self.vocab_size,
hparams.hidden_size,
initializer_range=hparams.initializer_range,
regularizer=self.regularizer,
name='wte')
self.wpe = tf.keras.layers.Embedding(hparams.n_positions,
hparams.embedding_dim,
embeddings_initializer=get_initializer(hparams.initializer_range),
embeddings_regularizer=self.regularizer,
name='wpe')
self.drop = tf.keras.layers.Dropout(hparams.embd_pdrop)
attention_block = Block(hparams.n_ctx,
hparams,
regularizer=self.regularizer,
scale=True,
casual_masking=False,
name='h')
self.h = [attention_block for i in range(hparams.depth)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=hparams.layer_norm_epsilon, name='ln_f')
| 16,938 | 40.619165 | 113 | py |
Reflect | Reflect-master/tf2_models/resnet.py | import tensorflow as tf
class ResnetBlock(tf.keras.layers.Layer):
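  # Residual block: Conv(ReLU) -> BatchNorm -> Conv -> BatchNorm, plus an
  # identity skip connection and a final ReLU. Because the skip is an identity
  # add with 'same' padding, `filters` must match the input channel count.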
def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs):
super(ResnetBlock, self).__init__(*inputs, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layer()
def create_layer(self):
self.conv1 = tf.keras.layers.Conv2D(self.filters, self.kernel_size,
activation=self.activation,
padding='same',
kernel_regularizer=self.regularizer)
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(self.filters, self.kernel_size,
activation=None,
padding='same',
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.add = tf.keras.layers.Add()
self.activation = tf.keras.layers.Activation('relu')
def call(self, inputs, training=None, **kwargs):
outputs = self.conv1(inputs, training=training, **kwargs)
outputs = self.batch_norm1(outputs,training=training, **kwargs)
outputs = self.conv2(outputs, training=training, **kwargs)
outputs = self.batch_norm2(outputs,training=training, **kwargs)
outputs = self.add([outputs, inputs],training=training, **kwargs)
outputs = self.activation(outputs, training=training, **kwargs)
return outputs
class Resnet(tf.keras.Model):
def __init__(self, hparams, scope='resnet', *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(Resnet, self).__init__(name=scope, *inputs, **kwargs)
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-' + str(self.hparams.hidden_dim),
'rd-' + str(self.hparams.num_res_net_blocks),
'hdrop-' + str(self.hparams.hidden_dropout_rate),
'indrop-' + str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layers()
self.rep_index = 1
self.rep_layer = -1
def create_layers(self):
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.activation = tf.keras.layers.Activation('relu')
self.conv1 = tf.keras.layers.Conv2D(self.hparams.filters[0], self.hparams.kernel_size[0],
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Conv2D(self.hparams.filters[1], self.hparams.kernel_size[1],
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm3 = tf.keras.layers.BatchNormalization()
self.pool2 = tf.keras.layers.MaxPooling2D(self.hparams.pool_size)
self.resblocks = []
for i in range(self.hparams.num_res_net_blocks):
self.resblocks.append(ResnetBlock(self.hparams.filters[2], self.hparams.kernel_size[2]))
self.conv4 = tf.keras.layers.Conv2D(self.hparams.filters[3], self.hparams.kernel_size[3],
activation=None)
self.batch_norm4 = tf.keras.layers.BatchNormalization()
self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
self.dense = tf.keras.layers.Dense(self.hparams.hidden_dim, activation='relu')
self.dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.project = tf.keras.layers.Dense(self.hparams.output_dim, activation=None)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
    x = inputs  # self.batch_norm1(inputs, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.pool2(x, training=training, **kwargs)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.avgpool(x, training=training, **kwargs)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
outputs = self.project(x, training=training, **kwargs)
return outputs
def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs):
self.layer_activations = []
x = self.batch_norm1(inputs, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.pool2(x, training=training, **kwargs)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.avgpool(x, training=training, **kwargs)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
pnltimt = x
outputs = self.project(x, training=training, **kwargs)
return outputs, pnltimt, self.layer_activations | 6,572 | 40.601266 | 94 | py |
Reflect | Reflect-master/tf2_models/cnn.py | import tensorflow as tf
import numpy as np
def max_out(inputs, num_units, axis=None):
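  # Maxout: split the channel axis into `num_units` groups and take the max
  # within each group, e.g. [batch, H, W, 64] with num_units=16 gives
  # [batch, H, W, 16] (max over groups of 4 channels).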
shape = inputs.get_shape().as_list()
if shape[0] is None:
shape[0] = -1
if axis is None: # Assume that channel is the last dimension
axis = -1
num_channels = shape[axis]
if num_channels % num_units:
raise ValueError('number of features({}) is not '
'a multiple of num_units({})'.format(num_channels,
num_units))
shape[axis] = num_units
shape += [num_channels // num_units]
outputs = tf.reduce_max(tf.reshape(inputs, shape), -1)
return outputs
class VanillaCNN(tf.keras.models.Model):
def __init__(self, hparams, scope="cl_vcnn", *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(VanillaCNN, self).__init__(*inputs, **kwargs)
self.hparams = hparams
self.scope = scope
self.model_name = '_'.join([self.scope,
'hc-' + '.'.join(
[str(h) for h in self.hparams.filters]),
'hfc-' + '.'.join(
[str(h) for h in self.hparams.fc_dim]),
'd-' + str(self.hparams.depth),
'hdrop-' + str(
self.hparams.hidden_dropout_rate),
'indrop-' + str(
self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_vars()
self.rep_index = 1
self.rep_layer = -1
def create_vars(self):
self.indrop = tf.keras.layers.Dropout(rate=self.hparams.input_dropout_rate)
self.cnns = []
self.cnn_nns = []
self.cnn_bnz = []
self.cnn_activations = []
self.cnn_pooling = []
self.cnn_dropouts = []
for i in np.arange(self.hparams.depth):
self.cnns.append(tf.keras.layers.Conv2D(self.hparams.filters[i],
self.hparams.kernel_size[i],
activation=None,
kernel_regularizer=self.regularizer))
# if self.hparams.maxout_size[i] < self.hparams.filters[i]:
# nn_size = int(self.hparams.filters[i] / self.hparams.maxout_size[i])
# self.cnn_nns.append(tf.keras.layers.Conv2D(self.hparams.maxout_size[i],
# (1,1),
# activation=None,
# kernel_regularizer=self.regularizer))
# else:
# self.cnn_nns.append(tf.keras.layers.Lambda(lambda x: x))
self.cnn_bnz.append(tf.keras.layers.BatchNormalization())
self.cnn_activations.append(tf.keras.layers.Activation('relu'))
self.cnn_pooling.append(
tf.keras.layers.MaxPooling2D(self.hparams.pool_size[i]))
self.cnn_dropouts.append(
tf.keras.layers.Dropout(rate=self.hparams.hidden_dropout_rate))
self.avg_pool = tf.keras.layers.GlobalAveragePooling2D()
self.densez = []
self.dense_bnz = []
self.dense_activations = []
self.dense_dropouts = []
for i in np.arange(self.hparams.proj_depth):
self.densez.append(
tf.keras.layers.Dense(self.hparams.fc_dim[i], activation=None,
kernel_regularizer=self.regularizer))
self.dense_bnz.append(tf.keras.layers.BatchNormalization())
self.dense_activations.append(tf.keras.layers.Activation('relu'))
self.dense_dropouts.append(
tf.keras.layers.Dropout(rate=self.hparams.hidden_dropout_rate))
self.projector = tf.keras.layers.Dense(self.hparams.output_dim,
kernel_regularizer=self.regularizer)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = self.indrop(inputs, training=training, **kwargs)
for i in np.arange(self.hparams.depth):
x = self.cnns[i](x, training=training, **kwargs)
# x = self.cnn_nns[i](x, training=training, **kwargs)
x = max_out(x, self.hparams.maxout_size[i])
x = self.cnn_bnz[i](x, training=training, **kwargs)
x = self.cnn_activations[i](x, training=training, **kwargs)
x = self.cnn_pooling[i](x, training=training, **kwargs)
x = self.cnn_dropouts[i](x, training=training, **kwargs)
x = self.avg_pool(x, **kwargs)
for i in np.arange(self.hparams.proj_depth):
x = self.densez[i](x, training=training, **kwargs)
x = self.dense_bnz[i](x, training=training, **kwargs)
x = self.dense_activations[i](x, training=training, **kwargs)
x = self.dense_dropouts[i](x, training=training, **kwargs)
logits = self.projector(x, training=training, **kwargs)
return logits
def detailed_call(self, inputs, padding_symbol=None, **kwargs):
x = self.indrop(inputs)
hidden_activations = []
for i in np.arange(self.hparams.depth):
x = self.cnns[i](x, **kwargs)
x = max_out(x, self.hparams.maxout_size[i])
x = self.cnn_bnz[i](x, **kwargs)
x = self.cnn_activations[i](x, **kwargs)
x = self.cnn_pooling[i](x, **kwargs)
x = self.cnn_dropouts[i](x, **kwargs)
hidden_activations.append(x)
x = self.avg_pool(x, **kwargs)
hidden_activations.append(x)
for i in np.arange(self.hparams.proj_depth):
x = self.densez[i](x, **kwargs)
x = self.dense_bnz[i](x, **kwargs)
x = self.dense_activations[i](x, **kwargs)
x = self.dense_dropouts[i](x, **kwargs)
hidden_activations.append(x)
logits = self.projector(x, **kwargs)
return logits, hidden_activations[-1], hidden_activations
| 5,878 | 38.993197 | 91 | py |
Reflect | Reflect-master/tf2_models/utils.py | import tensorflow as tf
import re
from tensorboard.compat.tensorflow_stub import tensor_shape
def camel2snake(name):
return name[0].lower() + re.sub(r'(?!^)[A-Z]', lambda x: '_' + x.group(0).lower(), name[1:])
def log_summary(log_value, log_name, summary_scope):
"""Produce scalar summaries."""
with tf.compat.v2.summary.experimental.summary_scope(summary_scope):
tf.summary.scalar(log_name, log_value)
def create_init_var(unnested_state_size, i, initializer_range):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [1] + flat_dims
return tf.Variable(shape=init_state_size, dtype=tf.float32,
initial_value=tf.keras.initializers.TruncatedNormal(stddev=initializer_range)(
shape=init_state_size),
trainable=True,
name="lstm_init_" + str(i))
| 884 | 31.777778 | 99 | py |
Reflect | Reflect-master/tf2_models/train_utils.py | import absl
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2.learning_rate_schedule import LearningRateSchedule
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow_addons.utils import keras_utils
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecayWithWarmpUp(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule."""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
warmup_steps,
warmup_learning_rate=0.0,
hold_base_rate_steps=0,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
    The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
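      warmup_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
        Number of steps over which the learning rate is increased linearly
        from `warmup_learning_rate` to `initial_learning_rate`.
      warmup_learning_rate: A scalar `float32` or `float64` `Tensor` or a
        Python number. The learning rate at the start of the warmup phase.
      hold_base_rate_steps: A scalar `int32` or `int64` `Tensor` or a Python
        number. Number of steps to hold `initial_learning_rate` after warmup
        before exponential decay begins.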
staircase: Boolean. If `True` decay the learning rate at discrete
intervals
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
super(ExponentialDecayWithWarmpUp, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.warmup_steps = warmup_steps
self.warmup_learning_rate = warmup_learning_rate
self.hold_base_rate_steps = hold_base_rate_steps
self.staircase = staircase
self.name = name
@tf.function(experimental_relax_shapes=True)
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = tf.constant(
self.initial_learning_rate, name="initial_learning_rate", dtype=tf.float32)
warmup_learning_rate = tf.constant(
self.warmup_learning_rate, name="warmup_learning_rate", dtype=tf.float32)
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
warmup_steps = math_ops.cast(self.warmup_steps, dtype)
hold_base_rate_steps = math_ops.cast(self.hold_base_rate_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
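      # Piecewise schedule: linear warmup for `warmup_steps`, hold at the
      # initial rate for `hold_base_rate_steps`, then exponential decay.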
p = (global_step_recomp - (warmup_steps+hold_base_rate_steps)) / (decay_steps)
if self.staircase:
p = math_ops.floor(p)
learning_rate= math_ops.multiply(
initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
learning_rate = tf.where(
global_step_recomp > (warmup_steps + hold_base_rate_steps),
learning_rate, initial_learning_rate)
if self.warmup_steps > 0:
if self.initial_learning_rate < self.warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to '
'warmup_learning_rate.')
slope = (initial_learning_rate - warmup_learning_rate) / warmup_steps
warmup_rate = slope * tf.cast(global_step_recomp,
tf.float32) + warmup_learning_rate
learning_rate = tf.where(global_step_recomp < warmup_steps, warmup_rate,
learning_rate)
return learning_rate
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"warmup_steps": self.warmup_steps,
"warmup_learning_rate": self.warmup_learning_rate,
"hold_base_rate_steps": self.hold_base_rate_steps,
"name": self.name
}
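# Example usage (a minimal sketch; the hyper-parameter values are illustrative):
# lr_schedule = ExponentialDecayWithWarmpUp(
#     initial_learning_rate=1e-3, decay_steps=10000, decay_rate=0.96,
#     warmup_steps=1000, warmup_learning_rate=1e-5, hold_base_rate_steps=0)
# optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)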
class RectifiedAdam(tf.keras.optimizers.Optimizer):
"""Variant of the Adam optimizer whose adaptive learning rate is rectified
so as to have a consistent variance.
It implements the Rectified Adam (a.k.a. RAdam) proposed by
Liyuan Liu et al. in [On The Variance Of The Adaptive Learning Rate
And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).
Example of usage:
```python
opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
```
Note: `amsgrad` is not described in the original paper. Use it with
caution.
    RAdam is not a replacement for the heuristic warmup; the warmup settings
    should be kept if warmup has already been employed and tuned in the
    baseline method.
You can enable warmup by setting `total_steps` and `warmup_proportion`:
```python
opt = tfa.optimizers.RectifiedAdam(
lr=1e-3,
total_steps=10000,
warmup_proportion=0.1,
min_lr=1e-5,
)
```
In the above example, the learning rate will increase linearly
from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr`
in 9000 steps.
Lookahead, proposed by Michael R. Zhang et.al in the paper
[Lookahead Optimizer: k steps forward, 1 step back]
(https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam,
which is announced by Less Wright and the new combined optimizer can also
be called "Ranger". The mechanism can be enabled by using the lookahead
wrapper. For example:
```python
radam = tfa.optimizers.RectifiedAdam()
ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
```
"""
def __init__(self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=0.,
amsgrad=False,
sma_threshold=5.0,
total_steps=0,
warmup_proportion=0.1,
min_lr=0.,
name='RectifiedAdam',
**kwargs):
r"""Construct a new RAdam optimizer.
Args:
learning_rate: A Tensor or a floating point value.
The learning rate.
beta_1: A float value or a constant float tensor.
The exponential decay rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor.
The exponential decay rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability.
weight_decay: A floating point value. Weight decay for each param.
amsgrad: boolean. Whether to apply AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
beyond".
            sma_threshold: A float value.
                The threshold for the simple moving average.
total_steps: An integer. Total number of training steps.
Enable warmup by setting a positive value.
warmup_proportion: A floating point value.
The proportion of increasing steps.
min_lr: A floating point value. Minimum learning rate after warmup.
name: Optional name for the operations created when applying
gradients. Defaults to "RectifiedAdam".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super(RectifiedAdam, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('beta_1', beta_1)
self._set_hyper('beta_2', beta_2)
self._set_hyper('decay', self._initial_decay)
self._set_hyper('weight_decay', weight_decay)
self._set_hyper('sma_threshold', sma_threshold)
self._set_hyper('total_steps', float(total_steps))
self._set_hyper('warmup_proportion', warmup_proportion)
self._set_hyper('min_lr', min_lr)
self.epsilon = epsilon or tf.keras.backend.epsilon()
self.amsgrad = amsgrad
self._initial_weight_decay = weight_decay
self._initial_total_steps = total_steps
def _create_slots(self, var_list):
for var in var_list:
self.add_slot(var, 'm')
for var in var_list:
self.add_slot(var, 'v')
if self.amsgrad:
for var in var_list:
self.add_slot(var, 'vhat')
def set_weights(self, weights):
params = self.weights
num_vars = int((len(params) - 1) / 2)
if len(weights) == 3 * num_vars + 1:
weights = weights[:len(params)]
super(RectifiedAdam, self).set_weights(weights)
def _resource_apply_dense(self, grad, var):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
m = self.get_slot(var, 'm')
v = self.get_slot(var, 'v')
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
if self._initial_total_steps > 0:
total_steps = self._get_hyper('total_steps', var_dtype)
warmup_steps = total_steps *\
self._get_hyper('warmup_proportion', var_dtype)
min_lr = self._get_hyper('min_lr', var_dtype)
decay_steps = tf.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
local_step <= warmup_steps,
lr_t * (local_step / warmup_steps),
lr_t + decay_rate * tf.minimum(local_step - warmup_steps,
decay_steps),
)
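    # Variance rectification from the RAdam paper cited above: sma_inf is the
    # maximum length of the approximated simple moving average, sma_t its value
    # at this step, and r_t the rectification term; when sma_t is below
    # sma_threshold the update falls back to the un-rectified momentum
    # direction m_corr_t.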
sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
sma_t = sma_inf - 2.0 * local_step * beta_2_power / (
1.0 - beta_2_power)
m_t = m.assign(
beta_1_t * m + (1.0 - beta_1_t) * grad,
use_locking=self._use_locking)
m_corr_t = m_t / (1.0 - beta_1_power)
v_t = v.assign(
beta_2_t * v + (1.0 - beta_2_t) * tf.square(grad),
use_locking=self._use_locking)
if self.amsgrad:
vhat = self.get_slot(var, 'vhat')
vhat_t = vhat.assign(
tf.maximum(vhat, v_t), use_locking=self._use_locking)
v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
else:
vhat_t = None
v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
r_t = tf.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) /
(sma_inf - 2.0) * sma_inf / sma_t)
sma_threshold = self._get_hyper('sma_threshold', var_dtype)
var_t = tf.where(sma_t >= sma_threshold,
r_t * m_corr_t / (v_corr_t + epsilon_t), m_corr_t)
if self._initial_weight_decay > 0.0:
var_t += self._get_hyper('weight_decay', var_dtype) * var
var_update = var.assign_sub(
lr_t * var_t, use_locking=self._use_locking)
updates = [var_update, m_t, v_t]
if self.amsgrad:
updates.append(vhat_t)
return tf.group(*updates)
def _resource_apply_sparse(self, grad, var, indices):
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
beta_1_t = self._get_hyper('beta_1', var_dtype)
beta_2_t = self._get_hyper('beta_2', var_dtype)
epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta_1_power = tf.pow(beta_1_t, local_step)
beta_2_power = tf.pow(beta_2_t, local_step)
if self._initial_total_steps > 0:
total_steps = self._get_hyper('total_steps', var_dtype)
warmup_steps = total_steps *\
self._get_hyper('warmup_proportion', var_dtype)
min_lr = self._get_hyper('min_lr', var_dtype)
decay_steps = tf.maximum(total_steps - warmup_steps, 1)
decay_rate = (min_lr - lr_t) / decay_steps
lr_t = tf.where(
local_step <= warmup_steps,
lr_t * (local_step / warmup_steps),
lr_t + decay_rate * tf.minimum(local_step - warmup_steps,
decay_steps),
)
sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
sma_t = sma_inf - 2.0 * local_step * beta_2_power / (
1.0 - beta_2_power)
m = self.get_slot(var, 'm')
m_scaled_g_values = grad * (1 - beta_1_t)
m_t = m.assign(m * beta_1_t, use_locking=self._use_locking)
with tf.control_dependencies([m_t]):
m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
m_corr_t = m_t / (1.0 - beta_1_power)
v = self.get_slot(var, 'v')
v_scaled_g_values = (grad * grad) * (1 - beta_2_t)
v_t = v.assign(v * beta_2_t, use_locking=self._use_locking)
with tf.control_dependencies([v_t]):
v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
if self.amsgrad:
vhat = self.get_slot(var, 'vhat')
vhat_t = vhat.assign(
tf.maximum(vhat, v_t), use_locking=self._use_locking)
v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
else:
vhat_t = None
v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
r_t = tf.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) /
(sma_inf - 2.0) * sma_inf / sma_t)
sma_threshold = self._get_hyper('sma_threshold', var_dtype)
var_t = tf.where(sma_t >= sma_threshold,
r_t * m_corr_t / (v_corr_t + epsilon_t), m_corr_t)
if self._initial_weight_decay > 0.0:
var_t += self._get_hyper('weight_decay', var_dtype) * var
with tf.control_dependencies([var_t]):
var_update = self._resource_scatter_add(
var, indices, tf.gather(-lr_t * var_t, indices))
updates = [var_update, m_t, v_t]
if self.amsgrad:
updates.append(vhat_t)
return tf.group(*updates)
def get_config(self):
config = super(RectifiedAdam, self).get_config()
config.update({
'learning_rate':
self._serialize_hyperparameter('learning_rate'),
'beta_1':
self._serialize_hyperparameter('beta_1'),
'beta_2':
self._serialize_hyperparameter('beta_2'),
'decay':
self._serialize_hyperparameter('decay'),
'weight_decay':
self._serialize_hyperparameter('weight_decay'),
'sma_threshold':
self._serialize_hyperparameter('sma_threshold'),
'epsilon':
self.epsilon,
'amsgrad':
self.amsgrad,
'total_steps':
self._serialize_hyperparameter('total_steps'),
'warmup_proportion':
self._serialize_hyperparameter('warmup_proportion'),
'min_lr':
self._serialize_hyperparameter('min_lr'),
})
return config | 17,416 | 40.568019 | 92 | py |
Reflect | Reflect-master/tf2_models/transformer_layers.py | import tensorflow as tf
from tf2_models.common_layers import get_initializer, shape_list, gelu
class Attention(tf.keras.layers.Layer):
def __init__(self, hidden_dim, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs):
super(Attention, self).__init__(**kwargs)
self.output_attentions = config.output_attentions
self.casual_masking = casual_masking
n_state = hidden_dim
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.regularizer = regularizer
self.c_attn = Conv1D(nf=n_state * 3, nx=hidden_dim,
initializer_range=config.initializer_range,
regularizer=self.regularizer, name='c_attn')
self.c_proj = Conv1D(nf=n_state, nx=hidden_dim,
initializer_range=config.initializer_range,
regularizer=self.regularizer,
name='c_proj')
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:, None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
def _attn(self, inputs, training=False):
q, k, v, attention_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(tf.shape(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
if self.casual_masking:
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
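      # Keep scores at allowed positions and push masked (future) positions to
      # roughly -1e4 so they vanish after the softmax.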
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, layer_past, attention_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = tf.unstack(layer_past, axis=1)
key = tf.concat([past_key, key], axis=-2)
value = tf.concat([past_value, value], axis=-2)
present = tf.stack([key, value], axis=1)
attn_outputs = self._attn([query, key, value, attention_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a, present] + attn_outputs[1:]
return outputs # a, present, (attentions)
class Conv1D(tf.keras.layers.Layer):
def __init__(self, nf, nx, regularizer, initializer_range=0.02, **kwargs):
""" TFConv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super(Conv1D, self).__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
self.regularizer = regularizer
def build(self, input_shape):
self.weight = self.add_weight(
"weight",
shape=[self.nx, self.nf],
initializer=get_initializer(self.initializer_range),
regularizer=self.regularizer)
self.bias = self.add_weight(
"bias",
shape=[1, self.nf],
initializer=tf.zeros_initializer(),
regularizer=self.regularizer)
def call(self, x, **kwargs):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
class Block(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, regularizer, casual_masking=True, scale=False, **kwargs):
super(Block, self).__init__(**kwargs)
self.regularizer = regularizer
nx = config.embedding_dim
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1')
self.attn = Attention(hidden_dim=nx, n_ctx=n_ctx, config=config, scale=scale,
regularizer=self.regularizer,
casual_masking=casual_masking, name='attn')
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2')
self.mlp = TransformerMLP(4 * nx, config, regularizer=self.regularizer, name='mlp')
def call(self, inputs, training=False):
x, layer_past, attention_mask = inputs
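    # Pre-LayerNorm residual block: x + Attn(LN(x)) followed by x + MLP(LN(x)).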
a = self.ln_1(x)
output_attn = self.attn([a, layer_past, attention_mask], training=training)
a = output_attn[0] # output_attn: a, present, (attentions)
x = x + a
m = self.ln_2(x)
m = self.mlp(m, training=training)
x = x + m
outputs = [x] + output_attn[1:]
return outputs # x, present, (attentions)
class TransformerMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, regularizer, **kwargs):
super(TransformerMLP, self).__init__(**kwargs)
self.regularizer = regularizer
nx = config.embedding_dim
self.c_fc = Conv1D(n_state, nx, initializer_range=config.initializer_range,
regularizer=self.regularizer, name='c_fc')
self.c_proj = Conv1D(nx, n_state, initializer_range=config.initializer_range,
regularizer=self.regularizer, name='c_proj')
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2 | 6,560 | 35.049451 | 105 | py |
Reflect | Reflect-master/tf2_models/ff_resnet.py | import tensorflow as tf
class FFResnetBlock(tf.keras.layers.Layer):
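  # Fully-connected analogue of ResnetBlock: Dense layers with filters*9 units
  # (presumably mirroring a 3x3 convolution's parameter count) around an
  # identity skip connection.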
def __init__(self, filters, kernel_size, activation='relu',*inputs, **kwargs):
super(FFResnetBlock, self).__init__(*inputs, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.activation = activation
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layer()
def create_layer(self):
self.conv1 = tf.keras.layers.Dense(self.filters*9,
activation=self.activation,
kernel_regularizer=self.regularizer)
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Dense(self.filters*9,
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.add = tf.keras.layers.Add()
self.activation = tf.keras.layers.Activation('relu')
def call(self, inputs, training=None, **kwargs):
outputs = self.conv1(inputs, training=training, **kwargs)
outputs = self.batch_norm1(outputs,training=training, **kwargs)
outputs = self.conv2(outputs, training=training, **kwargs)
outputs = self.batch_norm2(outputs,training=training, **kwargs)
outputs = self.add([outputs, inputs],training=training, **kwargs)
outputs = self.activation(outputs, training=training, **kwargs)
return outputs
class FFResnet(tf.keras.Model):
def __init__(self, hparams, scope='ff_resnet', *inputs, **kwargs):
if 'cl_token' in kwargs:
del kwargs['cl_token']
super(FFResnet, self).__init__(name=scope, *inputs, **kwargs)
self.scope = scope
self.hparams = hparams
self.model_name = '_'.join([self.scope,
'h-' + str(self.hparams.hidden_dim),
'rd-' + str(self.hparams.num_res_net_blocks),
'hdrop-' + str(self.hparams.hidden_dropout_rate),
'indrop-' + str(self.hparams.input_dropout_rate)])
self.regularizer = tf.keras.regularizers.l1_l2(l1=0.00,
l2=0.000000002)
self.create_layers()
self.rep_index = 1
self.rep_layer = -1
def create_layers(self):
self.flat = tf.keras.layers.Flatten()
self.batch_norm1 = tf.keras.layers.BatchNormalization()
self.activation = tf.keras.layers.Activation('relu')
self.conv1 = tf.keras.layers.Dense(self.hparams.filters[0]*9,
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm2 = tf.keras.layers.BatchNormalization()
self.conv2 = tf.keras.layers.Dense(self.hparams.filters[1]*9,
activation=None,
kernel_regularizer=self.regularizer)
self.batch_norm3 = tf.keras.layers.BatchNormalization()
self.resblocks = []
for i in range(self.hparams.num_res_net_blocks):
self.resblocks.append(FFResnetBlock(self.hparams.filters[2], self.hparams.kernel_size[2]))
self.conv4 = tf.keras.layers.Dense(self.hparams.filters[3]*9,
activation=None)
self.batch_norm4 = tf.keras.layers.BatchNormalization()
self.dense = tf.keras.layers.Dense(self.hparams.hidden_dim, activation='relu')
self.dropout = tf.keras.layers.Dropout(self.hparams.hidden_dropout_rate)
self.project = tf.keras.layers.Dense(self.hparams.output_dim, activation=None)
def call(self, inputs, padding_symbol=None, training=None, **kwargs):
x = self.flat(inputs, **kwargs)
x = self.batch_norm1(x, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
outputs = self.project(x, training=training, **kwargs)
return outputs
def detailed_call(self, inputs, padding_symbol=None, training=None, **kwargs):
self.layer_activations = []
x = self.flat(inputs, **kwargs)
x = self.batch_norm1(x, training=training, **kwargs)
x = self.conv1(x, training=training, **kwargs)
x = self.batch_norm2(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv2(x, training=training, **kwargs)
x = self.batch_norm3(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
for i in range(self.hparams.num_res_net_blocks):
x = self.resblocks[i](x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.conv4(x, training=training, **kwargs)
x = self.batch_norm4(x, training=training, **kwargs)
x = self.activation(x)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
x = self.dense(x, training=training, **kwargs)
x = self.dropout(x, training=training, **kwargs)
self.layer_activations.append(x)
pnltimt = x
outputs = self.project(x, training=training, **kwargs)
return outputs, pnltimt, self.layer_activations | 6,118 | 39.256579 | 96 | py |
Reflect | Reflect-master/tf2_models/keras_callbacks.py | import tensorflow as tf
from tf2_models.utils import log_summary
class CheckpointCallback(tf.keras.callbacks.Callback):
def __init__(self, manager, ckpt):
super(CheckpointCallback, self).__init__()
self.manager = manager
self.ckpt = ckpt
def on_epoch_end(self, epoch, logs=None):
self.ckpt.step.assign_add(1)
save_path = self.manager.save()
tf.print("Epoch %d: " %epoch)
tf.print("Saved checkpoint for:", save_path)
class SummaryCallback(tf.keras.callbacks.Callback):
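  # Writes scalar summaries: selected training losses every 200 optimizer steps
  # during training, and all epoch-level train/validation metrics in
  # on_epoch_end.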
  def __init__(self, summary_writer):
    super(SummaryCallback, self).__init__()
    self.summary_writer = summary_writer
def on_train_batch_end(self, batch, logs=None):
if (self.model.optimizer.iterations % 200) == 0:
print(logs)
if 'loss' in logs.keys():
        log_summary(log_name='learning_rate',
                    log_value=self.model.optimizer.learning_rate(self.model.optimizer.iterations),
                    summary_scope='train')
log_summary(log_name='fine_total_loss', log_value=logs['loss'], summary_scope='train')
if 'masked_sequence_loss' in logs.keys():
log_summary(log_name='fine_lm_loss', log_value=logs['masked_sequence_loss'], summary_scope='train')
if 'sequence_loss' in logs.keys():
log_summary(log_name='fine_lm_loss', log_value=logs['sequence_loss'], summary_scope='train')
def on_epoch_end(self, epoch, logs=None):
    # Log summaries for train and validation
    if 'masked_sequence_loss' in logs.keys():
      log_summary(log_name='perplexity', log_value=tf.exp(logs['masked_sequence_loss']), summary_scope='train')
      log_summary(log_name='perplexity', log_value=tf.exp(logs['val_masked_sequence_loss']), summary_scope='valid')
for key in logs.keys():
if 'val' in key:
log_summary(log_name=key, log_value=logs[key], summary_scope='valid')
else:
log_summary(log_name=key, log_value=logs[key], summary_scope='train')
| 1,859 | 38.574468 | 148 | py |
Reflect | Reflect-master/tf2_models/metrics.py | import tensorflow as tf
@tf.function(experimental_relax_shapes=True)
def distill_loss(y_true, y_pred, tmp):
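  # Soft-target cross-entropy between teacher probabilities (y_true) and
  # student logits (y_pred) at temperature `tmp`; the 1/tmp^2 factor follows
  # Hinton et al. (2015) so gradient magnitudes stay comparable across
  # temperatures.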
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32)
scale_factor = 1.0 / (tmp*tmp)
return tf.reduce_mean(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp,
labels=y_true,
name='loss')) * scale_factor
@tf.function(experimental_relax_shapes=True)
def sequence_distill_loss(y_true, y_pred, padding_symbol, tmp):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.float32)
sequence_mask = tf.cast(y_true[..., padding_symbol] != 1.0, dtype=tf.float32)
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask)
scale_factor = 1.0 / (tmp * tmp)
return tf.reduce_sum(tf.compat.v2.nn.softmax_cross_entropy_with_logits(logits=y_pred / tmp,
labels=y_true,
name='loss') * sequence_mask) * scale_factor
@tf.function(experimental_relax_shapes=True)
def masked_sequence_loss(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
# [batch_size, 1]
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask, axis=-1)[...,None]
return tf.reduce_mean(tf.reduce_sum(tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss') * sequence_mask, axis=-1))
@tf.function(experimental_relax_shapes=True)
def batch_masked_sequence_loss(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
  # [batch_size, seq_len] 0/1 mask over non-padding positions
return tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss'), sequence_mask
@tf.function(experimental_relax_shapes=True)
def masked_perplexity(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
# [batch_size, 1]
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask, axis=-1)[...,None]
return tf.reduce_mean(tf.exp(tf.reduce_sum(tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss') * sequence_mask, axis=-1)))
@tf.function(experimental_relax_shapes=True)
def masked_batch_perplexity(y_true, y_pred, padding_symbol=0):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != padding_symbol, dtype=tf.float32)
  # normalize so the mask sums to 1 over all non-padding tokens in the batch
sequence_mask = sequence_mask / tf.reduce_sum(sequence_mask)
return tf.exp(tf.reduce_sum(sequence_mask * tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss')))
#@tf.function(experimental_relax_shapes=True)
def classification_loss(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = tf.squeeze(y_true, axis=-1)
y_true = tf.cast(y_true, dtype=tf.int64)
return tf.compat.v2.nn.sparse_softmax_cross_entropy_with_logits(logits=y_pred,
labels=y_true,
name='loss')
@tf.function(experimental_relax_shapes=True)
def accuracy(targets, logits, padding_symbol=0):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32)
return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(1))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy(targets, logits, ):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
return unmasked_accuracy_topk(targets, logits, topk=tf.constant(1))
@tf.function(experimental_relax_shapes=True)
def accuracy_top2(targets, logits, padding_symbol=0):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32)
return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(2))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy_top2(targets, logits):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
return unmasked_accuracy_topk(targets, logits, topk=tf.constant(2))
@tf.function(experimental_relax_shapes=True)
def accuracy_top5(targets, logits, padding_symbol=0):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
sequence_mask = tf.cast(targets != padding_symbol, dtype=tf.float32)
return accuracy_topk(targets, logits, sequence_mask, topk=tf.constant(5))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy_top5(targets, logits):
targets = tf.cast(tf.squeeze(targets), dtype=tf.int64)
return unmasked_accuracy_topk(targets, logits, topk=tf.constant(5))
@tf.function(experimental_relax_shapes=True)
def accuracy_topk(targets, logits, sequence_mask, topk):
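  # Flattens logits and targets to individual tokens, computes a top-k hit per token, and
  # returns the average over tokens weighted by the (normalized) padding mask.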
orig_shape = tf.shape(logits)
last_dim = orig_shape[-1]
logits = tf.reshape(logits, (-1,last_dim))
targets = tf.reshape(targets, (-1,1))
sequence_mask = tf.cast(tf.reshape(sequence_mask, (-1,1)), tf.float32)
unmasked_accuracies = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true=targets,
y_pred=logits,
k=topk)
normalizing_factor = sequence_mask / tf.reduce_sum(sequence_mask)
normalizing_factor = tf.squeeze(normalizing_factor)
return tf.reduce_sum(tf.multiply(normalizing_factor, unmasked_accuracies))
@tf.function(experimental_relax_shapes=True)
def unmasked_accuracy_topk(targets, logits, topk):
orig_shape = tf.shape(logits)
last_dim = orig_shape[-1]
logits = tf.reshape(logits, (-1,last_dim))
targets = tf.reshape(targets, (-1,1))
unmasked_accuracies = tf.keras.metrics.sparse_top_k_categorical_accuracy(y_true=targets,
y_pred=logits,
k=topk)
return tf.reduce_mean(unmasked_accuracies)
class MaskedSequenceLoss(tf.keras.losses.Loss):
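  # Keras Loss wrapper around batch_masked_sequence_loss: normalizes the mask (or an
  # explicit sample_weight) to sum to one and divides by num_replicas_in_sync, presumably
  # so the summed loss stays a per-token mean when aggregated across strategy replicas.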
def __init__(self, padding_symbol=0, num_replicas_in_sync=1,
**kwargs):
super(MaskedSequenceLoss, self).__init__(reduction=tf.keras.losses.Reduction.SUM, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "batch_masked_sequence_loss"
self.num_replicas_in_sync = num_replicas_in_sync
def call(self, y_true, y_pred, sample_weight=None):
entropies, mask = batch_masked_sequence_loss(y_true=y_true, y_pred=y_pred, padding_symbol=self.padding_symbol)
if sample_weight is not None:
mask = sample_weight
norm_factor = mask / tf.reduce_sum(mask)
return tf.reduce_sum(entropies * norm_factor) / self.num_replicas_in_sync
class MaskedSequenceMetric(tf.keras.losses.Loss):
def __init__(self, padding_symbol=0,
**kwargs):
super(MaskedSequenceMetric, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "batch_masked_sequence_loss"
def call(self, y_true, y_pred, sample_weight=None):
entropies, mask = batch_masked_sequence_loss(y_true=y_true, y_pred=y_pred, padding_symbol=self.padding_symbol)
if sample_weight is not None:
mask = sample_weight
norm_factor = mask / tf.reduce_sum(mask)
return tf.reduce_sum(entropies * norm_factor)
class ClassificationLoss(tf.keras.losses.Loss):
  def __init__(self, global_batch_size, padding_symbol=0,
**kwargs):
super(ClassificationLoss, self).__init__(reduction=tf.keras.losses.Reduction.SUM, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "classification_loss"
self.global_batch_size = tf.cast(global_batch_size, dtype=tf.float32)
def call(self, y_true, y_pred):
return classification_loss(y_true=y_true, y_pred=y_pred) / self.global_batch_size
class ClassificationLossMetric(tf.keras.losses.Loss):
def __init__(self, global_batch_size, padding_symbol=0,
**kwargs):
super(ClassificationLossMetric, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs)
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.name = "classification_loss"
self.global_batch_size = global_batch_size
def call(self, y_true, y_pred):
return tf.reduce_mean(classification_loss(y_true=y_true, y_pred=y_pred), axis=0)
class AccuracyTopk(tf.keras.losses.Loss):
def __init__(self, global_batch_size, padding_symbol=0, topk=1,
**kwargs):
super(AccuracyTopk, self).__init__(reduction=tf.keras.losses.Reduction.NONE, **kwargs)
self.name = '-'.join(['accuracy','top', str(topk)])
self.padding_symbol = tf.constant(padding_symbol, dtype=tf.int64)
self.global_batch_size = global_batch_size
self.topk = tf.constant(topk)
def call(self, y_true, y_pred):
y_true = tf.cast(tf.squeeze(y_true), dtype=tf.int64)
sequence_mask = tf.cast(y_true != self.padding_symbol, dtype=tf.float32)
return accuracy_topk(targets=y_true, logits=y_pred, sequence_mask=sequence_mask, topk=self.topk)
if __name__ == '__main__':
import numpy as np
a = np.asarray([[[1,1.5,2,0], [4,3,0,0]],
[[1,1.5,2,0], [4,3,0,0]]], dtype=np.float32)
a_mask = [[1, 1],[1 , 0]]
print(a_mask)
b = np.asarray([[0, 0],[1, 1]], dtype=np.int64)
  print(accuracy_topk(logits=a,targets=b,sequence_mask=a_mask,topk=1))
| 10,276 | 46.578704 | 117 | py |
Reflect | Reflect-master/tf2_models/trainer.py | import tensorflow as tf
import os
from tf2_models.keras_callbacks import CheckpointCallback, SummaryCallback
from tf2_models.train_utils import RectifiedAdam, ExponentialDecayWithWarmpUp
OPTIMIZER_DIC = {'adam': tf.keras.optimizers.Adam,
'radam': RectifiedAdam,
}
class Trainer(object):
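  # Thin wrapper around Keras fit(): builds the optimizer and learning-rate schedule,
  # compiles the model under the given distribution strategy, and wires up checkpointing
  # and TensorBoard summaries.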
def __init__(self, hparams, strategy, model, task, train_params, log_dir, ckpt_dir):
self.hparams = hparams
self.model = model
self.task = task
self.train_params = train_params
self.strategy = strategy
lr_schedule = self.get_lr_schedule()
self.optimizer = OPTIMIZER_DIC[self.train_params.optimizer](learning_rate=lr_schedule, epsilon=1e-08, clipnorm=1.0)
self.ckpt = tf.train.Checkpoint(step=tf.Variable(1, name='checkpoint_step'), optimizer=self.optimizer, net=self.model)
self.manager = tf.train.CheckpointManager(self.ckpt, ckpt_dir,
keep_checkpoint_every_n_hours=self.hparams.keep_checkpoint_every_n_hours,
max_to_keep=2)
with self.strategy.scope():
      x, y = next(iter(self.task.valid_dataset))
model(x)
model.summary()
model.compile(
optimizer=self.optimizer,
loss=self.task.get_loss_fn(),
        metrics=self.task.metrics())
summary_dir = os.path.join(log_dir, 'summaries')
tf.io.gfile.makedirs(log_dir)
self.summary_writer = tf.compat.v2.summary.create_file_writer(os.path.join(summary_dir, 'train'))
tf.compat.v2.summary.experimental.set_step(self.optimizer.iterations)
ckpt_callback = CheckpointCallback(manager=self.manager, ckpt=self.ckpt)
summary_callback = SummaryCallback(summary_writer=self.summary_writer)
self.callbacks = [ckpt_callback, summary_callback]
def get_lr_schedule(self):
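    # 'crs' in the schedule name selects cosine decay with warm restarts; otherwise an
    # exponential decay is used (with warmup, except for RAdam where warmup is disabled).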
if 'crs' in self.train_params.schedule:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = (
tf.keras.experimental.CosineDecayRestarts(
initial_learning_rate,
self.train_params.decay_steps,
t_mul=2.0,
m_mul=0.9,
alpha=0.001,
))
elif self.train_params.optimizer == 'radam':
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
decay_rate=0.96,
warmup_steps=0.0)
else:
initial_learning_rate = self.train_params.learning_rate
lr_schedule = ExponentialDecayWithWarmpUp(
initial_learning_rate=initial_learning_rate,
decay_steps=self.train_params.decay_steps,
decay_rate=0.96,
hold_base_rate_steps=self.train_params.hold_base_rate_steps,
warmup_steps=self.train_params.warmup_steps)
return lr_schedule
def restore(self):
with self.strategy.scope():
self.ckpt.restore(self.manager.latest_checkpoint)
if self.manager.latest_checkpoint:
print("Restored from {}".format(self.manager.latest_checkpoint))
else:
print("Initializing from scratch.")
def train(self):
with self.strategy.scope():
with self.summary_writer.as_default():
print("initial learning rate:", self.model.optimizer.learning_rate(self.model.optimizer.iterations))
self.model.fit(self.task.train_dataset,
epochs=self.train_params.num_train_epochs,
steps_per_epoch=self.task.n_train_batches,
validation_steps=self.task.n_valid_batches,
callbacks=self.callbacks,
validation_data=self.task.valid_dataset,
verbose=2
)
| 3,931 | 39.536082 | 122 | py |
Reflect | Reflect-master/tfds_data/tal_agreement.py | from collections import Counter
import tensorflow as tf
import tensorflow_datasets as tfds
import os
import numpy as np
from tensorflow_datasets.core.features.text import Tokenizer
from tensorflow_datasets.core.features.text.text_encoder import write_lines_to_file, read_lines_from_file
from prep_data.build_dictionary import build_and_save_dic
from util import text_util, constants
from util.text_util import deps_from_tsv, deps_to_tsv
import string
class SVAgreement(tfds.core.GeneratorBasedBuilder):
""" This is the dataset for evaluating the ability of language models to learn syntax.
Paper:
Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies
Tal Linzen, Emmanuel Dupoux, Yoav Goldberg
"""
VERSION = tfds.core.Version('0.1.0')
CLASS_TO_CODE = {'VBZ': 0, 'VBP': 1}
CODE_TO_CLASS = {x: y for y, x in CLASS_TO_CODE.items()}
def __init__(self, **kwargs):
super(SVAgreement, self).__init__(**kwargs)
def _info(self):
self.text_encoder_config = tfds.features.text.TextEncoderConfig(
encoder_cls=tfds.features.text.SubwordTextEncoder,
vocab_size=2 ** 13)
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=("This is the dataset for subject verb agreement "
"to assess the ability of language models to learn syntax"),
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
"sentence": tfds.features.Text(
encoder_config=self.text_encoder_config),
# Here, labels can be of 5 distinct values.
"verb_class": tfds.features.ClassLabel(names=["VBZ", "VBP"]),
"verb_position": tf.int32,
"n_intervening": tf.int32,
"n_diff_intervening": tf.int32,
"distance": tf.int32,
"verb": tfds.features.Text()
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("sentence", "verb_class"),
# Homepage of the dataset for documentation
urls=["https://github.com/TalLinzen/rnn_agreement"],
# Bibtex citation for the dataset
citation=r"""@article{my-awesome-dataset-2020,
author = {Linzen, Tal; Dupoux,Emmanuel; Goldberg, Yoav},"}""",
)
def _vocab_text_gen(self, input_file):
for _, ex in self._generate_examples(input_file):
yield ex["sentence"]
def _split_generators(self, dl_manager):
# Downloads the data and defines the splits
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
extracted_path = dl_manager.download_and_extract(
'http://tallinzen.net/media/rnn_agreement/agr_50_mostcommon_10K.tsv.gz')
def make_splits(extracted_path, data_dir, prop_train=0.1, prop_valid=0.01):
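      # Deterministically shuffles the downloaded corpus (seed 42) and writes the splits
      # to TSV. Note that with the default proportions only 10% of the data is used for
      # training and 1% for validation; the remaining ~89% becomes the test split.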
# for reproducibility
np.random.seed(42)
print('| read in the data')
data = deps_from_tsv(extracted_path)
print('| shuffling')
np.random.shuffle(data)
n_train = int(len(data) * prop_train)
n_valid = int(len(data) * prop_valid)
train = data[:n_train]
valid = data[n_train: n_train + n_valid]
test = data[n_train + n_valid:]
print('| splitting')
deps_to_tsv(train, os.path.join(data_dir, "train.tsv"))
deps_to_tsv(valid, os.path.join(data_dir, "valid.tsv"))
deps_to_tsv(test, os.path.join(data_dir, "test.tsv"))
print('| done!')
make_splits(extracted_path,self.data_dir)
# Generate vocabulary from training data if SubwordTextEncoder configured
self.info.features["sentence"].maybe_build_from_corpus(
self._vocab_text_gen(os.path.join(self.data_dir, "train.tsv")))
# Specify the splits
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"input_file_path": os.path.join(self.data_dir, "train.tsv"),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"input_file_path": os.path.join(self.data_dir, "valid.tsv"),
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"input_file_path": os.path.join(self._data_dir, "test.tsv"),
},
),
]
def _generate_examples(self, input_file_path):
""" Yields examples from the dataset
:param input_file_path:
:return: example
"""
# Read the input data out of the source files
data = deps_from_tsv(input_file_path)
# And yield examples as feature dictionaries
example_id = 0
for example in data:
example_id += 1
yield example_id, {
"sentence": example['sentence'],
"verb_class": example['verb_pos'],
"verb_position": int(example['verb_index']) - 1,
"n_intervening": example['n_intervening'],
"n_diff_intervening": example['n_diff_intervening'],
"distance": example['distance'],
"verb": example['verb']
}
def sentence_encoder(self):
return self.info.features["sentence"].encoder
def vocab_size(self):
"""Retrieves the dictionary mapping word indices back to words.
Arguments:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary.
"""
return self.info.features["sentence"].encoder.vocab_size
class WordSvAgreement(SVAgreement):
""" This is the dataset for evaluating the ability of language models to learn syntax.
Paper:
Assessing the Ability of LSTMs to Learn Syntax-Sensitive Dependencies
Tal Linzen, Emmanuel Dupoux, Yoav Goldberg
"""
VERSION = tfds.core.Version('0.1.0')
CLASS_TO_CODE = {'VBZ': 0, 'VBP': 1}
CODE_TO_CLASS = {x: y for y, x in CLASS_TO_CODE.items()}
VOCAB_DIR = 'tal_agreement/vocab'
def __init__(self, data_dir, **kwargs):
self.vocab_dir = os.path.join(data_dir, self.VOCAB_DIR)
super(WordSvAgreement, self).__init__(data_dir=data_dir, **kwargs)
def _info(self):
vocab = list(np.load(self.vocab_dir, allow_pickle=True).item().keys())
print("Vocab len: ", len(vocab))
self.text_encoder_config = tfds.features.text.TextEncoderConfig(
encoder=tfds.features.text.TokenTextEncoder(vocab_list=vocab,
oov_token=constants.unk,
lowercase=False, tokenizer=tfds.features.text.Tokenizer(
alphanum_only=True,
reserved_tokens=[a for a in string.punctuation if a not in ['<', '>']] + constants.all
)))
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=("This is the dataset for subject verb agreement "
"to assess the ability of language models to learn syntax"),
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
"sentence": tfds.features.Text(
encoder_config=self.text_encoder_config),
# Here, labels can be of 5 distinct values.
"verb_class": tfds.features.ClassLabel(names=["VBZ", "VBP"]),
"verb_position": tf.int32,
"n_intervening": tf.int32,
"n_diff_intervening": tf.int32,
"distance": tf.int32,
"verb": tfds.features.Text()
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("sentence", "verb_class"),
# Homepage of the dataset for documentation
homepage="https://github.com/TalLinzen/rnn_agreement",
# Bibtex citation for the dataset
citation=r"""@article{my-awesome-dataset-2020,
author = {Linzen, Tal; Dupoux,Emmanuel; Goldberg, Yoav},"}""",
)
if __name__ == '__main__':
databuilder = WordSvAgreement(data_dir='data')
databuilder.download_and_prepare(download_dir='tmp/',
download_config=tfds.download.DownloadConfig(register_checksums=True))
dataset = databuilder.as_dataset(split="validation", batch_size=1000)
dataset = tfds.as_numpy(dataset)
for batch in dataset:
print("encoded_sentence:", batch['sentence'])
print("decoded_sentence:", databuilder.sentence_encoder().decode(batch['sentence'][0]))
print("verb class:", batch['verb_class'][0])
print("verb position:",batch['verb_position'][0])
print("distance:",batch['distance'][0])
break
print(databuilder.vocab_size())
| 8,680 | 35.020747 | 106 | py |
Reflect | Reflect-master/tasks/sv_agreement.py | import functools
from distill.distill_util import DistillLoss, get_probs, SequenceDistillLoss, get_topk_masked_probs, get_masked_probs
from tasks.task import Task
import tensorflow as tf
from tf2_models import metrics
from tf2_models.metrics import masked_batch_perplexity, masked_perplexity, \
MaskedSequenceLoss, ClassificationLoss
from tfds_data.tal_agreement import WordSvAgreement, SVAgreement
from util import constants
class SvAgreementLM(Task):
def __init__(self, task_params, name='sv_agreement_lm', data_dir='data', builder_cls=SVAgreement):
super(SvAgreementLM, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls,
output_padding=True)
@tf.function
def convert_examples(self, examples):
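    # Language-modelling view of an example: wrap the sentence in <bos>/<eos> and return
    # it shifted by one position, so the model predicts each next token.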
sentences = examples['sentence']
s_shape = tf.shape(sentences)
#batch_size, length = s_shape[0], s_shape[1]
bos = self.databuilder.sentence_encoder().encode(constants.bos)
eos = self.databuilder.sentence_encoder().encode(constants.eos)
sentence = tf.concat([bos, sentences, eos], axis=-1)
return sentence[:-1],\
sentence[1:]
def get_loss_fn(self):
return MaskedSequenceLoss(padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), num_replicas_in_sync=self.task_params.num_replicas_in_sync)
def vocab_size(self):
return self.databuilder.vocab_size()
def output_size(self):
return self.vocab_size()
def sentence_encoder(self):
return self.databuilder.sentence_encoder()
def get_distill_loss_fn(self, distill_params):
return SequenceDistillLoss(tmp=distill_params.distill_temp, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64))
def get_probs_fn(self):
return get_masked_probs
def metrics(self):
return [MaskedSequenceLoss(padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)),
functools.update_wrapper(functools.partial(masked_batch_perplexity,
padding_symbol=tf.constant(self.output_padding_symbol,
dtype=tf.int64)),
masked_batch_perplexity),
functools.update_wrapper(functools.partial(masked_perplexity,
padding_symbol=tf.constant(self.output_padding_symbol,
dtype=tf.int64)),
masked_perplexity),
metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=1),
metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=2),
metrics.AccuracyTopk(global_batch_size=self.task_params.batch_size, padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64), topk=5)
]
class WordSvAgreementLM(SvAgreementLM):
def __init__(self, task_params, name='word_sv_agreement_lm', data_dir='data', builder_cls=WordSvAgreement):
super(WordSvAgreementLM, self).__init__(task_params=task_params, name=name, data_dir=data_dir, builder_cls=builder_cls)
class WordSvAgreementVP(Task):
def __init__(self, task_params, name='word_sv_agreement_vp', data_dir='data', builder_cls=WordSvAgreement):
super(WordSvAgreementVP, self).__init__(task_params=task_params, name=name, data_dir=data_dir,
builder_cls=builder_cls,
output_padding=False)
@property
def padded_shapes(self):
return ([None],[])
@tf.function
def convert_examples(self, examples):
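    # Verb-prediction view of an example: keep only the prefix before the verb, write
    # <eos> at the verb position (the verb and everything after it are masked out), and
    # use the verb's number class (VBZ vs. VBP) as the label.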
sentences = examples['sentence']
#bos = self.databuilder.sentence_encoder().encode(constants.bos)
eos = self.databuilder.sentence_encoder().encode(constants.eos)
sentences = tf.concat([sentences, eos], axis=-1)
verb_position = examples['verb_position']
    # The verb itself is also masked
mask = tf.cast(tf.sequence_mask(verb_position,maxlen=tf.shape(sentences)[0]), dtype=tf.int64)
max_length = tf.reduce_max(verb_position + 1)
last_index_mask = tf.eye(tf.shape(sentences)[0], dtype=tf.int64)[verb_position]
last_index_mask = last_index_mask * eos[0]
return (sentences * mask + last_index_mask)[:max_length], \
examples['verb_class']
def vocab_size(self):
return self.databuilder.vocab_size()
def output_size(self):
return 2
def get_loss_fn(self):
return ClassificationLoss(global_batch_size=tf.constant(self.task_params.batch_size), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64))
def get_distill_loss_fn(self, distill_params):
return DistillLoss(tmp=tf.constant(distill_params.distill_temp), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64))
def get_probs_fn(self):
return get_probs
def metrics(self):
return [ClassificationLoss(global_batch_size=tf.constant(self.task_params.batch_size), padding_symbol=tf.constant(self.output_padding_symbol, dtype=tf.int64)),
tf.keras.metrics.SparseCategoricalAccuracy()]
def sentence_encoder(self):
    return self.databuilder.sentence_encoder()
| 5,424 | 44.208333 | 163 | py |
Reflect | Reflect-master/tasks/mnist.py | from distill.distill_util import DistillLoss, get_probs
from tasks.task import Task
import tensorflow as tf
import tensorflow_datasets as tfds
from tf2_models.metrics import ClassificationLoss
from tfds_data.aff_nist import AffNist
class Mnist(Task):
def __init__(self, task_params, name='mnist', data_dir='mnist_data'):
self.databuilder = tfds.builder("mnist")
super(Mnist, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=None)
def vocab_size(self):
return 28*28
def output_size(self):
return 10
def get_loss_fn(self):
return ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64))
def get_distill_loss_fn(self, distill_params):
return DistillLoss(tmp=distill_params.distill_temp)
def get_probs_fn(self):
return get_probs
def metrics(self):
return [ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64)),
tf.keras.metrics.SparseCategoricalAccuracy()]
@property
def padded_shapes(self):
# To make sure we are not using this!
raise NotImplementedError
def convert_examples(self, examples):
return tf.cast(examples['image'], dtype=tf.float32)/255, tf.cast(examples['label'], dtype=tf.int32)
def setup_datasets(self):
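    # Builds the train/validation/test tf.data pipelines. Note that the validation set is
    # simply the MNIST test split, since no separate validation split is defined here.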
self.info = self.databuilder.info
self.n_train_batches = int(
self.info.splits['train'].num_examples / self.task_params.batch_size)
self.n_test_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.n_valid_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.databuilder.download_and_prepare(download_dir=self.data_dir)
self.test_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.test_dataset, tf.data.Dataset)
self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.test_dataset = self.test_dataset.repeat()
self.test_dataset = self.test_dataset.batch(
batch_size=self.task_params.batch_size)
self.test_dataset = self.test_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.train_dataset = self.databuilder.as_dataset(split="train")
assert isinstance(self.train_dataset, tf.data.Dataset)
self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataset = self.train_dataset.repeat()
self.train_dataset = self.train_dataset.shuffle(1024)
self.train_dataset = self.train_dataset.batch(
batch_size=self.task_params.batch_size)
# self.train_dataset = self.train_dataset.cache()
self.train_dataset = self.train_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.valid_dataset, tf.data.Dataset)
self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.valid_dataset.repeat()
self.valid_dataset = self.valid_dataset.batch(
batch_size=self.task_params.batch_size)
self.valid_dataset = self.valid_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
class AffNistTask(Task):
def __init__(self, task_params, name='aff_nist',data_dir='data', builder_cls=AffNist):
super(AffNistTask, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=builder_cls)
def input_shape(self):
"""
To be used when calling model.build(input_shape)
:return:
    [batch_size, height, width, channels]
    """
    return [None, 40, 40, 1]  # affNIST images are 40x40 grayscale
def vocab_size(self):
return 40*40
def output_size(self):
return 10
def get_loss_fn(self):
return ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64))
def get_distill_loss_fn(self, distill_params):
return DistillLoss(tmp=distill_params.distill_temp)
def get_probs_fn(self):
return get_probs
def metrics(self):
return [ClassificationLoss(global_batch_size=self.task_params.batch_size,
padding_symbol=tf.constant(-1, dtype=tf.int64)),
tf.keras.metrics.SparseCategoricalAccuracy()]
@property
def padded_shapes(self):
# To make sure we are not using this!
raise NotImplementedError
def convert_examples(self, examples):
return tf.cast(examples['image'], dtype=tf.float32)/255, tf.cast(examples['label'], dtype=tf.int32)
def setup_datasets(self):
self.info = self.databuilder.info
self.n_train_batches = int(
self.info.splits['train'].num_examples / self.task_params.batch_size)
self.n_test_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.n_valid_batches = int(
self.info.splits['test'].num_examples / self.task_params.batch_size)
self.test_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.test_dataset, tf.data.Dataset)
self.test_dataset = self.test_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.test_dataset = self.test_dataset.repeat()
self.test_dataset = self.test_dataset.batch(
batch_size=self.task_params.batch_size)
self.test_dataset = self.test_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.train_dataset = self.databuilder.as_dataset(split="train")
assert isinstance(self.train_dataset, tf.data.Dataset)
self.train_dataset = self.train_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.train_dataset = self.train_dataset.repeat()
self.train_dataset = self.train_dataset.shuffle(1024)
self.train_dataset = self.train_dataset.batch(
batch_size=self.task_params.batch_size)
# self.train_dataset = self.train_dataset.cache()
self.train_dataset = self.train_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.databuilder.as_dataset(split="test")
assert isinstance(self.valid_dataset, tf.data.Dataset)
self.valid_dataset = self.valid_dataset.map(map_func=lambda x: self.convert_examples(x),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
self.valid_dataset = self.valid_dataset.repeat()
self.valid_dataset = self.valid_dataset.batch(
batch_size=self.task_params.batch_size)
self.valid_dataset = self.valid_dataset.prefetch(
tf.data.experimental.AUTOTUNE)
class Svhn(Mnist):
def __init__(self, task_params, name='svhn', data_dir='mnist_data'):
self.databuilder = tfds.builder("svhn_cropped")
super(Mnist, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=None)
def vocab_size(self):
return 32 * 32
def input_shape(self):
"""
To be used when calling model.build(input_shape)
:return:
    [batch_size, height, width, channels]
    """
    return [None, 32, 32, 3]  # svhn_cropped images are 32x32 RGB
class Mnist40(Mnist):
def __init__(self, task_params, name='mnist40', data_dir='mnist_data'):
self.databuilder = tfds.builder("mnist")
super(Mnist, self).__init__(task_params=task_params, name=name,
data_dir=data_dir,
builder_cls=None)
def vocab_size(self):
return 40 * 40
def output_size(self):
return 10
def input_shape(self):
"""
To be used when calling model.build(input_shape)
:return:
    [batch_size, height, width, channels]
    """
    return [None, 40, 40, 1]  # inputs are zero-padded to 40x40 in convert_examples
def convert_examples(self, examples):
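    # Zero-pads the 28x28 MNIST digits to 40x40, presumably so that models trained here
    # can be evaluated directly against the 40x40 affNIST images.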
pad_length = int((40 - 28) / 2)
return tf.pad(tf.cast(examples['image'], dtype=tf.float32) / 255,
([pad_length, pad_length], [pad_length, pad_length],
[0, 0])), tf.cast(
examples['label'], dtype=tf.int32)
| 8,663 | 37.678571 | 103 | py |
PyKrige | PyKrige-main/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyKrige documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 1 18:34:53 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import shlex
import sys
import matplotlib
import sphinx_rtd_theme
matplotlib.use("Agg")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve
import pykrige
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.mathjax",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon", # parameters look better than with numpydoc only
"numpydoc",
"sphinx_gallery.gen_gallery",
"sphinx.ext.linkcode",
"m2r2",
]
autodoc_default_flags = ["members", "inherited-members"]
# autosummaries from source-files
autosummary_generate = True
# dont show __init__ docstring
autoclass_content = "class"
# sort class members
autodoc_member_order = "groupwise"
# autodoc_member_order = 'bysource'
# Notes in boxes
napoleon_use_admonition_for_notes = True
# Attributes like parameters
# napoleon_use_ivar = True
# this is a nice class-doc layout
numpydoc_show_class_members = True
# class members have no separate file, so they are not in a toctree
numpydoc_class_members_toctree = False
# for the covmodels alot of classmembers show up...
# maybe switch off with: :no-inherited-members:
numpydoc_show_inherited_class_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {
".rst": "restructuredtext",
".txt": "restructuredtext",
".md": "markdown",
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
sphinx_gallery_conf = {
# path to your examples scripts
"examples_dirs": "../../examples",
# path where to save gallery generated examples
"gallery_dirs": "examples",
"filename_pattern": "/.*.py",
}
# General information about the project.
curr_year = datetime.datetime.now().year
project = "PyKrige"
copyright = "2017 - {}, PyKrige developers".format(curr_year)
author = "PyKrige developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pykrige.__version__
# The full version, including alpha/beta/rc tags.
release = pykrige.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "PyKrigedoc"
html_logo = "pics/PyKrige_150.png"
html_favicon = "pics/PyKrige.ico"
# -- Options for LaTeX output ---------------------------------------------
latex_logo = "pics/PyKrige_150.png"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "PyKrige.tex", "PyKrige Documentation", "PyKrige developers", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pykrige", "PyKrige Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"PyKrige",
"PyKrige Documentation",
author,
"PyKrige",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
"pykrige",
"https://github.com/GeoStat-Framework/"
"PyKrige/blob/{revision}/"
"{package}/{path}#L{lineno}",
)
| 11,242 | 30.940341 | 88 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/misc/__init__.py | import random
import numpy as np
import torch
def set_seed(seed: int):
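    # Seed PyTorch, NumPy and Python's random module so experiment runs are reproducible.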
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
| 149 | 12.636364 | 27 | py |
A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning | A-Quantile-based-Approach-for-Hyperparameter-Transfer-Learning-master/src/prior/mlp_pytorch.py | import tempfile
import uuid
from pathlib import Path
from typing import Optional, Tuple
from sklearn.preprocessing import StandardScaler
from constants import num_gradient_updates
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, TensorDataset
from prior import Prior
def train(
module,
X_train: np.array,
y_train: np.array,
num_gradient_updates: int = num_gradient_updates,
lr: float = 1e-2,
num_decays: int = 3,
factor_decay: float = 5.0,
batch_size: int = 64,
clip_gradient: Optional[float] = None,
optimizer=None,
early_stopping: bool = True,
):
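    # Fits `module` by maximizing the Gaussian log-likelihood of y_train under the
    # predicted (mu, sigma), holding out 10% of the data for validation. The learning
    # rate is divided by `factor_decay` between the `num_decays` stages, and with
    # early_stopping the weights with the best validation RMSE are checkpointed to a
    # temporary file and reloaded at the end.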
dataset = TensorDataset(
torch.Tensor(X_train),
torch.Tensor(y_train)
)
# keep 10% of train dataset as validation
num_train = len(dataset) * 9 // 10
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [num_train, len(dataset) - num_train])
# dont use gpu for now
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# module = module.to(device)
def infinite_stream():
while True:
# reshuffle
dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
for data in dataloader:
yield data
train_losses = []
val_rmses = []
first = True
if optimizer is None:
optimizer = torch.optim.Adam(module.parameters(), lr=lr)
checkpoint_freq = 100
it = 0
best_val_rmse = float("inf")
checkpoint_path = Path(tempfile.gettempdir()) / f"best-model-{uuid.uuid4().hex}.pth"
with torch.autograd.set_detect_anomaly(True):
for _ in range(num_decays):
with tqdm(infinite_stream(), total=num_gradient_updates, miniters=200, mininterval=2) as tqdm_iter:
for X_batch, y_batch in tqdm_iter:
optimizer.zero_grad()
# both of shape (batch_size, output_dim,) we could also fit a covariate matrix to account
# the dependency between different criterion
mu, sigma = module(X_batch)
distr = torch.distributions.normal.Normal(loc=mu, scale=sigma)
# y_batch has shape (batch_size, output_dim)
loss = - distr.log_prob(y_batch).mean()
loss.backward()
loss_value = loss.item()
if clip_gradient is not None:
nn.utils.clip_grad_norm_(
module.parameters(),
max_norm=clip_gradient
)
if first:
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print(
"\n".join(f"{name}: shape {p.shape}, {p.numel()} parameters" for name, p in
module.named_parameters() if p.requires_grad)
)
print(f"number of parameters: {count_parameters(module)}")
first = False
# print(loss_value)
train_losses.append(loss_value)
optimizer.step()
metrics_dict = {
"train_loss": loss_value,
}
if it % checkpoint_freq == 0:
for X_val, y_val in DataLoader(val_dataset, batch_size=len(val_dataset)):
# compute mean
mu, sigma = module(X_val)
val_rmse = ((mu - y_val) ** 2).mean().sqrt().item()
metrics_dict['val_rmse'] = val_rmse
val_rmses.append(val_rmse)
if early_stopping and val_rmse < best_val_rmse:
# print(f" found better loss {val_rmse} than {best_val_rmse}, checkpointing in {checkpoint_path}")
best_val_rmse = min(best_val_rmse, val_rmse)
torch.save(module.state_dict(), checkpoint_path)
tqdm_iter.set_postfix(metrics_dict)
it += 1
if it % num_gradient_updates == 0:
break
            lr /= factor_decay
            # propagate the decayed rate to the optimizer; updating the local variable
            # alone would leave the optimizer's learning rate unchanged
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr
if early_stopping:
print(f"loading best model found at {checkpoint_path} with val_rmse={val_rmse}")
module.load_state_dict(torch.load(checkpoint_path))
return module, (train_losses, val_rmses)
class GaussianRegression(nn.Module):
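    # MLP that predicts a heteroscedastic Gaussian per input: a shared trunk feeds a
    # linear head for the mean and a Softplus-activated head for the (positive) scale.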
def __init__(self, input_dim: int, num_layers: int = 3, num_hidden: int = 40, dropout: float = 0.0):
super(GaussianRegression, self).__init__()
layers = [nn.Linear(input_dim, num_hidden)]
for i in range(num_layers):
layers.append(nn.Linear(num_hidden, num_hidden))
layers.append(nn.ReLU())
layers.append(nn.Dropout(dropout))
self.layers = nn.Sequential(*layers)
self.mu_proj = nn.Linear(num_hidden, 1)
self.sigma_proj = nn.Sequential(nn.Linear(num_hidden, 1), nn.Softplus())
def init(m):
if type(m) == nn.Linear:
nn.init.xavier_uniform_(m.weight)
# use the modules apply function to recursively apply the initialization
self.layers.apply(init)
def forward(self, x):
x_hidden = self.layers(x)
mu = self.mu_proj(x_hidden)
sigma = self.sigma_proj(x_hidden)
return mu, sigma
class ParametricPrior(Prior):
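    # Neural parametric prior: standardizes the inputs, fits GaussianRegression by
    # maximum likelihood using the train() loop above, and exposes predict(), which
    # returns the per-point mean and scale as numpy arrays.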
def __init__(
self,
X_train: np.array,
y_train: np.array,
num_gradient_updates: int = num_gradient_updates,
dropout: float = 0.1,
num_layers: int = 3,
num_hidden: int = 50,
**train_kwargs
):
super(ParametricPrior, self).__init__(
X_train=X_train,
y_train=y_train,
)
n, dim = X_train.shape
self.scaler = StandardScaler()
module = GaussianRegression(input_dim=dim, num_layers=num_layers, num_hidden=num_hidden, dropout=dropout)
self.module, _ = train(
module=module,
X_train=self.scaler.fit_transform(X_train),
y_train=y_train,
num_gradient_updates=num_gradient_updates,
**train_kwargs
)
def predict(self, X: np.array) -> Tuple[np.array, np.array]:
X_test = torch.Tensor(self.scaler.transform(X))
self.module.eval()
mu, sigma = self.module(X_test)
return mu.detach().numpy(), sigma.detach().numpy()
| 6,805 | 36.191257 | 126 | py |