prompt (string · 1.74k–34.3k chars) | ref (string · 4–432 chars)
---|---|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: modelscope/normal-depth-diffusion
# Path: ldm/camera_utils.py
def get_camera(num_frames,
elevation=15,
azimuth_start=0,
azimuth_span=360,
blender_coord=True,
camera_distance=1.):
angle_gap = azimuth_span / num_frames
cameras = []
for azimuth in np.arange(azimuth_start, azimuth_span + azimuth_start,
angle_gap):
camera_matrix = create_camera_to_world_matrix(elevation, azimuth,
camera_distance)
if blender_coord:
camera_matrix = convert_opengl_to_blender(camera_matrix)
cameras.append(camera_matrix.flatten())
return torch.tensor(np.stack(cameras, 0)).float()
# Path: ldm/models/diffusion/ddim.py
class DDIMSampler(object):
def __init__(self, model, schedule='linear', **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device('cuda'):
attr = attr.to(torch.device('cuda'))
setattr(self, name, attr)
def make_schedule(self,
ddim_num_steps,
ddim_discretize='uniform',
ddim_eta=0.,
verbose=True):
self.ddim_timesteps = make_ddim_timesteps(
ddim_discr_method=ddim_discretize,
num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,
verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[
0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model
.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev',
to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod',
to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod',
to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod',
to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod',
to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod',
to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,
verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas',
np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *
(1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps',
sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(
self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs):
if conditioning is not None:
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if cbs != batch_size:
print(
f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'
)
else:
if conditioning.shape[0] != batch_size:
print(
f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'
)
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
samples, intermediates = self.ddim_sampling(
conditioning,
size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask,
x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
**kwargs)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(self,
cond,
shape,
x_T=None,
ddim_use_original_steps=False,
callback=None,
timesteps=None,
quantize_denoised=False,
mask=None,
x0=None,
img_callback=None,
log_every_t=100,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
**kwargs):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(
min(timesteps / self.ddim_timesteps.shape[0], 1)
* self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = reversed(range(
0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[
0]
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b, ), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(
x0, ts) # TODO: deterministic forward pass?
img = img_orig * mask + (1. - mask) * img
outs = self.p_sample_ddim(
img,
cond,
ts,
index=index,
use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised,
temperature=temperature,
noise_dropout=noise_dropout,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
**kwargs)
img, pred_x0 = outs
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(self,
x,
c,
t,
index,
repeat_noise=False,
use_original_steps=False,
quantize_denoised=False,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
dynamic_threshold=None,
**kwargs):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
model_output = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [
torch.cat(
[unconditional_conditioning[k][i], c[k][i]])
for i in range(len(c[k]))
]
elif isinstance(c[k], torch.Tensor):
c_in[k] = torch.cat(
[unconditional_conditioning[k], c[k]])
else:
assert c[k] == unconditional_conditioning[k]
c_in[k] = c[k]
elif isinstance(c, list):
c_in = list()
assert isinstance(unconditional_conditioning, list)
for i in range(len(c)):
c_in.append(
torch.cat([unconditional_conditioning[i], c[i]]))
else:
c_in = torch.cat([unconditional_conditioning, c])
model_uncond, model_t = self.model.apply_model(x_in, t_in,
c_in).chunk(2)
# model_t = self.model.apply_model(x, t, c, **kwargs)
# model_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)
model_output = model_uncond + unconditional_guidance_scale * (
model_t - model_uncond)
if self.model.parameterization == 'v':
print('using v!')
e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
else:
e_t = model_output
if score_corrector is not None:
assert self.model.parameterization == 'eps', 'not implemented'
e_t = score_corrector.modify_score(self.model, e_t, x, t, c,
**corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1),
sqrt_one_minus_alphas[index],
device=device)
# current prediction for x_0
if self.model.parameterization != 'v':
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
else:
pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
raise NotImplementedError()
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device,
repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
@torch.no_grad()
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
# fast, but does not allow for exact reconstruction
# t serves as an index to gather the correct alphas
if use_original_steps:
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
else:
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
if noise is None:
noise = torch.randn_like(x0)
return (
extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0
+ extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)
* noise)
@torch.no_grad()
def decode(self,
x_latent,
cond,
t_start,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
use_original_steps=False,
**kwargs):
timesteps = np.arange(self.ddpm_num_timesteps
) if use_original_steps else self.ddim_timesteps
timesteps = timesteps[:t_start]
time_range = np.flip(timesteps)
total_steps = timesteps.shape[0]
iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
x_dec = x_latent
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((x_latent.shape[0], ),
step,
device=x_latent.device,
dtype=torch.long)
x_dec, _ = self.p_sample_ddim(
x_dec,
cond,
ts,
index=index,
use_original_steps=use_original_steps,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
**kwargs)
return x_dec
# Path: ldm/util.py
def instantiate_from_config(config):
if not 'target' in config:
print(config)
if config == '__is_first_stage__':
return None
elif config == '__is_unconditional__':
return None
raise KeyError('Expected key `target` to instantiate.')
return get_obj_from_str(config['target'])(**config.get('params', dict()))
# Path: model_zoo.py
def build_model(model_name,
ckpt_path=None,
cache_dir=None,
return_cfg=False,
strict=True):
if not model_name in PRETRAINED_MODELS:
raise RuntimeError(
f'Model name {model_name} is not a pre-trained model. Available models are:\n- ' + \
'\n- '.join(PRETRAINED_MODELS.keys())
)
model_info = PRETRAINED_MODELS[model_name]
# Instantiate the model
print(f"Loading model from config: {model_info['config']}")
config_file = os.path.join(REPO_DIR, model_info['config'])
assert os.path.exists(config_file)
config = OmegaConf.load(config_file)
# loading from ema_model
model = instantiate_from_config(config.model)
if ckpt_path.endswith('_ema.ckpt'):
ema_ckpt_path = ckpt_path
else:
ema_ckpt_path = os.path.splitext(ckpt_path)[0] + '_ema.ckpt'
# model_ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']
# model_ckpt = extract_ema(model, model_ckpt)
print(ema_ckpt_path)
if os.path.exists(ema_ckpt_path):
print(f'load from ema_ckpt:{ema_ckpt_path}')
ckpt_path = ema_ckpt_path
model_ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']
else:
model_ckpt = torch.load(ckpt_path, map_location='cpu')
model_ckpt = extract_ema(model, model_ckpt['state_dict'])
torch.save({'state_dict': model_ckpt}, ema_ckpt_path)
model.load_state_dict(model_ckpt, strict=strict)
if not return_cfg:
return model
else:
return model, config
# Path: scripts/t2i_mv.py
import argparse
import os
import random
import sys
import numpy as np
import torch
import torch.nn.functional as F
from ldm.camera_utils import get_camera
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config
from model_zoo import build_model
from omegaconf import OmegaConf
from PIL import Image
sys.path.append('./')
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def show_img(imgs):
if imgs.shape[-1] == 4:
tokens = []
rgb_tensors = imgs[..., :3]
depth_tensors = imgs[..., 3:]
depth_tensors = np.concatenate(
[depth_tensors, depth_tensors, depth_tensors], -1)
batch_size = rgb_tensors.shape[0]
for rgb, depth in zip(
np.split(rgb_tensors, batch_size // 4, axis=0),
np.split(depth_tensors, batch_size // 4, axis=0)):
tokens.append(rgb)
tokens.append(depth)
imgs = np.concatenate(tokens, axis=0)
ret_imgs = []
for i in range(0, imgs.shape[0] // 4):
cur_imgs = imgs[i * 4:(i + 1) * 4]
cur_img_list = np.split(cur_imgs, 4)
cur_imgs = np.concatenate(cur_img_list, axis=2)[0]
ret_imgs.append(cur_imgs)
imgs = np.concatenate(ret_imgs, axis=0)
return imgs
def t2i(model,
image_size,
prompt,
uc,
sampler,
step=20,
| scale=7.5, |
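The ref cell above (`scale=7.5,`) continues the `t2i` signature in `scripts/t2i_mv.py`; that guidance scale is the weight `p_sample_ddim` uses when it blends the unconditional and conditional noise predictions. A minimal, self-contained sketch of that blend follows, with random tensors standing in for real UNet outputs; nothing beyond the formula already visible in the DDIM sampler above is taken from the repository:

```python
import torch

def classifier_free_guidance(model_uncond: torch.Tensor,
                             model_cond: torch.Tensor,
                             scale: float = 7.5) -> torch.Tensor:
    # Same blend as in DDIMSampler.p_sample_ddim:
    # output = uncond + scale * (cond - uncond)
    return model_uncond + scale * (model_cond - model_uncond)

if __name__ == '__main__':
    uncond = torch.randn(4, 4, 32, 32)  # unconditional noise prediction (stand-in)
    cond = torch.randn(4, 4, 32, 32)    # text-conditioned noise prediction (stand-in)
    guided = classifier_free_guidance(uncond, cond, scale=7.5)
    print(guided.shape)  # torch.Size([4, 4, 32, 32])
```

At `scale=1.0` the guided output reduces to the conditional prediction; larger values push samples harder toward the conditioning at the cost of diversity.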
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/DCI
# Path: reproduction/crowdsourcing/annotate/preprocessing/mask_creation_utils.py
TARGET_STEP = 100
SKIP_LOGGING = True
class GroupItem(TypedDict):
class FinalGroup(TypedDict):
def jitter(size: float) -> float:
def bound(v, lo, hi):
def _load_final_group_from_json(json_dict) -> FinalGroup:
def load_final_group_from_json(json_dict) -> FinalGrouping:
def get_grid(
step: int,
top_left: Point,
bottom_right: Point,
noise: Optional[float] = None
) -> List[Point]:
def get_missing_points_greedy(mask: np.ndarray, min_size: int) -> List[Point]:
def get_points_from_canny_greedy(
image: np.ndarray,
distance_threshold: int = 40,
jitter_amount: int = 40,
num_extra: int = 3,
) -> List[Point]:
def predict_all(
predictor: "SamPredictor",
image: np.ndarray,
step: int = TARGET_STEP,
top_left: Optional[Point] = None,
bottom_right: Optional[Point] = None,
containing_mask: Optional[np.ndarray] = None
) -> Dict[Point, List[EfficientMask]]:
def predict_for_points(
predictor: "SamPredictor",
points: List[Point],
) -> Dict[Point, List[EfficientMask]]:
def predict_for_bounded_points(
predictor: "SamPredictor",
image: np.ndarray,
points: List[Point],
mask: EfficientMask,
) -> Dict[Point, List[EfficientMask]]:
def get_canny_masks(
predictor: "SamPredictor",
image: np.ndarray,
distance_threshold: int = 40,
jitter_amount: int = 40
):
def process_best_largest(
results: Dict[Point, List[EfficientMask]],
penalty_gap: float = 0.2,
) -> Dict[Point, Dict[MaskMergeKey, EfficientMask]]:
def get_groups(
processed_results: Dict[Point, Dict[MaskMergeKey, EfficientMask]],
merge_key: MaskMergeKey = 'best',
groups: Optional[GroupDict] = None,
) -> GroupDict:
def get_groups_simple(
sam_results: List[EfficientMask],
) -> FinalGrouping:
def print_groups(groups: FinalGrouping) -> None:
def _get_group_map(curr_g: FinalGrouping) -> Dict[Union[int, str], Any]:
def refine_groups_simple(groups: FinalGrouping, merge_thresh = 0.03) -> FinalGrouping:
def first_iteration_groups(
predictor: "SamPredictor",
processed_results: Dict[Point, Dict[MaskMergeKey, EfficientMask]],
step: int,
merge_key: MaskMergeKey = "largest",
) -> GroupDict:
def get_subgroup_mask_lists(
groups: GroupDict,
base_masks: Dict[Point, List[EfficientMask]],
canny_masks: Dict[Point, List[EfficientMask]],
score_cutoff: float = 0.7,
retain_best: bool = False,
) -> GroupDict:
def compute_subgroups(
group_mask_item: GroupItem,
contained_in_thresh: float = 0.90,
outer_sim_thresh: float = 0.77,
mutual_sim_thresh: float = 0.85,
retain_best: bool = False,
) -> GroupDict:
def add_points_in_mask(
predictor: "SamPredictor",
image: np.ndarray,
item: GroupItem,
score_cutoff: float = 0.7,
num_points = 5,
) -> GroupItem:
def compute_subgroup_recursively(
predictor: "SamPredictor",
image: np.ndarray,
group_mask_item: GroupItem,
score_cutoff: float = 0.7,
contained_in_thresh: float = 0.90,
outer_sim_thresh: float = 0.77,
mutual_sim_thresh: float = 0.85,
retain_best: bool = False,
depth: int = 0,
) -> FinalGroup:
def compute_group_tree(
predictor: "SamPredictor",
image: np.ndarray,
score_cutoff: float = 0.7,
contained_in_thresh: float = 0.9,
outer_sim_thresh: float = 0.8,
mutual_sim_thresh: float = 0.9,
retain_best: bool = False,
) -> FinalGrouping:
# Path: reproduction/crowdsourcing/annotate/preprocessing/efficient_mask.py
class EfficientMask():
"""Class for more efficient mask mask over full numpy ndarrays"""
def __init__(self, mask: np.ndarray, score: float, size: Optional[int] = None):
self.mask = mask
self.score = score
self._size: Optional[int] = size
self._tlbr: Optional[Tuple[Point, Point]] = None
def __repr__(self) -> str:
return f"<EM : {self.get_size()}, {self.get_tlbr()}>"
def _reset_cache(self):
self._tlbr = None
self._size = None
def set_to(self, other: "EfficientMask"):
"""Set this mask's values to that of other"""
self.mask = other.mask
self.score = other.score
self._size = other._size
self._tlbr = other._tlbr
def get_tlbr(self) -> Tuple[Point, Point]:
"""Return the top left and bottom right bounds of this mask"""
if self._tlbr is None:
try:
np_where = np.where(self.mask == True)
left = np.min(np_where[1])
right = np.max(np_where[1]) + 1
top = np.min(np_where[0])
bottom = np.max(np_where[0]) + 1
except ValueError:
top, left, bottom, right = (0, 0, 0, 0)
self._tlbr = ((cast(Ydm, top), cast(Xdm, left)), (cast(Ydm, bottom), cast(Xdm, right)))
return self._tlbr
def get_size(self) -> int:
"""Return the total number of true pixels in this mask"""
if self._size is None:
(top, left), (bottom, right) = self.get_tlbr()
self._size = np.sum(self.mask[top:bottom,left:right]*1)
return self._size
def get_density(self) -> float:
"""Provide rough density with number of pixels and bbox size"""
size = self.get_size()
(t, l), (b, r) = self.get_tlbr()
area = (b-t) * (r-l) + 1
return size / area
def dense_score(self) -> float:
"""Return the score times the density, a heuristic for quality"""
return self.score * math.sqrt(self.get_density())
def _bbox_overlaps(self, other: "EfficientMask") -> bool:
"""Check points of opposite diagonals in each other bbox"""
(t1, l1), (b1, r1) = self.get_tlbr()
(t2, l2), (b2, r2) = other.get_tlbr()
return (
point_in_box(t1, l1, other.get_tlbr()) or
point_in_box(b1, r1, other.get_tlbr()) or
point_in_box(t2, r2, self.get_tlbr()) or
point_in_box(b2, l2, self.get_tlbr())
)
def _get_overlap_submask(self, other: "EfficientMask") -> np.ndarray:
"""Get a classic ndarray of pixels in the overlap between this and other"""
if not self._bbox_overlaps(other):
return np.array([])
(t1, l1), (b1, r1) = self.get_tlbr()
(t2, l2), (b2, r2) = other.get_tlbr()
maxt, maxl = max(t1, t2), max(l1, l2)
minb, minr = min(b1, b2), min(r1, r2)
return (self.mask[maxt:minb,maxl:minr]*1 + other.mask[maxt:minb,maxl:minr]*1 == 2)
def _get_xor_submask(self, other: "EfficientMask") -> np.ndarray:
"""Get a classic ndarray of pixels in the xor between this and other"""
if not self._bbox_overlaps(other):
return np.array([])
(t1, l1), (b1, r1) = self.get_tlbr()
(t2, l2), (b2, r2) = other.get_tlbr()
mint, minl = min(t1, t2), min(l1, l2)
maxb, maxr = max(b1, b2), max(r1, r2)
return (self.mask[mint:maxb,minl:maxr]*1 + other.mask[mint:maxb,minl:maxr]*1 == 1)
def intersect(self, other: "EfficientMask") -> "EfficientMask":
"""Return an efficient mask of the overlap between this and other"""
res = np.full(self.mask.shape, False)
submask = self._get_overlap_submask(other)
if len(submask) != 0:
(t1, l1), (b1, r1) = self.get_tlbr()
(t2, l2), (b2, r2) = other.get_tlbr()
maxt, maxl = max(t1, t2), max(l1, l2)
minb, minr = min(b1, b2), min(r1, r2)
res[maxt:minb,maxl:minr] = submask
return EfficientMask(res, (self.score + other.score)/2)
def mostly_contained_in(self, out_mask: "EfficientMask", thresh: float = 0.95) -> bool:
"""Returns True if thresh of self's pixels are in out_mask"""
size_in = self.get_size() + 1
overlap = mask_size(self._get_overlap_submask(out_mask))
return overlap / size_in > thresh
def overlaps_threshold(self, other: "EfficientMask", thresh: float = 0.50) -> bool:
"""Returns true if over thresh of either mask is contained in the other"""
size_1 = self.get_size() + 1
size_2 = other.get_size() + 1
overlap = mask_size(self._get_overlap_submask(other))
return overlap / size_1 > thresh or overlap / size_2 > thresh
def near_equivalent_to(self, other: "EfficientMask", thresh: float = 0.96) -> bool:
"""Return true if these two masks have prop overlapping pixels > thresh"""
size_1 = self.get_size() + 1
size_2 = other.get_size() + 1
if size_1 / size_2 < thresh or size_2 / size_1 < thresh:
return False
difference = mask_size(self._get_xor_submask(other))
if (difference / size_1) > (1-thresh) or (difference / size_2) > (1-thresh):
return False
return True
def union(self, other: "EfficientMask") -> "EfficientMask":
"""Return a new efficient mask unioning these"""
new_mask = self.mask * 1
(t2, l2), (b2, r2) = other.get_tlbr()
new_mask[t2:b2,l2:r2] += other.mask[t2:b2,l2:r2]*1
return EfficientMask(
mask=cast(np.ndarray, new_mask > 0),
score=(self.score + other.score) / 2, # may be more appropriate as weighted mask sizes
)
def subtract(self, other: "EfficientMask") -> "EfficientMask":
"""Subtract the other mask from this one"""
new_mask = self.mask * 1
(t2, l2), (b2, r2) = other.get_tlbr()
new_mask[t2:b2,l2:r2] -= other.mask[t2:b2,l2:r2]*1
return EfficientMask(
mask=cast(np.ndarray, new_mask == 1),
score=self.score,
)
# Path: reproduction/crowdsourcing/annotate/preprocessing/preprocess_assets_segev.py
import time
import sys
import numpy as np
import os
import base64
import cv2
import json
from segment_anything import sam_model_registry
from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator
from .mask_creation_utils import get_groups_simple, refine_groups_simple, FinalGrouping, FinalGroup, get_points_from_canny_greedy
from .efficient_mask import EfficientMask
from PIL import Image
from io import BytesIO
from typing import TypedDict, List
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the CC-BY-NC license found in the
# LICENSE file in the root directory of this source tree.
LOW = 5000 # Low value into the images array to start at
HIGH = 12000 # High value in images array to go to
SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in
ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__))
SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images")
OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks")
class SAMResult(TypedDict):
segmentation: np.ndarray # the mask itself
bbox: List[float] #XYWH of the mask
area: int # area of the mask
predicted_iou: float # model predicted quality
point_coords: List[List[float]] # coords of this point
stability_score: float # model stability score
crop_box: List[float] # image crop used to generate this mask, XYWH
def fold_group_tree(g: FinalGrouping):
def fold_group(subg: FinalGroup):
outer_mask = subg['outer_mask']
mask_img = Image.fromarray(np.uint8(outer_mask.mask * 255)) # type: ignore
mask_img = mask_img.convert('1')
maskbuf = BytesIO()
mask_img.save(maskbuf, format='png', bits=1, optimize=True)
mask_bytes = maskbuf.getvalue()
as_base64 = base64.b64encode(mask_bytes)
as_str = as_base64.decode('utf-8')
(t, l), (b, r) = subg['outer_mask'].get_tlbr()
return {
'outer_mask': as_str,
'area': int(outer_mask.get_size()),
'bounds': ((int(t), int(l)), (int(b), int(r))),
'subgroups': {
idx: fold_group(subsubg) for (idx, subsubg) in subg['subgroups'].items()
}
}
return {
idx: fold_group(subg) for (idx, subg) in g.items()
}
def group_outputs(outputs: List[SAMResult]) -> FinalGrouping:
as_efficient_masks: List[EfficientMask] = [
EfficientMask(
res['segmentation'],
res['predicted_iou'] * (res['stability_score'] ** 2),
size=res['area'],
) for res in outputs
]
in_order = sorted(as_efficient_masks, key=lambda x: x.get_size(), reverse=True)
return get_groups_simple(in_order)
def main():
all_images = os.listdir(SOURCE_DIR)
target_images = all_images[LOW:HIGH]
sam_checkpoint = SETEV_MODEL_ROOT
model_type = "vit_h"
device = "cuda"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
generator = SamAutomaticMaskGenerator(
sam,
points_per_side = 50,
points_per_batch = 64,
pred_iou_thresh = 0.8,
stability_score_thresh = 0.94,
stability_score_offset = 1.0,
box_nms_thresh = 0.97,
min_mask_region_area = 1000,
output_mode = "binary_mask",
)
| first_start = time.time() |
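The ref cell above (`first_start = time.time()`) starts timing the per-image loop in `preprocess_assets_segev.py`. The rest of that loop is not shown, so the following is only a hypothetical sketch of how each image could be pushed through the helpers defined earlier in the same module; the loop body, output naming, and JSON format are assumptions, not the repository's code:

```python
import json
import os
import time

import cv2


def process_images(generator, target_images, source_dir, out_dir):
    # Hypothetical helper; assumes it lives next to group_outputs and
    # fold_group_tree from preprocess_assets_segev.py, so both are in scope.
    for fname in target_images:
        start = time.time()
        image = cv2.imread(os.path.join(source_dir, fname))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # SAM expects RGB input
        results = generator.generate(image)             # List[SAMResult] from SamAutomaticMaskGenerator
        grouped = group_outputs(results)                # FinalGrouping of EfficientMasks
        folded = fold_group_tree(grouped)               # JSON-serializable nested dict
        with open(os.path.join(out_dir, fname + '.json'), 'w') as f:
            json.dump(folded, f)
        print(f'{fname}: {time.time() - start:.1f}s')
```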
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: daswer123/xtts-webui
# Path: scripts/resemble_enhance/common.py
class Normalizer(nn.Module):
def __init__(self, momentum=0.01, eps=1e-9):
super().__init__()
self.momentum = momentum
self.eps = eps
self.running_mean_unsafe: Tensor
self.running_var_unsafe: Tensor
self.register_buffer("running_mean_unsafe", torch.full([], torch.nan))
self.register_buffer("running_var_unsafe", torch.full([], torch.nan))
@property
def started(self):
return not torch.isnan(self.running_mean_unsafe)
@property
def running_mean(self):
if not self.started:
return torch.zeros_like(self.running_mean_unsafe)
return self.running_mean_unsafe
@property
def running_std(self):
if not self.started:
return torch.ones_like(self.running_var_unsafe)
return (self.running_var_unsafe + self.eps).sqrt()
@torch.no_grad()
def _ema(self, a: Tensor, x: Tensor):
return (1 - self.momentum) * a + self.momentum * x
def update_(self, x):
if not self.started:
self.running_mean_unsafe = x.mean()
self.running_var_unsafe = x.var()
else:
self.running_mean_unsafe = self._ema(self.running_mean_unsafe, x.mean())
self.running_var_unsafe = self._ema(self.running_var_unsafe, (x - self.running_mean).pow(2).mean())
def forward(self, x: Tensor, update=True):
if self.training and update:
self.update_(x)
self.stats = dict(mean=self.running_mean.item(), std=self.running_std.item())
x = (x - self.running_mean) / self.running_std
return x
def inverse(self, x: Tensor):
return x * self.running_std + self.running_mean
# Path: scripts/resemble_enhance/denoiser/inference.py
@cache
def load_denoiser(run_dir, device):
if run_dir is None:
return Denoiser(HParams())
hp = HParams.load(run_dir)
denoiser = Denoiser(hp)
path = run_dir / "ds" / "G" / "default" / "mp_rank_00_model_states.pt"
state_dict = torch.load(path, map_location="cpu")["module"]
denoiser.load_state_dict(state_dict)
denoiser.eval()
denoiser.to(device)
return denoiser
# Path: scripts/resemble_enhance/melspec.py
class MelSpectrogram(nn.Module):
def __init__(self, hp: HParams):
"""
Torch implementation of Resemble's mel extraction.
Note that the values are NOT identical to librosa's implementation
due to floating point precisions.
"""
super().__init__()
self.hp = hp
self.melspec = TorchMelSpectrogram(
hp.wav_rate,
n_fft=hp.n_fft,
win_length=hp.win_size,
hop_length=hp.hop_size,
f_min=0,
f_max=hp.wav_rate // 2,
n_mels=hp.num_mels,
power=1,
normalized=False,
# NOTE: Following librosa's default.
pad_mode="constant",
norm="slaney",
mel_scale="slaney",
)
self.register_buffer("stft_magnitude_min", torch.FloatTensor([hp.stft_magnitude_min]))
self.min_level_db = 20 * np.log10(hp.stft_magnitude_min)
self.preemphasis = hp.preemphasis
self.hop_size = hp.hop_size
def forward(self, wav, pad=True):
"""
Args:
wav: [B, T]
"""
device = wav.device
if wav.is_mps:
wav = wav.cpu()
self.to(wav.device)
if self.preemphasis > 0:
wav = torch.nn.functional.pad(wav, [1, 0], value=0)
wav = wav[..., 1:] - self.preemphasis * wav[..., :-1]
mel = self.melspec(wav)
mel = self._amp_to_db(mel)
mel_normed = self._normalize(mel)
assert not pad or mel_normed.shape[-1] == 1 + wav.shape[-1] // self.hop_size # Sanity check
mel_normed = mel_normed.to(device)
return mel_normed # (M, T)
def _normalize(self, s, headroom_db=15):
return (s - self.min_level_db) / (-self.min_level_db + headroom_db)
def _amp_to_db(self, x):
return x.clamp_min(self.hp.stft_magnitude_min).log10() * 20
# Path: scripts/resemble_enhance/utils/distributed.py
def get_free_port():
def fix_unset_envs():
def init_distributed():
def local_rank():
def global_rank():
def is_local_leader():
def is_global_leader():
def leader_only(leader_only_type, fn: Callable | None = None, boardcast_return=False) -> Callable:
def wrapper(fn):
def wrapped(*args, **kwargs):
# Path: scripts/resemble_enhance/utils/train_loop.py
class TrainLoop:
_ = KW_ONLY
run_dir: Path
train_dl: DataLoader
load_G: EngineLoader
feed_G: GenFeeder
load_D: EngineLoader | None = None
feed_D: DisFeeder | None = None
update_every: int = 5_000
eval_every: int = 5_000
backup_steps: tuple[int, ...] = (5_000, 100_000, 500_000)
device: str = "cuda"
eval_fn: EvalFn | None = None
gan_training_start_step: int | None = None
@property
def global_step(self):
return self.engine_G.global_step # How many steps have been completed?
@property
def eval_dir(self) -> Path | None:
if self.eval_every != 0:
eval_dir = self.run_dir.joinpath("eval")
eval_dir.mkdir(exist_ok=True)
else:
eval_dir = None
return eval_dir
@property
def viz_dir(self) -> Path:
return Path(self.run_dir / "viz")
def make_current_step_viz_path(self, name: str, suffix: str) -> Path:
path = (self.viz_dir / name / f"{self.global_step}").with_suffix(suffix)
path.parent.mkdir(exist_ok=True, parents=True)
return path
def __post_init__(self):
engine_G = self.load_G(self.run_dir)
if self.load_D is None:
engine_D = None
else:
engine_D = self.load_D(self.run_dir)
self.engine_G = engine_G
self.engine_D = engine_D
@property
def model_G(self):
return self.engine_G.module
@property
def model_D(self):
if self.engine_D is None:
return None
return self.engine_D.module
def save_checkpoint(self, tag="default"):
engine_G = self.engine_G
engine_D = self.engine_D
engine_G.save_checkpoint(tag=tag)
if engine_D is not None:
engine_D.save_checkpoint(tag=tag)
def run(self, max_steps: int = -1):
self.set_running_loop_(self)
train_dl = self.train_dl
update_every = self.update_every
eval_every = self.eval_every
device = self.device
eval_fn = self.eval_fn
engine_G = self.engine_G
engine_D = self.engine_D
eval_dir = self.eval_dir
init_step = self.global_step
logger.info(f"\nTraining from step {init_step} to step {max_steps}")
warmup_steps = {init_step + x for x in [50, 100, 500]}
engine_G.train()
if engine_D is not None:
engine_D.train()
gan_start_step = self.gan_training_start_step
while True:
loss_G = loss_D = 0
for batch in train_dl:
torch.cuda.synchronize()
start_time = time.time()
# What's the step after this batch?
step = self.global_step + 1
# Send data to the GPU
batch = tree_map(lambda x: x.to(device) if isinstance(x, Tensor) else x, batch)
stats = {"step": step}
# Include step == 1 for sanity check
gan_started = gan_start_step is not None and (step >= gan_start_step or step == 1)
gan_started &= engine_D is not None
# Generator step
fake, losses = self.feed_G(engine=engine_G, batch=batch)
# Train generator
if gan_started:
assert engine_D is not None
assert self.feed_D is not None
# Freeze the discriminator to let gradient go through fake
engine_D.freeze_()
losses |= self.feed_D(engine=engine_D, batch=None, fake=fake)
loss_G = sum(losses.values())
stats |= {f"G/{k}": v.item() for k, v in losses.items()}
stats |= {f"G/{k}": v for k, v in engine_G.gather_attribute("stats").items()}
del losses
assert isinstance(loss_G, Tensor)
stats["G/loss"] = loss_G.item()
stats["G/lr"] = engine_G.get_lr()[0]
stats["G/grad_norm"] = engine_G.get_grad_norm() or 0
if loss_G.isnan().item():
logger.error("Generator loss is NaN, skipping step")
continue
engine_G.backward(loss_G)
engine_G.step()
# Discriminator step
if gan_started:
assert engine_D is not None
assert self.feed_D is not None
engine_D.unfreeze_()
losses = self.feed_D(engine=engine_D, batch=batch, fake=fake.detach())
del fake
assert isinstance(losses, dict)
loss_D = sum(losses.values())
assert isinstance(loss_D, Tensor)
stats |= {f"D/{k}": v.item() for k, v in losses.items()}
stats |= {f"D/{k}": v for k, v in engine_D.gather_attribute("stats").items()}
del losses
if loss_D.isnan().item():
logger.error("Discriminator loss is NaN, skipping step")
continue
engine_D.backward(loss_D)
engine_D.step()
stats["D/loss"] = loss_D.item()
stats["D/lr"] = engine_D.get_lr()[0]
stats["D/grad_norm"] = engine_D.get_grad_norm() or 0
torch.cuda.synchronize()
stats["elapsed_time"] = time.time() - start_time
stats = tree_map(lambda x: float(f"{x:.4g}") if isinstance(x, float) else x, stats)
logger.info(json.dumps(stats, indent=0))
command = non_blocking_input()
evaling = step % eval_every == 0 or step in warmup_steps or command.strip() == "eval"
if eval_fn is not None and is_global_leader() and eval_dir is not None and evaling:
engine_G.eval()
eval_fn(engine_G, eval_dir=eval_dir)
engine_G.train()
if command.strip() == "quit":
logger.info("Training paused")
self.save_checkpoint("default")
return
if command.strip() == "backup" or step in self.backup_steps:
logger.info("Backing up")
self.save_checkpoint(tag=f"backup_{step:07d}")
if step % update_every == 0 or command.strip() == "save":
self.save_checkpoint(tag="default")
if step == max_steps:
logger.info("Training finished")
self.save_checkpoint(tag="default")
return
@classmethod
def set_running_loop_(cls, loop):
assert isinstance(loop, cls), f"Expected {cls}, got {type(loop)}"
cls._running_loop: cls = loop
@classmethod
def get_running_loop(cls) -> "TrainLoop | None":
if hasattr(cls, "_running_loop"):
assert isinstance(cls._running_loop, cls)
return cls._running_loop
return None
@classmethod
def get_running_loop_global_step(cls) -> int | None:
if loop := cls.get_running_loop():
return loop.global_step
return None
@classmethod
def get_running_loop_viz_path(cls, name: str, suffix: str) -> Path | None:
if loop := cls.get_running_loop():
return loop.make_current_step_viz_path(name, suffix)
return None
# Path: scripts/resemble_enhance/enhancer/hparams.py
class HParams(HParamsBase):
cfm_solver_method: str = "midpoint"
cfm_solver_nfe: int = 64
cfm_time_mapping_divisor: int = 4
univnet_nc: int = 96
lcfm_latent_dim: int = 64
lcfm_training_mode: str = "ae"
lcfm_z_scale: float = 5
vocoder_extra_dim: int = 32
gan_training_start_step: int | None = 5_000
enhancer_stage1_run_dir: Path | None = None
denoiser_run_dir: Path | None = None
# Path: scripts/resemble_enhance/enhancer/lcfm/irmae.py
class IRMAE(nn.Module):
def __init__(
self,
input_dim,
output_dim,
latent_dim,
hidden_dim=1024,
num_irms=4,
):
"""
Args:
input_dim: input dimension
output_dim: output dimension
latent_dim: latent dimension
hidden_dim: hidden layer dimension
num_irms: number of implicit rank minimization matrices
norm: normalization layer
"""
self.input_dim = input_dim
super().__init__()
self.encoder = nn.Sequential(
nn.Conv1d(input_dim, hidden_dim, 3, padding="same"),
*[ResBlock(hidden_dim) for _ in range(4)],
# Try to obtain compact representation (https://proceedings.neurips.cc/paper/2020/file/a9078e8653368c9c291ae2f8b74012e7-Paper.pdf)
*[nn.Conv1d(hidden_dim if i == 0 else latent_dim, latent_dim, 1, bias=False) for i in range(num_irms)],
nn.Tanh(),
)
self.decoder = nn.Sequential(
nn.Conv1d(latent_dim, hidden_dim, 3, padding="same"),
*[ResBlock(hidden_dim) for _ in range(4)],
nn.Conv1d(hidden_dim, output_dim, 1),
)
self.head = nn.Sequential(
nn.Conv1d(output_dim, hidden_dim, 3, padding="same"),
nn.GELU(),
nn.Conv1d(hidden_dim, input_dim, 1),
)
self.estimator = Normalizer()
def encode(self, x):
"""
Args:
x: (b c t) tensor
"""
z = self.encoder(x) # (b c t)
_ = self.estimator(z) # Estimate the global mean and std of z
self.stats = {}
self.stats["z_mean"] = z.mean().item()
self.stats["z_std"] = z.std().item()
self.stats["z_abs_68"] = z.abs().quantile(0.6827).item()
self.stats["z_abs_95"] = z.abs().quantile(0.9545).item()
self.stats["z_abs_99"] = z.abs().quantile(0.9973).item()
return z
def decode(self, z):
"""
Args:
z: (b c t) tensor
"""
return self.decoder(z)
def forward(self, x, skip_decoding=False):
"""
Args:
x: (b c t) tensor
skip_decoding: if True, skip the decoding step
"""
z = self.encode(x) # q(z|x)
if skip_decoding:
# This speeds up the training in cfm only mode
decoded = None
else:
decoded = self.decode(z) # p(x|z)
predicted = self.head(decoded)
self.losses = dict(mse=F.mse_loss(predicted, x))
return IRMAEOutput(latent=z, decoded=decoded)
# Path: scripts/resemble_enhance/enhancer/lcfm/lcfm.py
CFM = "cfm"
# Path: scripts/resemble_enhance/enhancer/lcfm/lcfm.py
class LCFM(nn.Module):
class Mode(Enum):
AE = "ae"
CFM = "cfm"
def __init__(self, ae: IRMAE, cfm: CFM, z_scale: float = 1.0):
super().__init__()
self.ae = ae
self.cfm = cfm
self.z_scale = z_scale
self._mode = None
self._eval_tau = 0.5
@property
def mode(self):
return self._mode
def set_mode_(self, mode):
mode = self.Mode(mode)
self._mode = mode
if mode == mode.AE:
freeze_(self.cfm)
logger.info("Freeze cfm")
elif mode == mode.CFM:
freeze_(self.ae)
logger.info("Freeze ae (encoder and decoder)")
else:
raise ValueError(f"Unknown training mode: {mode}")
def get_running_train_loop(self):
try:
# Lazy import
from ...utils.train_loop import TrainLoop
return TrainLoop.get_running_loop()
except ImportError:
return None
@property
def global_step(self):
loop = self.get_running_train_loop()
if loop is None:
return None
return loop.global_step
@torch.no_grad()
def _visualize(self, x, y, y_):
loop = self.get_running_train_loop()
if loop is None:
return
plt.subplot(221)
plt.imshow(y[0].detach().cpu().numpy(), aspect="auto", origin="lower", interpolation="none")
plt.title("GT")
plt.subplot(222)
y_ = y_[:, : y.shape[1]]
plt.imshow(y_[0].detach().cpu().numpy(), aspect="auto", origin="lower", interpolation="none")
plt.title("Posterior")
plt.subplot(223)
z_ = self.cfm(x)
y__ = self.ae.decode(z_)
y__ = y__[:, : y.shape[1]]
plt.imshow(y__[0].detach().cpu().numpy(), aspect="auto", origin="lower", interpolation="none")
plt.title("C-Prior")
del y__
plt.subplot(224)
z_ = torch.randn_like(z_)
y__ = self.ae.decode(z_)
y__ = y__[:, : y.shape[1]]
plt.imshow(y__[0].detach().cpu().numpy(), aspect="auto", origin="lower", interpolation="none")
plt.title("Prior")
del z_, y__
path = loop.make_current_step_viz_path("recon", ".png")
path.parent.mkdir(exist_ok=True, parents=True)
plt.tight_layout()
plt.savefig(path, dpi=500)
plt.close()
def _scale(self, z: Tensor):
return z * self.z_scale
def _unscale(self, z: Tensor):
return z / self.z_scale
def eval_tau_(self, tau):
self._eval_tau = tau
def forward(self, x, y: Tensor | None = None, ψ0: Tensor | None = None):
"""
Args:
x: (b d t), condition mel
y: (b d t), target mel
ψ0: (b d t), starting mel
"""
if self.mode == self.Mode.CFM:
self.ae.eval() # Always set to eval when training cfm
if ψ0 is not None:
ψ0 = self._scale(self.ae.encode(ψ0))
if self.training:
tau = torch.rand_like(ψ0[:, :1, :1])
else:
tau = self._eval_tau
ψ0 = tau * torch.randn_like(ψ0) + (1 - tau) * ψ0
if y is None:
if self.mode == self.Mode.AE:
with torch.no_grad():
training = self.ae.training
self.ae.eval()
z = self.ae.encode(x)
self.ae.train(training)
else:
z = self._unscale(self.cfm(x, ψ0=ψ0))
h = self.ae.decode(z)
else:
ae_output: IRMAEOutput = self.ae(y, skip_decoding=self.mode == self.Mode.CFM)
if self.mode == self.Mode.CFM:
_ = self.cfm(x, self._scale(ae_output.latent.detach()), ψ0=ψ0)
h = ae_output.decoded
if h is not None and self.global_step is not None and self.global_step % 100 == 0:
self._visualize(x[:1], y[:1], h[:1])
return h
# Path: scripts/resemble_enhance/enhancer/univnet/univnet.py
class UnivNet(nn.Module):
@property
def d_noise(self):
return 128
@property
def strides(self):
return [7, 5, 4, 3]
@property
def dilations(self):
return [1, 3, 9, 27]
@property
def nc(self):
return self.hp.univnet_nc
@property
def scale_factor(self) -> int:
return self.hp.hop_size
def __init__(self, hp: HParams, d_input):
super().__init__()
self.d_input = d_input
self.hp = hp
self.blocks = nn.ModuleList(
[
LVCBlock(
self.nc,
d_input,
stride=stride,
dilations=self.dilations,
cond_hop_length=hop_length,
kpnet_conv_size=3,
)
for stride, hop_length in zip(self.strides, np.cumprod(self.strides))
]
)
self.conv_pre = weight_norm(nn.Conv1d(self.d_noise, self.nc, 7, padding=3, padding_mode="reflect"))
self.conv_post = nn.Sequential(
nn.LeakyReLU(0.2),
weight_norm(nn.Conv1d(self.nc, 1, 7, padding=3, padding_mode="reflect")),
nn.Tanh(),
)
self.mrstft = MRSTFTLoss(hp)
@property
def eps(self):
return 1e-5
def forward(self, x: Tensor, y: Tensor | None = None, npad=10):
"""
Args:
x: (b c t), acoustic features
y: (b t), waveform
Returns:
z: (b t), waveform
"""
assert x.ndim == 3, "x must be 3D tensor"
assert y is None or y.ndim == 2, "y must be 2D tensor"
assert x.shape[1] == self.d_input, f"x.shape[1] must be {self.d_input}, but got {x.shape}"
assert npad >= 0, "npad must be positive or zero"
x = F.pad(x, (0, npad), "constant", 0)
z = torch.randn(x.shape[0], self.d_noise, x.shape[2]).to(x)
z = self.conv_pre(z) # (b c t)
for block in self.blocks:
z = block(z, x) # (b c t)
z = self.conv_post(z) # (b 1 t)
z = z[..., : -self.scale_factor * npad]
z = z.squeeze(1) # (b t)
if y is not None:
self.losses = self.mrstft(z, y)
return z
# Path: scripts/resemble_enhance/enhancer/enhancer.py
import logging
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch import Tensor, nn
from torch.distributions import Beta
from ..common import Normalizer
from ..denoiser.inference import load_denoiser
from ..melspec import MelSpectrogram
from ..utils.distributed import global_leader_only
from ..utils.train_loop import TrainLoop
from .hparams import HParams
from .lcfm import CFM, IRMAE, LCFM
from .univnet import UnivNet
logger = logging.getLogger(__name__)
def _maybe(fn):
def _fn(*args):
if args[0] is None:
return None
return fn(*args)
return _fn
def _normalize_wav(x: Tensor):
return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7)
class Enhancer(nn.Module):
def __init__(self, hp: HParams):
super().__init__()
self.hp = hp
n_mels = self.hp.num_mels
vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim
latent_dim = self.hp.lcfm_latent_dim
self.lcfm = LCFM(
IRMAE(
| input_dim=n_mels, |
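The ref cell above (`input_dim=n_mels,`) opens the keyword arguments of the `IRMAE` autoencoder built inside `Enhancer.__init__`. The distinctive piece of that autoencoder is the tail of its encoder: a stack of bias-free 1x1 convolutions, the implicit-rank-minimization trick its inline comment cites. Below is a self-contained shape check of that stack, reusing `latent_dim=64` from the `HParams` excerpt and the `hidden_dim=1024`, `num_irms=4` defaults shown in `IRMAE.__init__`:

```python
import torch
import torch.nn as nn

# Minimal sketch of the implicit-rank-minimization stack from IRMAE.encode:
# several bias-free 1x1 Conv1d layers compose into one linear map, but training
# the factored form biases the learned latent projection toward low rank.
hidden_dim, latent_dim, num_irms = 1024, 64, 4
irm_stack = nn.Sequential(
    *[nn.Conv1d(hidden_dim if i == 0 else latent_dim, latent_dim, 1, bias=False)
      for i in range(num_irms)]
)
x = torch.randn(2, hidden_dim, 50)  # (b, c, t) hidden features after the ResBlocks
z = irm_stack(x)                    # (b, latent_dim, t) latent, as in IRMAE.encode
print(z.shape)                      # torch.Size([2, 64, 50])
```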
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: FrozenBurning/PrimDiffusion
# Path: dva/ray_marcher.py
class RayMarcher(nn.Module):
def __init__(
self,
image_height,
image_width,
volradius,
fadescale=8.0,
fadeexp=8.0,
dt=1.0,
ray_subsample_factor=1,
accum=2,
termthresh=0.99,
blocksize=None,
with_t_img=True,
chlast=False,
assets=None,
):
super().__init__()
# TODO: add config?
self.image_height = image_height
self.image_width = image_width
self.volradius = volradius
self.dt = dt
self.fadescale = fadescale
self.fadeexp = fadeexp
# NOTE: this seems to not work for other configs?
if blocksize is None:
blocksize = (8, 16)
self.blocksize = blocksize
self.with_t_img = with_t_img
self.chlast = chlast
self.accum = accum
self.termthresh = termthresh
base_pixel_coords = th.stack(
th.meshgrid(
th.arange(self.image_height, dtype=th.float32),
th.arange(self.image_width, dtype=th.float32),
)[::-1],
dim=-1,
)
self.register_buffer("base_pixel_coords", base_pixel_coords, persistent=False)
self.fixed_bvh_cache = {-1: (th.empty(0), th.empty(0), th.empty(0))}
self.ray_subsample_factor = ray_subsample_factor
def _set_pix_coords(self):
dev = self.base_pixel_coords.device
self.base_pixel_coords = th.stack(
th.meshgrid(
th.arange(self.image_height, dtype=th.float32, device=dev),
th.arange(self.image_width, dtype=th.float32, device=dev),
)[::-1],
dim=-1,
)
def resize(self, h: int, w: int):
self.image_height = h
self.image_width = w
self._set_pix_coords()
def forward(
self,
prim_rgba: th.Tensor,
prim_pos: th.Tensor,
prim_rot: th.Tensor,
prim_scale: th.Tensor,
K: th.Tensor,
RT: th.Tensor,
ray_subsample_factor: Optional[int] = None,
):
"""
Args:
prim_rgba: primitive payload [B, K, 4, S, S, S],
K - # of primitives, S - primitive size
prim_pos: locations [B, K, 3]
prim_rot: rotations [B, K, 3, 3]
prim_scale: scales [B, K, 3]
K: intrinsics [B, 3, 3]
RT: extrinsics [B, 3, 4]
Returns:
a dict of tensors
"""
# TODO: maybe we can re-use mvpraymarcher?
B = prim_rgba.shape[0]
device = prim_rgba.device
# TODO: this should return focal 2x2?
camera = convert_camera_parameters(RT, K)
camera = {k: v.contiguous() for k, v in camera.items()}
dt = self.dt / self.volradius
if ray_subsample_factor is None:
ray_subsample_factor = self.ray_subsample_factor
if ray_subsample_factor > 1 and self.training:
pixel_coords = subsample_pixel_coords(
self.base_pixel_coords, int(B), ray_subsample_factor
)
elif ray_subsample_factor > 1:
pixel_coords = resize_pixel_coords(
self.base_pixel_coords,
int(B),
ray_subsample_factor,
)
else:
pixel_coords = (
self.base_pixel_coords[np.newaxis].expand(B, -1, -1, -1).contiguous()
)
prim_pos = prim_pos / self.volradius
focal = th.diagonal(camera["focal"], dim1=1, dim2=2).contiguous()
# TODO: port this?
raypos, raydir, tminmax = compute_raydirs(
viewpos=camera["campos"],
viewrot=camera["camrot"],
focal=focal,
princpt=camera["princpt"],
pixelcoords=pixel_coords,
volradius=self.volradius,
)
rgba = mvpraymarch(
raypos,
raydir,
stepsize=dt,
tminmax=tminmax,
algo=0,
template=prim_rgba.permute(0, 1, 3, 4, 5, 2).contiguous(),
warp=None,
termthresh=self.termthresh,
primtransf=(prim_pos, prim_rot, prim_scale),
fadescale=self.fadescale,
fadeexp=self.fadeexp,
usebvh="fixedorder",
chlast=True,
)
rgba = rgba.permute(0, 3, 1, 2)
preds = {
"rgba_image": rgba,
"pixel_coords": pixel_coords,
}
return preds
# Path: dva/ray_marcher.py
def generate_colored_boxes(template, prim_rot, alpha=10000.0, seed=123456):
B = template.shape[0]
output = template.clone()
device = template.device
lightdir = -3 * th.ones([B, 3], dtype=th.float32, device=device)
lightdir = lightdir / th.norm(lightdir, p=2, dim=1, keepdim=True)
zz, yy, xx = th.meshgrid(
th.linspace(-1.0, 1.0, template.size(-1), device=device),
th.linspace(-1.0, 1.0, template.size(-1), device=device),
th.linspace(-1.0, 1.0, template.size(-1), device=device),
)
primnormalx = th.where(
(th.abs(xx) >= th.abs(yy)) & (th.abs(xx) >= th.abs(zz)),
th.sign(xx) * th.ones_like(xx),
th.zeros_like(xx),
)
primnormaly = th.where(
(th.abs(yy) >= th.abs(xx)) & (th.abs(yy) >= th.abs(zz)),
th.sign(yy) * th.ones_like(xx),
th.zeros_like(xx),
)
primnormalz = th.where(
(th.abs(zz) >= th.abs(xx)) & (th.abs(zz) >= th.abs(yy)),
th.sign(zz) * th.ones_like(xx),
th.zeros_like(xx),
)
primnormal = th.stack([primnormalx, -primnormaly, -primnormalz], dim=-1)
primnormal = primnormal / th.sqrt(th.sum(primnormal**2, dim=-1, keepdim=True))
output[:, :, 3, :, :, :] = alpha
np.random.seed(seed)
for i in range(template.size(1)):
# generating a random color
output[:, i, 0, :, :, :] = np.random.rand() * 255.0
output[:, i, 1, :, :, :] = np.random.rand() * 255.0
output[:, i, 2, :, :, :] = np.random.rand() * 255.0
# get light direction in local coordinate system?
lightdir0 = lightdir
mult = th.sum(
lightdir0[:, None, None, None, :] * primnormal[np.newaxis], dim=-1
)[:, np.newaxis, :, :, :].clamp(min=0.2)
output[:, i, :3, :, :, :] *= 1.4 * mult
return output
# Path: primdiffusion/dataset/renderpeople_crossid_dataset.py
class RenderPeopleSViewDataset(Dataset):
def __init__(
self,
root_dir,
subject_ids,
smpl_poses,
image,
image_mask,
image_part_mask,
cam_path,
frame_list=None,
cameras=None,
cond_cameras=None,
sample_cameras=True,
camera_id=None,
image_height=1024,
image_width=1024,
is_train=True,
**kwargs,
):
super().__init__()
# subject ids is a text file contains list of subject ids
self.image_height = image_height
self.image_width = image_width
self.ref_frame = 0
with open(subject_ids, 'r') as f:
human_list = f.read().splitlines()
self.subject_ids = human_list
self.root_dir = root_dir
if frame_list is None:
n_frames = len(os.listdir(os.path.join(self.root_dir, self.subject_ids[0], 'img', 'camera0000')))
self.frame_list = [str(fid) for fid in range(n_frames)]
self.image_path = image
self.image_mask_path = image_mask
self.image_part_mask_path = image_part_mask
self.is_train = is_train
all_cameras = self.load_all_cameras(cam_path)
# TODO: inference logics
if not self.is_train:
assert not sample_cameras
assert camera_id is not None
self.cameras = all_cameras
self.cond_cameras = cond_cameras
self.sample_cameras = sample_cameras
self.camera_id = camera_id
self.all_smpl = self.load_all_smpl(smpl_poses)
def load_all_smpl(self, smpl_poses):
all_smpl = {}
for people_id in self.subject_ids:
current_smpl_path = smpl_poses.format(people_id=people_id)
smpl_param = dict(np.load(current_smpl_path, allow_pickle=True))['smpl'].item()
poses = np.zeros((smpl_param['body_pose'].shape[0], 72)).astype(np.float32)
poses[:, :3] = np.array(smpl_param['global_orient']).astype(np.float32)
poses[:, 3:] = np.array(smpl_param['body_pose']).astype(np.float32)
shapes = np.array(smpl_param['betas']).astype(np.float32)
shapes = np.repeat(shapes[:], poses.shape[0], axis=0)
Rh = smpl_param['global_orient'].astype(np.float32)
Th = smpl_param['transl'].astype(np.float32)
current_smpl = {
'shapes': shapes,
'Rh': Rh * 0, #FIXME: hack
'Th': Th,
'poses': poses,
}
all_smpl[people_id] = current_smpl
return all_smpl
def load_all_cameras(self, camera_path):
# input path to camera.json under synbody sequence
# all_cameras is dict of dict
all_cameras = {}
for people_id in self.subject_ids:
current_camera_path = camera_path.format(people_id=people_id)
current_camera = {}
with open(current_camera_path) as f:
camera = json.load(f)
for view_index in range(len(camera.keys())):
K, R, T, _ = get_KRTD(camera, view_index)
current_camera['camera{:04d}'.format(view_index)] = {
"Rt": np.concatenate([R, T[..., None]], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
for c in current_camera.values():
c["cam_pos"] = -np.dot(c["Rt"][:3, :3].T, c["Rt"][:3, 3])
c["Rt"][:, -1] *= 1000.0
all_cameras[people_id] = current_camera
return all_cameras
def __len__(self):
return len(self.subject_ids) * 200
def __getitem__(self, idx):
# idx is subject_id wise index
people_id = self.subject_ids[idx % len(self.subject_ids)]
# random sample frames
frame = (
random.choice(self.frame_list)
)
# random sample cameras
camera_id = (
random.choice(list(self.cameras[people_id].keys()))
if self.sample_cameras
else self.camera_id
)
fmts = dict(people_id=people_id, frame=int(frame), camera=camera_id)
sample = {"index": idx, **fmts}
sample.update(load_smpl_params(self.all_smpl[people_id], int(frame)))
ref_frame_smpl = {'ref_' + k: v for k, v in load_smpl_params(self.all_smpl[people_id], int(self.ref_frame)).items()}
sample.update(ref_frame_smpl)
sample["image"] = np.transpose(
cv2.imread(self.image_path.format(**fmts))[..., ::-1].astype(np.float32),
axes=(2, 0, 1),
)
# reading all the cond images
if self.cond_cameras:
sample["cond_image"] = []
sample["cond_Rt"] = []
sample["cond_K"] = []
# for cond_camera_id in self.cond_cameras:
# FIXME: hack for random condition views
cond_camera_id = random.choice(list(self.cameras[people_id].keys()))
if True:
cond_image = np.transpose(
cv2.imread(
self.image_path.format(
people_id=people_id, frame=int(self.ref_frame), camera=cond_camera_id
)
)[..., ::-1].astype(np.float32),
axes=(2, 0, 1),
)
sample["cond_image"].append(cond_image)
sample["cond_Rt"].append(self.cameras[people_id][cond_camera_id]["Rt"])
sample["cond_K"].append(self.cameras[people_id][cond_camera_id]["K"])
for key in ["image", "K", "Rt"]:
sample[f"cond_{key}"] = np.stack(sample[f"cond_{key}"], axis=0)
sample["cond_cameras"] = self.cond_cameras[:]
sample["image"] = np.transpose(
cv2.imread(self.image_path.format(**fmts))[..., ::-1].astype(np.float32),
axes=(2, 0, 1),
)
image_mask = cv2.imread(self.image_mask_path.format(**fmts))
border = 3
kernel = np.ones((border, border), np.uint8)
msk_erode = cv2.erode(image_mask.copy(), kernel)[np.newaxis, ..., 0]
sample["image_mask"] = (msk_erode != 0).astype(np.float32)
image_part_mask = cv2.imread(self.image_part_mask_path.format(**fmts))
part_msk_erode = cv2.erode(image_part_mask.copy(), kernel)[np.newaxis, ..., 0]
sample["image_part_mask"] = part_msk_erode
sample["image_bg"] = sample["image"] * ~(sample["image_part_mask"] != 0)
sample.update(self.cameras[people_id][camera_id])
return sample
def gen_inf_cameras(self, num_views = 5):
training_views = self.cameras[self.subject_ids[0]]
self.training_views = training_views
num_training_views = len(training_views.keys())
interpolation_anchors = []
for view_index in range(num_training_views):
Rt = training_views['camera{:04d}'.format(view_index)]['Rt']
K = training_views['camera{:04d}'.format(view_index)]['K']
rot = Rt[:, :3]
trans = Rt[:, 3]
interpolation_anchors.append((rot, trans))
interpolated_poses = interpolate_poses(interpolation_anchors, num_views)
inf_camera = {}
for people_id in self.subject_ids:
current_camera = {}
for view_index in range(len(interpolated_poses)):
R, T = interpolated_poses[view_index]
current_camera['camera{:04d}'.format(view_index)] = {
"Rt": np.concatenate([R, T[..., None]], axis=1).astype(np.float32),
"K": K.astype(np.float32),
}
for c in current_camera.values():
c["cam_pos"] = -np.dot(c["Rt"][:3, :3].T, c["Rt"][:3, 3])
# c["Rt"][:, -1] *= 1000.0
inf_camera[people_id] = current_camera
self.inf_cameras = inf_camera
def inf_sample(self, people_id, camera_id, frame_id, cond_sample):
fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)
sample = {}
sample.update({**fmts})
sample.update(load_smpl_params(self.all_smpl[people_id], int(frame_id)))
sample.update(self.inf_cameras[people_id][camera_id])
for k, v in sample.items():
if isinstance(v, np.ndarray):
sample[k] = v[None, ...]
sample.update(cond_sample)
return sample
def cond_sample(self, people_id):
sample = {}
# reading all the cond images
if self.cond_cameras:
sample["cond_image"] = []
sample["cond_Rt"] = []
sample["cond_K"] = []
cond_camera_id = random.choice(list(self.cameras[people_id].keys()))
if True:
cond_image = np.transpose(
cv2.imread(
self.image_path.format(
people_id=people_id, frame=int(self.ref_frame), camera=cond_camera_id
)
)[..., ::-1].astype(np.float32),
axes=(2, 0, 1),
)
sample["cond_image"].append(cond_image)
sample["cond_Rt"].append(self.cameras[people_id][cond_camera_id]["Rt"])
sample["cond_K"].append(self.cameras[people_id][cond_camera_id]["K"])
for key in ["image", "K", "Rt"]:
sample[f"cond_{key}"] = np.stack(sample[f"cond_{key}"], axis=0)
sample["cond_cameras"] = self.cond_cameras[:]
for k, v in sample.items():
if isinstance(v, np.ndarray):
sample[k] = v[None, ...]
return sample
def inf_sample_wsmpl(self, people_id, camera_id, frame_id, cond_sample, smpl_param):
fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)
sample = {}
sample.update({**fmts})
sample.update(load_smpl_params(smpl_param, int(frame_id)))
sample.update(self.inf_cameras[people_id][camera_id])
for k, v in sample.items():
if isinstance(v, np.ndarray):
sample[k] = v[None, ...]
sample.update(cond_sample)
return sample
def sample_cam_smpl(self):
people_id = random.choice(self.subject_ids)
frame_id = random.choice(self.frame_list)
camera_id = random.choice(list(self.cameras[people_id].keys()))
fmts = dict(people_id=people_id, frame=int(frame_id), camera=camera_id)
sample = {}
sample.update({**fmts})
sample.update(load_smpl_params(self.all_smpl[people_id], int(frame_id)))
sample.update(self.cameras[people_id][camera_id])
for k, v in sample.items():
if isinstance(v, np.ndarray):
sample[k] = v[None, ...]
return sample
# Path: dva/io.py
def load_static_assets_crossid_smpl(config):
# with chumpy dependency!!!
data_struct = read_pickle(config.data.smpl_topology)
vt = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_vt.npy'))
ft = np.load(os.path.join(os.path.dirname(config.data.smpl_topology), 'basicModel_ft.npy'))
n_verts = data_struct["v_template"].shape[0]
topology = AttrDict(
dict(
vi=data_struct["f"].astype(np.int64),
vt=vt.astype(np.float32),
vti=ft.astype(np.int64),
n_verts=n_verts,
)
)
topology.v2uv = compute_v2uv(topology.n_verts, topology.vi, topology.vti)
nbs_idxs, nbs_weights = compute_neighbours(topology.n_verts, topology["vi"])
topology.nbs_idxs = nbs_idxs
topology.nbs_weights = nbs_weights
static_assets = AttrDict(
dict(
topology=topology,
lbs_template_verts=data_struct["v_template"],
smpl_path=config.smpl_dir,
)
)
if "ref_frame" in config:
current_smpl_path = config.data.smpl_poses.format(people_id='seq_000016-rp_alison_rigged_002')
smpl_param = dict(np.load(current_smpl_path, allow_pickle=True))['smpl'].item()
poses = np.zeros((smpl_param['body_pose'].shape[0], 72)).astype(np.float32)
poses[:, :3] = np.array(smpl_param['global_orient']).astype(np.float32)
poses[:, 3:] = np.array(smpl_param['body_pose']).astype(np.float32)
shapes = np.array(smpl_param['betas']).astype(np.float32)
shapes = np.repeat(shapes[:], poses.shape[0], axis=0)
Rh = smpl_param['global_orient'].astype(np.float32)
Th = smpl_param['transl'].astype(np.float32)
current_smpl = {
'shapes': shapes,
'Rh': Rh * 0, #FIXME: hack
'Th': Th,
'poses': poses,
}
static_assets["ref_frame"] = {k: v[config.ref_frame][None, ...] for k, v in current_smpl.items()}
return static_assets
# Path: dva/io.py
def load_from_config(config, **kwargs):
"""Instantiate an object given a config and arguments."""
assert "class_name" in config and "module_name" not in config
config = copy.deepcopy(config)
class_name = config.pop("class_name")
object_class = load_class(class_name)
return object_class(**config, **kwargs)
# Path: dva/utils.py
def to_device(values, device=None, non_blocking=True):
"""Transfer a set of values to the device.
Args:
values: a nested dict/list/tuple of tensors
device: argument to `to()` for the underlying vector
NOTE:
if the device is not specified, using `th.cuda()`
"""
if device is None:
device = th.device("cuda")
if isinstance(values, dict):
return {k: to_device(v, device=device) for k, v in values.items()}
elif isinstance(values, tuple):
return tuple(to_device(v, device=device) for v in values)
elif isinstance(values, list):
return [to_device(v, device=device) for v in values]
elif isinstance(values, th.Tensor):
return values.to(device, non_blocking=non_blocking)
elif isinstance(values, nn.Module):
return values.to(device)
elif isinstance(values, np.ndarray):
return th.from_numpy(values).to(device)
else:
return values
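# Illustrative usage sketch (not part of the repository): to_device walks nested
# containers, converts numpy arrays to tensors, and passes non-tensor values through
# unchanged. The batch below is a made-up example; "cpu" is used so the sketch runs
# without a GPU.
import numpy as np
import torch as th
batch = {"verts": np.zeros((10, 3), dtype=np.float32), "meta": ["frame_0001", 7]}
batch_dev = to_device(batch, device=th.device("cpu"))
# batch_dev["verts"] is now a th.Tensor; batch_dev["meta"] is still ["frame_0001", 7]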
# Path: dva/geom.py
def make_postex(v, idxim, barim):
return (
barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]
+ barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]
+ barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]
).permute(0, 3, 1, 2)
# Path: dva/geom.py
def compute_tbn(geom, vt, vi, vti):
"""Computes tangent, bitangent, and normal vectors given a mesh.
Args:
geom: [N, n_verts, 3] th.Tensor
Vertex positions.
vt: [n_uv_coords, 2] th.Tensor
UV coordinates.
vi: [..., 3] th.Tensor
Face vertex indices.
vti: [..., 3] th.Tensor
Face UV indices.
Returns:
[..., 3] th.Tensors for T, B, N.
"""
v0 = geom[:, vi[..., 0]]
v1 = geom[:, vi[..., 1]]
v2 = geom[:, vi[..., 2]]
vt0 = vt[vti[..., 0]]
vt1 = vt[vti[..., 1]]
vt2 = vt[vti[..., 2]]
v01 = v1 - v0
v02 = v2 - v0
vt01 = vt1 - vt0
vt02 = vt2 - vt0
f = 1.0 / (
vt01[None, ..., 0] * vt02[None, ..., 1]
- vt01[None, ..., 1] * vt02[None, ..., 0]
)
tangent = f[..., None] * th.stack(
[
v01[..., 0] * vt02[None, ..., 1] - v02[..., 0] * vt01[None, ..., 1],
v01[..., 1] * vt02[None, ..., 1] - v02[..., 1] * vt01[None, ..., 1],
v01[..., 2] * vt02[None, ..., 1] - v02[..., 2] * vt01[None, ..., 1],
],
dim=-1,
)
tangent = F.normalize(tangent, dim=-1)
normal = F.normalize(th.cross(v01, v02, dim=3), dim=-1)
bitangent = F.normalize(th.cross(tangent, normal, dim=3), dim=-1)
return tangent, bitangent, normal
# Path: visualize.py
import os
import sys
import imageio
import torch as th
import numpy as np
import random
import logging
from omegaconf import OmegaConf
from dva.ray_marcher import RayMarcher, generate_colored_boxes
from primdiffusion.dataset.renderpeople_crossid_dataset import RenderPeopleSViewDataset
from dva.io import load_static_assets_crossid_smpl, load_from_config
from dva.utils import to_device
from dva.geom import make_postex, compute_tbn
device = th.device("cuda")
logger = logging.getLogger("visualize.py")
def render_mvp_boxes(rm, batch, preds):
with th.no_grad():
====REPOSITORY====
# Repo Name: ml-stat-Sustech/TorchCP
# Path: torchcp/classification/predictors/classwise.py
class ClassWisePredictor(SplitPredictor):
"""
Applications of Class-Conditional Conformal Predictor in Multi-Class Classification (Shi et al., 2013)
paper: https://ieeexplore.ieee.org/document/6784618
:param score_function: non-conformity score function.
:param model: a pytorch model.
"""
def __init__(self, score_function, model=None):
super(ClassWisePredictor, self).__init__(score_function, model)
self.q_hat = None
def calculate_threshold(self, logits, labels, alpha):
if alpha >= 1 or alpha <= 0:
raise ValueError("Significance level 'alpha' must be in (0,1).")
logits = logits.to(self._device)
labels = labels.to(self._device)
# Count the number of classes
num_classes = logits.shape[1]
self.q_hat = torch.zeros(num_classes, device=self._device)
for label in range(num_classes):
x_cal_tmp = logits[labels == label]
y_cal_tmp = labels[labels == label]
scores = self.score_function(x_cal_tmp, y_cal_tmp)
self.q_hat[label] = self._calculate_conformal_value(scores, alpha)
# Path: torchcp/classification/predictors/cluster.py
class ClusterPredictor(SplitPredictor):
"""
Class-Conditional Conformal Prediction with Many Classes (Ding et al., 2023).
paper: https://arxiv.org/abs/2306.09335.
:param score_function: a non-conformity score function.
:param model: a pytorch model.
:param ratio_clustering: the ratio of examples in the calibration dataset used to cluster classes.
:param num_clusters: the number of clusters. If ratio_clustering is "auto", the number of clusters is automatically computed.
:param split: the method to split the dataset into the clustering set and the calibration set. Options are 'proportional' (sample proportionally to the class distribution so that the rarest class has n_clustering examples), 'doubledip' (don't split; use all data for both steps), or 'random' (each example is assigned to the clustering step with some fixed probability).
"""
def __init__(self, score_function, model=None, ratio_clustering="auto", num_clusters="auto", split='random',
temperature=1):
super(ClusterPredictor, self).__init__(score_function, model, temperature)
self.__ratio_clustering = ratio_clustering
self.__num_clusters = num_clusters
self.__split = split
def calculate_threshold(self, logits, labels, alpha):
if alpha >= 1 or alpha <= 0:
raise ValueError("Significance level 'alpha' must be in (0,1).")
logits = logits.to(self._device)
labels = labels.to(self._device)
num_classes = logits.shape[1]
scores = self.score_function(logits, labels)
alpha = torch.tensor(alpha, device=self._device)
classes_statistics = torch.tensor([torch.sum(labels == k).item() for k in range(num_classes)],
device=self._device)
# 1) Choose necessary parameters for Cluster algorithm
if self.__ratio_clustering == 'auto' and self.__num_clusters == 'auto':
n_min = torch.min(classes_statistics)
n_thresh = self.__get_quantile_minimum(alpha)
# Classes with fewer than n_thresh examples will be excluded from clustering
n_min = torch.maximum(n_min, n_thresh)
num_remaining_classes = torch.sum((classes_statistics >= n_min).float())
# Compute the number of clusters and the minimum number of examples for each class
n_clustering = (n_min * num_remaining_classes / (75 + num_remaining_classes)).clone().to(
torch.int32).to(self._device)
self.__num_clusters = torch.floor(n_clustering / 2).to(torch.int32)
self.__ratio_clustering = n_clustering / n_min
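# Worked example (illustrative): with alpha = 0.1 the quantile minimum n_thresh is 9.
# If n_min = 100 and 200 classes pass the n_min filter, then
# n_clustering = 100 * 200 / (75 + 200) ~= 72, num_clusters = floor(72 / 2) = 36 and
# ratio_clustering = 72 / 100 = 0.72.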
# 2) Split data
clustering_scores, clustering_labels, cal_scores, cal_labels = self.__split_data(scores,
labels,
classes_statistics)
# 3) Filter "rare" classes
rare_classes = self.__get_rare_classes(clustering_labels, alpha, num_classes)
# 4) Run clustering
if (num_classes - len(rare_classes) > self.__num_clusters) and (self.__num_clusters > 1):
# Filter out rare classes and re-index
remaining_idx, filtered_labels, class_remapping = self.__remap_classes(clustering_labels, rare_classes)
filtered_scores = clustering_scores[remaining_idx]
# Compute embedding for each class and get class counts
embeddings, class_cts = self.__embed_all_classes(filtered_scores, filtered_labels)
kmeans = KMeans(n_clusters=int(self.__num_clusters), n_init=10).fit(X=embeddings.detach().cpu().numpy(),
sample_weight=np.sqrt(
class_cts.detach().cpu().numpy()))
nonrare_class_cluster_assignments = torch.tensor(kmeans.labels_, device=self._device)
cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)
for cls, remapped_cls in class_remapping.items():
cluster_assignments[cls] = nonrare_class_cluster_assignments[remapped_cls]
else:
cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)
# 5) Compute qhats for each cluster
self.q_hat = self.__compute_cluster_specific_qhats(cluster_assignments,
cal_scores,
cal_labels,
alpha)
def __split_data(self, scores, labels, classes_statistics):
if self.__split == 'proportional':
# Split dataset along with fraction "frac_clustering"
num_classes = classes_statistics.shape[0]
n_k = torch.tensor([self.__ratio_clustering * classes_statistics[k] for k in range(num_classes)],
device=self._device, dtype=torch.int32)
idx1 = torch.zeros(labels.shape, dtype=torch.bool, device=self._device)
for k in range(num_classes):
# Randomly select n instances of class k
idx = torch.argwhere(labels == k).flatten()
random_indices = torch.randint(0, classes_statistics[k], (n_k[k],), device=self._device)
selected_idx = idx[random_indices]
idx1[selected_idx] = 1
clustering_scores = scores[idx1]
clustering_labels = labels[idx1]
cal_scores = scores[~idx1]
cal_labels = labels[~idx1]
elif self.__split == 'doubledip':
clustering_scores, clustering_labels = scores, labels
cal_scores, cal_labels = scores, labels
elif self.__split == 'random':
# Each point is assigned to clustering set w.p. frac_clustering
idx1 = torch.rand(size=(len(labels),), device=self._device) < self.__ratio_clustering
clustering_scores = scores[idx1]
clustering_labels = labels[idx1]
cal_scores = scores[~idx1]
cal_labels = labels[~idx1]
else:
raise Exception("Invalid split method. Options are 'proportional', 'doubledip', and 'random'")
return clustering_scores, clustering_labels, cal_scores, cal_labels
def __get_quantile_minimum(self, alpha):
"""
Compute smallest n such that ceil((n+1)*(1-alpha)/n) <= 1
"""
n = torch.tensor(0, device=alpha.device)
while torch.ceil((n + 1) * (1 - alpha) / n) > 1:
n += 1
return n
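# Worked example (illustrative): for alpha = 0.1 the loop stops at n = 9, since
# ceil((9 + 1) * 0.9 / 9) = ceil(1.0) = 1, while n = 8 gives ceil(9 * 0.9 / 8) =
# ceil(1.0125) = 2 > 1. A class therefore needs at least 9 examples to support a
# non-trivial conformal quantile at this significance level.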
def __get_rare_classes(self, labels, alpha, num_classes):
"""
Choose classes whose number of examples is smaller than the minimum count required for the (1 - alpha) quantile.
"""
thresh = self.__get_quantile_minimum(alpha)
classes, cts = torch.unique(labels, return_counts=True)
rare_classes = classes[cts < thresh].to(self._device)
# Also include any classes that are so rare that we have 0 labels for them
all_classes = torch.arange(num_classes, device=self._device)
zero_ct_classes = all_classes[(all_classes.view(1, -1) != classes.view(-1, 1)).all(dim=0)]
rare_classes = torch.concatenate((rare_classes, zero_ct_classes))
return rare_classes
def __remap_classes(self, labels, rare_classes):
"""
Exclude classes in rare_classes and remap remaining classes to be 0-indexed
:returns:
- remaining_idx: Boolean array the same length as labels. Entry i is True
if labels[i] is not in rare_classes
- remapped_labels : Array that only contains the entries of labels that are
not in rare_classes (in order)
- remapping : Dict mapping old class index to new class index
"""
labels = labels.detach().cpu().numpy()
rare_classes = rare_classes.detach().cpu().numpy()
remaining_idx = ~np.isin(labels, rare_classes)
remaining_labels = labels[remaining_idx]
remapped_labels = np.zeros(remaining_labels.shape, dtype=int)
new_idx = 0
remapping = {}
for i in range(len(remaining_labels)):
if remaining_labels[i] in remapping:
remapped_labels[i] = remapping[remaining_labels[i]]
else:
remapped_labels[i] = new_idx
remapping[remaining_labels[i]] = new_idx
new_idx += 1
return torch.from_numpy(remaining_idx).to(self._device), torch.tensor(remapped_labels,
device=self._device), remapping
def __embed_all_classes(self, scores_all, labels, q=[0.5, 0.6, 0.7, 0.8, 0.9]):
"""
:param scores_all: num_instances-length array where scores_all[i] = score of true class for instance i.
:param labels: num_instances-length array of true class labels.
:param q: quantiles to include in embedding.
:returns:
- embeddings: num_classes x len(q) array where ith row is the embeddings of class i.
- cts: num_classes-length array where cts[i] = # of times class i appears in labels .
"""
num_classes = len(torch.unique(labels))
embeddings = torch.zeros((num_classes, len(q)), device=self._device)
cts = torch.zeros((num_classes,), device=self._device)
for i in range(num_classes):
if len(scores_all.shape) > 1:
raise DimensionError(f"Expected 1-dimension, but got {len(scores_all.shape)}-dimension.")
class_i_scores = scores_all[labels == i]
cts[i] = class_i_scores.shape[0]
# Computes the q-quantiles of samples and returns the vector of quantiles
embeddings[i, :] = torch.quantile(class_i_scores, torch.tensor(q, device=self._device))
return embeddings, cts
def __compute_cluster_specific_qhats(self, cluster_assignments, cal_class_scores, cal_true_labels, alpha):
'''
Computes cluster-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)
:param cluster_assignments: num_classes length array where entry i is the index of the cluster that class i belongs to. Rare classes can be assigned to cluster -1 and they will automatically be given as default_qhat.
:param cal_class_scores: cal_class_scores[i] is the score for instance i.
:param cal_true_labels: true class labels for instances
:param alpha: Desired coverage level
:return : num_classes length array where entry i is the quantile correspond to the cluster that class i belongs to.
'''
# Map true class labels to clusters
cal_true_clusters = torch.tensor([cluster_assignments[label] for label in cal_true_labels], device=self._device)
num_clusters = torch.max(cluster_assignments) + 1
cluster_qhats = self.__compute_class_specific_qhats(cal_class_scores, cal_true_clusters, num_clusters, alpha)
# Map cluster qhats back to classes
num_classes = len(cluster_assignments)
qhats_class = torch.tensor([cluster_qhats[cluster_assignments[k]] for k in range(num_classes)],
device=self._device)
return qhats_class
def __compute_class_specific_qhats(self, cal_class_scores, cal_true_clusters, num_clusters, alpha):
'''
Computes class-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)
:param cal_class_scores: num_instances-length array where cal_class_scores[i] is the score for instance i
:param cal_true_clusters: num_instances-length array of true class labels. If class -1 appears, it will be assigned the null_qhat value. It is appended as an extra entry of the returned q_hats so that q_hats[-1] = null_qhat.
:param num_clusters: the number of clusters.
:param alpha: Desired coverage level.
:return: the threshold of each class
'''
# Compute quantile q_hat that will result in marginal coverage of (1-alpha)
null_qhat = self._calculate_conformal_value(cal_class_scores, alpha)
q_hats = torch.zeros((num_clusters,), device=self._device) # q_hats[i] = quantile for class i
for k in range(num_clusters):
# Only select data for which k is true class
idx = (cal_true_clusters == k)
scores = cal_class_scores[idx]
q_hats[k] = self._calculate_conformal_value(scores, alpha)
if -1 in cal_true_clusters:
q_hats = torch.concatenate((q_hats, torch.tensor([null_qhat], device=self._device)))
return q_hats
# Path: torchcp/classification/predictors/split.py
class SplitPredictor(BasePredictor):
"""
Split Conformal Prediction (Vovk et a., 2005).
Book: https://link.springer.com/book/10.1007/978-3-031-06649-8.
:param score_function: non-conformity score function.
:param model: a pytorch model.
:param temperature: the temperature of Temperature Scaling.
"""
def __init__(self, score_function, model=None, temperature=1):
super().__init__(score_function, model, temperature)
#############################
# The calibration process
############################
def calibrate(self, cal_dataloader, alpha):
self._model.eval()
logits_list = []
labels_list = []
with torch.no_grad():
for examples in cal_dataloader:
tmp_x, tmp_labels = examples[0].to(self._device), examples[1].to(self._device)
tmp_logits = self._logits_transformation(self._model(tmp_x)).detach()
logits_list.append(tmp_logits)
labels_list.append(tmp_labels)
logits = torch.cat(logits_list).float()
labels = torch.cat(labels_list)
self.calculate_threshold(logits, labels, alpha)
def calculate_threshold(self, logits, labels, alpha):
if alpha >= 1 or alpha <= 0:
raise ValueError("Significance level 'alpha' must be in (0,1).")
logits = logits.to(self._device)
labels = labels.to(self._device)
scores = self.score_function(logits, labels)
self.q_hat = self._calculate_conformal_value(scores, alpha)
def _calculate_conformal_value(self, scores, alpha):
"""
Calculate the 1-alpha quantile of scores.
:param scores: non-conformity scores.
:param alpha: a significance level.
:return: the threshold which is use to construct prediction sets.
"""
if len(scores) == 0:
warnings.warn(
"The number of scores is 0, which is invalid. To avoid a program crash, the threshold is set to torch.inf.")
return torch.inf
# Finite-sample corrected quantile level: ceil((n + 1) * (1 - alpha)) / n
quantile_value = math.ceil((scores.shape[0] + 1) * (1 - alpha)) / scores.shape[0]
if quantile_value > 1:
warnings.warn(
"The quantile level exceeds 1. It should be a value in (0,1). To avoid a program crash, the threshold is set to torch.inf.")
return torch.inf
return torch.quantile(scores, quantile_value).to(self._device)
#############################
# The prediction process
############################
def predict(self, x_batch):
"""
The input of score function is softmax probability.
:param x_batch: a batch of instances.
"""
self._model.eval()
if self._model != None:
x_batch = self._model(x_batch.to(self._device)).float()
x_batch = self._logits_transformation(x_batch).detach()
sets = self.predict_with_logits(x_batch)
return sets
def predict_with_logits(self, logits, q_hat=None):
"""
The input of score function is softmax probability.
If q_hat is not given, the threshold computed by 'self.calibrate' (i.e. self.q_hat) is used to construct the prediction sets.
:param logits: model output before softmax.
:param q_hat: the conformal threshold.
:return: prediction sets
"""
scores = self.score_function(logits).to(self._device)
if q_hat is None:
S = self._generate_prediction_set(scores, self.q_hat)
else:
S = self._generate_prediction_set(scores, q_hat)
return S
#############################
# The evaluation process
############################
def evaluate(self, val_dataloader):
prediction_sets = []
labels_list = []
with torch.no_grad():
for examples in val_dataloader:
tmp_x, tmp_label = examples[0].to(self._device), examples[1].to(self._device)
prediction_sets_batch = self.predict(tmp_x)
prediction_sets.extend(prediction_sets_batch)
labels_list.append(tmp_label)
val_labels = torch.cat(labels_list)
res_dict = {"Coverage_rate": self._metric('coverage_rate')(prediction_sets, val_labels),
"Average_size": self._metric('average_size')(prediction_sets, val_labels)}
return res_dict
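# Illustrative usage sketch (not part of the repository): calibrate a SplitPredictor
# directly from precomputed logits (model=None is allowed by the signature above) and
# build prediction sets. Shapes and alpha are arbitrary; device handling is assumed to
# default sensibly when no model is given.
import torch
from torchcp.classification.predictors import SplitPredictor
from torchcp.classification.scores import THR

n_cal, num_classes = 2000, 10
cal_logits = torch.randn(n_cal, num_classes)
cal_labels = torch.randint(0, num_classes, (n_cal,))

predictor = SplitPredictor(score_function=THR(), model=None)
predictor.calculate_threshold(cal_logits, cal_labels, alpha=0.1)
prediction_sets = predictor.predict_with_logits(torch.randn(5, num_classes))  # one set per test row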
# Path: torchcp/classification/scores/aps.py
class APS(BaseScore):
"""
Adaptive Prediction Sets (Romano et al., 2020)
paper :https://proceedings.neurips.cc/paper/2020/file/244edd7e85dc81602b7615cd705545f5-Paper.pdf
"""
def __call__(self, logits, label=None):
assert len(logits.shape) <= 2, "The dimension of logits must be at most 2."
if len(logits.shape) == 1:
logits = logits.unsqueeze(0)
probs = torch.softmax(logits, dim=-1)
if label is None:
return self._calculate_all_label(probs)
else:
return self._calculate_single_label(probs, label)
def _calculate_all_label(self, probs):
indices, ordered, cumsum = self._sort_sum(probs)
U = torch.rand(probs.shape, device=probs.device)
ordered_scores = cumsum - ordered * U
_, sorted_indices = torch.sort(indices, descending=False, dim=-1)
scores = ordered_scores.gather(dim=-1, index=sorted_indices)
return scores
def _sort_sum(self, probs):
# ordered: the ordered probabilities in descending order
# indices: the rank of ordered probabilities in descending order
# cumsum: the accumulation of sorted probabilities
ordered, indices = torch.sort(probs, dim=-1, descending=True)
cumsum = torch.cumsum(ordered, dim=-1)
return indices, ordered, cumsum
def _calculate_single_label(self, probs, label):
indices, ordered, cumsum = self._sort_sum(probs)
U = torch.rand(indices.shape[0], device=probs.device)
idx = torch.where(indices == label.view(-1, 1))
scores_first_rank = U * cumsum[idx]
idx_minus_one = (idx[0], idx[1] - 1)
scores_usual = U * ordered[idx] + cumsum[idx_minus_one]
return torch.where(idx[1] == 0, scores_first_rank, scores_usual)
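# Illustrative sketch (not part of the repository): APS scores on a toy batch. The score
# of a label is the (randomized) cumulative probability mass accumulated until that label
# is reached in the descending-probability ordering.
import torch
from torchcp.classification.scores import APS

logits = torch.tensor([[2.0, 1.0, 0.1], [0.2, 0.3, 2.5]])
labels = torch.tensor([0, 2])
aps = APS()
all_scores = aps(logits)           # shape [2, 3]: a score for every (instance, label) pair
true_scores = aps(logits, labels)  # shape [2]: scores of the true labels only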
# Path: torchcp/classification/scores/raps.py
class RAPS(APS):
"""
Regularized Adaptive Prediction Sets (Angelopoulos et al., 2020)
paper : https://arxiv.org/abs/2009.14193
:param penalty: the weight of regularization. When penalty = 0, RAPS=APS.
:param kreg: the rank of regularization which is an integer in [0,labels_num].
"""
def __init__(self, penalty, kreg=0):
if penalty <= 0:
raise ValueError("The parameter 'penalty' must be a positive value.")
if kreg < 0:
raise ValueError("The parameter 'kreg' must be a nonnegative value.")
if type(kreg) != int:
raise TypeError("The parameter 'kreg' must be an integer.")
super(RAPS, self).__init__()
self.__penalty = penalty
self.__kreg = kreg
def _calculate_all_label(self, probs):
indices, ordered, cumsum = self._sort_sum(probs)
U = torch.rand(probs.shape, device=probs.device)
reg = torch.maximum(self.__penalty * (torch.arange(1, probs.shape[-1] + 1, device=probs.device) - self.__kreg),
torch.tensor(0, device=probs.device))
ordered_scores = cumsum - ordered * U + reg
_, sorted_indices = torch.sort(indices, descending=False, dim=-1)
scores = ordered_scores.gather(dim=-1, index=sorted_indices)
return scores
def _calculate_single_label(self, probs, label):
indices, ordered, cumsum = self._sort_sum(probs)
U = torch.rand(indices.shape[0], device=probs.device)
idx = torch.where(indices == label.view(-1, 1))
reg = torch.maximum(self.__penalty * (idx[1] + 1 - self.__kreg), torch.tensor(0).to(probs.device))
scores_first_rank = U * ordered[idx] + reg
idx_minus_one = (idx[0], idx[1] - 1)
scores_usual = U * ordered[idx] + cumsum[idx_minus_one] + reg
return torch.where(idx[1] == 0, scores_first_rank, scores_usual)
# Path: torchcp/classification/scores/saps.py
class SAPS(APS):
"""
Sorted Adaptive Prediction Sets (Huang et al., 2023)
paper: https://arxiv.org/abs/2310.06430
:param weight: the weight of label ranking.
"""
def __init__(self, weight):
super(SAPS, self).__init__()
if weight <= 0:
raise ValueError("The parameter 'weight' must be a positive value.")
self.__weight = weight
def _calculate_all_label(self, probs):
indices, ordered, cumsum = self._sort_sum(probs)
ordered[:, 1:] = self.__weight
cumsum = torch.cumsum(ordered, dim=-1)
U = torch.rand(probs.shape, device=probs.device)
ordered_scores = cumsum - ordered * U
_, sorted_indices = torch.sort(indices, descending=False, dim=-1)
scores = ordered_scores.gather(dim=-1, index=sorted_indices)
return scores
def _calculate_single_label(self, probs, label):
indices, ordered, cumsum = self._sort_sum(probs)
U = torch.rand(indices.shape[0], device=probs.device)
idx = torch.where(indices == label.view(-1, 1))
scores_first_rank = U * cumsum[idx]
scores_usual = self.__weight * (idx[1] - U) + ordered[:, 0]
return torch.where(idx[1] == 0, scores_first_rank, scores_usual)
# Path: torchcp/classification/scores/thr.py
class THR(BaseScore):
"""
Threshold conformal predictors (Sadinle et al., 2016).
paper : https://arxiv.org/abs/1609.00451.
:param score_type: a transformation on logits. Default: "softmax". Optional: "softmax", "Identity", "log_softmax" or "log".
"""
def __init__(self, score_type="softmax") -> None:
super().__init__()
self.score_type = score_type
if score_type == "Identity":
self.transform = lambda x: x
elif score_type == "softmax":
self.transform = lambda x: torch.softmax(x, dim=- 1)
elif score_type == "log_softmax":
self.transform = lambda x: torch.log_softmax(x, dim=-1)
elif score_type == "log":
self.transform = lambda x: torch.log(x)
else:
raise NotImplementedError
def __call__(self, logits, label=None):
assert len(logits.shape) <= 2, "The dimension of logits must be at most 2."
if len(logits.shape) == 1:
logits = logits.unsqueeze(0)
temp_values = self.transform(logits)
if label is None:
return self.__calculate_all_label(temp_values)
else:
return self.__calculate_single_label(temp_values, label)
def __calculate_single_label(self, temp_values, label):
return 1 - temp_values[torch.arange(label.shape[0], device=temp_values.device), label]
def __calculate_all_label(self, temp_values):
return 1 - temp_values
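# Illustrative sketch (not part of the repository): with the default "softmax" transform
# the THR score is 1 - p(y|x), so a confident correct prediction receives a small
# non-conformity score.
import torch
from torchcp.classification.scores import THR

thr = THR()
logits = torch.tensor([[4.0, 0.0, 0.0]])
print(thr(logits, torch.tensor([0])))  # roughly 0.035, since softmax puts ~0.965 on class 0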
# Path: torchcp/classification/utils/metrics.py
class Metrics:
def __call__(self, metric) -> Any:
if metric not in METRICS_REGISTRY_CLASSIFICATION.registered_names():
raise NameError(f"The metric: {metric} is not defined in TorchCP.")
return METRICS_REGISTRY_CLASSIFICATION.get(metric)
# Path: torchcp/utils/common.py
def fix_randomness(seed=0):
"""
Fix the random seed for python, torch, numpy.
:param seed: the random seed
"""
np.random.seed(seed=seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
# Path: examples/common/dataset.py
def build_dataset(dataset_name, transform=None, mode='train'):
# path of usr
usr_dir = os.path.expanduser('~')
data_dir = os.path.join(usr_dir, "data")
if dataset_name == 'imagenet':
if transform is None:
transform = trn.Compose([
trn.Resize(256),
trn.CenterCrop(224),
trn.ToTensor(),
trn.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
dataset = dset.ImageFolder(data_dir + "/imagenet/val", transform)
elif dataset_name == 'mnist':
if transform is None:
transform = trn.Compose([
trn.ToTensor(),
trn.Normalize((0.1307,), (0.3081,))
])
if mode == "train":
dataset = dset.MNIST(data_dir, train=True, download=True, transform=transform)
elif mode == "test":
dataset = dset.MNIST(data_dir, train=False, download=True, transform=transform)
else:
raise NotImplementedError
return dataset
# Path: examples/imagenet_example.py
import argparse
import os
import torch
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as trn
from tqdm import tqdm
from torchcp.classification.predictors import ClusterPredictor, ClassWisePredictor, SplitPredictor
from torchcp.classification.scores import THR, APS, SAPS, RAPS
from torchcp.classification import Metrics
from torchcp.utils import fix_randomness
from examples.common.dataset import build_dataset
# Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--alpha', default=0.1, type=float)
args = parser.parse_args()
fix_randomness(seed=args.seed)
#######################################
# Loading ImageNet dataset and a pytorch model
#######################################
model_name = 'ResNet101'
model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True)
model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(model_device)
====REPOSITORY====
# Repo Name: vintagedave/Fontimize
# Path: fontimize.py
def get_used_characters_in_html(html : str) -> set[chr]:
soup = BeautifulSoup(html, 'html.parser')
text = soup.get_text()
return get_used_characters_in_str(text)
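# Illustrative example (not part of the repository): only the visible text of the HTML
# contributes to the character set; tags and attribute values are ignored.
from fontimize import get_used_characters_in_html
chars = get_used_characters_in_html("<p class='intro'>Hi, <b>world</b>!</p>")
# chars now holds the characters of the visible text "Hi, world!"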
# Path: fontimize.py
class charPair:
def __init__(self, first : chr, second : chr):
self.first = first
self.second = second
def __str__(self):
return "[" + self.first + "-" + self.second + "]" # Pairs are inclusive
# For print()-ing
def __repr__(self):
return self.__str__()
def __eq__(self, other):
if isinstance(other, charPair):
return self.first == other.first and self.second == other.second
return False
def get_range(self):
if self.first == self.second:
return _get_unicode_string(self.first)
else:
return _get_unicode_string(self.first) + '-' + _get_unicode_string(self.second, False) # Eg "U+0061-0071"
# Path: fontimize.py
def _get_char_ranges(chars : list[chr]):
chars.sort()
if not chars:
return []
res : list[charPair] = []
first : chr = chars[0]
prev_seen : chr = first
for c in chars[1:]:
expected_next_char = chr(ord(prev_seen) + 1)
if c != expected_next_char:
# non-sequential, so time to start a new set
pair = charPair(first, prev_seen)
res.append(pair)
first = c
prev_seen = c
# add final set if it hasn't been added yet
if (not res) or (res[-1].second != prev_seen):
pair = charPair(first, prev_seen)
res.append(pair)
return res
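# Illustrative example (not part of the repository): consecutive code points collapse
# into a single range, which keeps the generated unicode-range string short. The input
# is deduplicated here because the real callers pass a set of used characters.
from fontimize import _get_char_ranges
print(_get_char_ranges(sorted(set("fontimize"))))  # [[e-f], [i-i], [m-o], [t-t], [z-z]]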
# Path: fontimize.py
def optimise_fonts(text : str, fonts : list[str], fontpath : str = "", subsetname = "FontimizeSubset", verbose : bool = False, print_stats : bool = True) -> dict[str, typing.Any]:
verbosity = 2 if verbose else 0 # ttf2web has 0, 1, 2, so match that to off and on
res : dict[str, typing.Any] = {}
res["css"] = {} # at this level there are no CSS files, include just to prevent errors for API consumer
characters = get_used_characters_in_str(text)
char_list = list(characters)
if verbosity >= 2:
print("Characters:")
print(" " + str(char_list))
res["chars"] = characters # set of characters used in the input text
char_ranges = _get_char_ranges(char_list)
if verbosity >= 2:
print("Character ranges:")
print(" " + str(char_ranges))
uranges_str = ', '.join(r.get_range() for r in char_ranges)
uranges = [[subsetname, uranges_str]] # subsetname here will be in the generated font, eg 'Arial.FontimizeSubset.woff2'
if verbosity >= 2:
print("Unicode ranges:")
print(" " + uranges_str)
res["uranges"] = uranges_str # list of unicode ranges matching the characters used in the input text
# For each font, generate a new font file using only the used characters
# By default, place it in the same folder as the respective font, unless fontpath is specified
res["fonts"] = {} # dict of old font path -> new font path
for font in fonts:
assetdir = fontpath if fontpath else path.dirname(font)
t2w = TTF2Web(font, uranges, assetdir=assetdir)
woff2_list = t2w.generateWoff2(verbosity=verbosity)
# print(woff2_list)
assert len(woff2_list) == 1 # We only expect one font file to be generated, per font input
assert len(woff2_list[0]) == 2 # Pair of font, plus ranges -- we only care about [0], the font
res["fonts"][font] = woff2_list[0][0]
if verbosity >= 2:
print("Generated the following fonts from the originals:")
for k in res["fonts"].keys():
print(" " + k + " ->\n " + res["fonts"][k])
if (verbosity >= 2) or print_stats:
print("Results:")
print(" Fonts processed: " + str(len(res["fonts"])))
if (verbosity == 1): # If 2, printed above already
print(" Generated (use verbose output for input -> generated map):")
for k in res["fonts"].keys():
print(" " + res["fonts"][k])
sum_orig = _get_file_size_sum(list(res["fonts"].keys()))
sum_new = _get_file_size_sum(list(res["fonts"].values()))
print(" Total original font size: " + _file_size_to_readable(sum_orig))
print(" Total optimised font size: " + _file_size_to_readable(sum_new))
savings = sum_orig - sum_new
savings_percent = savings / sum_orig * 100
print(" Savings: " + _file_size_to_readable(savings) + " less, which is " + str(round(savings_percent, 1)) + "%!")
print("Thankyou for using Fontimize!") # A play on Font and Optimise, haha, so good pun clever. But seriously - hopefully a memorable name!
return res
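# Illustrative usage sketch (not part of the repository): subset a single font to the
# characters of one string. The font path below is a placeholder.
from fontimize import optimise_fonts
result = optimise_fonts("Hello, world!", ["fonts/MyFont.ttf"], fontpath="fonts/subset")
# result["fonts"] maps "fonts/MyFont.ttf" to "fonts/subset/MyFont.FontimizeSubset.woff2"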
# Path: fontimize.py
def optimise_fonts_for_files(files : list[str], font_output_dir = "", subsetname = "FontimizeSubset", verbose : bool = False, print_stats : bool = True, fonts : list[str] = [], addtl_text : str = "") -> dict[str, typing.Any]:
if (len(files) == 0) and len(addtl_text) == 0: # If you specify any text, input files are optional -- note, not documented, used for cmd line app
print("Error: No input files. Exiting.")
res = {
"css" : [],
"fonts" : [],
"chars": set(),
"uranges": []
}
return res
text = addtl_text
css_files : set[str] = set()
font_files : set[str] = set()
for f in fonts: # user-specified input font files
font_files.add(f)
for f in files:
file_ext = pathlib.Path(f).suffix.lower()
with open(f, 'r') as file:
if file_ext == '.html' or file_ext == '.htm':
html = file.read()
soup = BeautifulSoup(html, 'html.parser')
# Extract used text
text += soup.get_text()
# Extract CSS files the HTML references
for link in soup.find_all('link', href=True):
if 'css' in link['href']:
css_ref = link['href']
adjusted_css_path = _get_path(f, css_ref) # It'll be relative, so relative to the HTML file
css_files.add(adjusted_css_path)
else: # not HTML, treat as text
text += file.read()
# Sanity check that there is any text to process
if len(text) == 0:
print("Error: No text found in the input files or additional text. Exiting.")
res = {
"css" : [],
"fonts" : [],
"chars": set(),
"uranges": []
}
return res
# Extract fonts from CSS files
for css_file in css_files:
with open(css_file, 'r') as file:
css = file.read()
# Extract the contents of all :before and :after CSS pseudo-elements; add these to the text
pseudo_elements = _extract_pseudo_elements_content(css)
for pe in pseudo_elements:
text += pe
# List of all fonts from @font-face src url: statements. This assumes they're all local files
font_urls = _find_font_face_urls(css)
for font_url in font_urls:
# Only handle local files -- this does not support remote files
adjusted_font_path = _get_path(adjusted_css_path, font_url) # Relative to the CSS file
if path.isfile(adjusted_font_path):
font_files.add(adjusted_font_path)
else:
# if verbose:
print("Warning: Font file not found (may be remote not local?); skipping: " + font_url + " (resolved to " + adjusted_font_path + ")")
if verbose:
print("Found the following CSS files:")
for css_file in css_files:
print(" " + css_file)
print("Found the following fonts:")
for font_file in font_files:
print(" " + font_file)
# print("Found the following text:")
# print(text)
if len(font_files) == 0:
print("Error: No fonts found in the input files. Exiting.")
res = {
"css" : css_files,
"fonts" : [],
"chars": set(),
"uranges": []
}
return res
res = optimise_fonts(text, font_files, fontpath=font_output_dir, subsetname=subsetname, verbose=verbose, print_stats=print_stats)
res["css"] = css_files
return res
# Path: tests.py
import os
import unittest
import sys
from unittest.mock import patch
from fontimize import get_used_characters_in_html, charPair, _get_char_ranges, optimise_fonts, optimise_fonts_for_files
from fontTools.ttLib import woff2, TTFont
def test_empty(self):
self.assertEqual(_get_char_ranges([]), [])
def test_single_char(self):
self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')])
def test_two_sequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')])
def test_two_nonsequential_chars(self):
self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')])
def test_multiple_ranges(self):
self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')])
# Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string
def _count_glyphs_in_font(fontpath):
# with open(fontpath, 'rb') as f:
# wfr = woff2.WOFF2Reader(f)
# cmap = font['cmap']
# return len(cmap.getBestCmap())
# font.flavor = None # Decompress the font data
font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr)
font.flavor = None # Decompress the font data
num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird
return num_glyphs
# Does a named glyph exist in the font?
def _font_contains(fontpath, charname : str) -> bool:
font = TTFont(fontpath)
font.flavor = None # Decompress the font data
return charname in font.getGlyphOrder()
class TestOptimiseFonts(unittest.TestCase):
# Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase
test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz"
def test_optimise_fonts_with_single_font(self):
result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False)
# Basics
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
# Generated with the right name
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number
# For +1, see test_optimise_fonts_with_empty_text
self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font(foundfonts['tests/Spirax-Regular.ttf']))
def test_optimise_fonts_with_multiple_fonts(self):
result = optimise_fonts(self.test_string,
['tests/Spirax-Regular.ttf', 'tests/EBGaramond-VariableFont_wght.ttf', 'tests/EBGaramond-Italic-VariableFont_wght.ttf'],
fontpath='tests/output', verbose=False, print_stats=False)
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
self.assertIn('tests/EBGaramond-VariableFont_wght.ttf', foundfonts)
self.assertEqual(foundfonts['tests/EBGaramond-VariableFont_wght.ttf'], 'tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2')
self.assertIn('tests/EBGaramond-Italic-VariableFont_wght.ttf', foundfonts)
self.assertEqual(foundfonts['tests/EBGaramond-Italic-VariableFont_wght.ttf'], 'tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number
# + 1 for the tests below -- see test_optimise_fonts_with_empty_text
self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2'))
# + 16, + 12: EB Garamond contains multiple f-ligatures (eg fi), plus other variants, so the number of glyphs is higher. Italic has fewer.
self.assertEqual(len(self.test_string) + 1 + 16, _count_glyphs_in_font('tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2'))
self.assertEqual(len(self.test_string) + 1 + 12, _count_glyphs_in_font('tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2'))
def test_optimise_fonts_with_empty_text(self):
result = optimise_fonts("",
['tests/Spirax-Regular.ttf'],
fontpath='tests/output',
verbose=False, print_stats=False)
self.assertIsInstance(result, dict)
foundfonts = result["fonts"]
self.assertIn('tests/Spirax-Regular.ttf', foundfonts)
self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2')
# If the number of glyphs in the font matches the expected number: two, because an empty string is reported as containing space, see get_used_characters_in_str
# and fonts also seem to contain ".notdef":
# > font.getGlyphOrder()
# > ['.notdef', 'space']
self.assertEqual(2, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2'))
class TestOptimiseFontsForFiles(unittest.TestCase):
def setUp(self):
self.files = ['tests/test1-index-css.html', 'tests/test.txt', 'tests/test2.html']
self.font_output_dir = 'tests/output'
self.subsetname = 'TestFilesSubset'
self.verbose = False
self.print_stats = False
# Not used by any HTML/CSS, mimics manually adding a font
self.fonts = ['tests/Whisper-Regular.ttf', 'tests/NotoSans-VariableFont_wdth,wght.ttf', 'tests/NotoSansJP-VariableFont_wght.ttf']
@patch.object(sys, 'stdout') # provides mock_stdout in order to hide and verify console output
def test_optimise_fonts_for_files(self, mock_stdout):
result = optimise_fonts_for_files(files=self.files, font_output_dir=self.font_output_dir, subsetname=self.subsetname, fonts=self.fonts,
verbose=False, print_stats=False)
# css_test.css has:
# src: url('DOESNOTEXIST.ttf') format('truetype');
# This will emit a warning, check it was written to standard output
mock_stdout.write.assert_any_call('Warning: Font file not found (may be remote not local?); skipping: DOESNOTEXIST.ttf (resolved to tests/DOESNOTEXIST.ttf)')
self.assertIsInstance(result, dict)
self.assertIn('css', result)
self.assertIn('fonts', result)
css = result['css']
self.assertIn('tests/css_test.css', css)
self.assertIn('tests/css_test-index.css', css)
self.assertEqual(len(css), 2)
fonts = result['fonts']
font_keys = fonts.keys()
self.assertEqual(len(fonts), 7)
# These five found in CSS, via HTML input
self.assertIn('tests/EBGaramond-VariableFont_wght.ttf', font_keys)
self.assertIn('tests/Spirax-Regular.ttf', font_keys)
====REPOSITORY====
# Repo Name: wanghao-cst/Omni-VideoAssistant
# Path: llava/model/multimodal_encoder/builder.py
def build_vision_tower(vision_tower_cfg, **kwargs):
vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
is_absolute_path_exists = os.path.exists(vision_tower)
if is_absolute_path_exists or vision_tower.startswith("openai") or vision_tower.startswith("laion"):
return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)
raise ValueError(f'Unknown vision tower: {vision_tower}')
# Path: llava/model/multimodal_projector/builder.py
def build_vision_projector(config, delay_load=False, **kwargs):
projector_type = getattr(config, 'mm_projector_type', 'linear')
if projector_type == 'linear':
return nn.Linear(config.mm_hidden_size, config.hidden_size)
mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
if mlp_gelu_match:
mlp_depth = int(mlp_gelu_match.group(1))
modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
for _ in range(1, mlp_depth):
modules.append(nn.GELU())
modules.append(nn.Linear(config.hidden_size, config.hidden_size))
# import pdb;pdb.set_trace()
return nn.Sequential(*modules)
if projector_type == 'identity':
return IdentityMap()
raise ValueError(f'Unknown projector type: {projector_type}')
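# Illustrative sketch (not part of the repository): with mm_projector_type = "mlp2x_gelu"
# the regex above expands to Linear -> GELU -> Linear. The config object is a stand-in
# namespace carrying only the attributes the builder reads, and build_vision_projector
# from the builder module above is assumed to be in scope.
from types import SimpleNamespace
cfg = SimpleNamespace(mm_projector_type="mlp2x_gelu", mm_hidden_size=1024, hidden_size=4096)
projector = build_vision_projector(cfg)
# Sequential(Linear(1024 -> 4096), GELU(), Linear(4096 -> 4096))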
# Path: llava/constants.py
IGNORE_INDEX = -100
# Path: llava/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: llava/constants.py
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
# Path: llava/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: llava/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: llava/model/omni_arch.py
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn
cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096])
image_token_start = image_token_indices[0] # tensor(35, device='cuda:0')
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_frames_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
cur_labels = cur_labels[image_token_start+2:]
else: # True
# import pdb;pdb.set_trace()
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) # embedding of the instruction part: torch.Size([35, 4096])
cur_new_input_embeds.append(cur_frames_features) # torch.Size([1024, 4096]); append the frame features to the input
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start]) # torch.Size([35]), all -100
cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) # torch.Size([1024])
cur_labels = cur_labels[image_token_start+1:] # 339 = 375-35-1(img_token); appended to cur_new_labels later
cur_video_idx += 1
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_input_ids = cur_input_ids[image_token_start+2:]
else:
cur_input_ids = cur_input_ids[image_token_start+1:] # torch.Size([339])
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # empty
if cur_input_ids.numel() > 0: # True
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) # [torch.Size([35, 4096]) fixed template, torch.Size([1024, 4096]) image features, QA: torch.Size([339, 4096])]
if labels is not None:
cur_new_labels.append(cur_labels) # [torch.Size([35]), torch.Size([1024]); everything before is -100, torch.Size([339])]
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) # torch.Size([1398, 4096]): 35+1024+339
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0) # torch.Size([1398])
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): # True
max_len = max(x.shape[0] for x in new_input_embeds) # 1910
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else: # False; in image mode all sequences share the same length (256) by default
# import pdb;pdb.set_trace()
new_input_embeds = torch.stack(new_input_embeds, dim=0) # torch.Size([4, 716, 4096]) 716=461-1imgtoken+256imgfeature
if labels is not None: # torch.Size([4, 461])
new_labels = torch.stack(new_labels, dim=0) # torch.Size([4, 716])
if attention_mask is not None: # torch.Size([4, 461])
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) # torch.Size([4, 255]) of True, i.e. 256 image features minus 1 image token
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) # torch.Size([4, 716]); 716 = 461 + 255 (the 255 newly added image-feature tokens get mask True)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token: # False
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end: # False
num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = self.get_input_embeddings().weight.data
output_embeddings = self.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
if model_args.tune_mm_mlp_adapter:
for p in self.get_input_embeddings().parameters():
p.requires_grad = True
for p in self.get_output_embeddings().parameters():
p.requires_grad = False
if model_args.pretrain_mm_mlp_adapter:
mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
assert num_new_tokens == 2
if input_embeddings.shape == embed_tokens_weight.shape:
input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
elif embed_tokens_weight.shape[0] == num_new_tokens:
input_embeddings[-num_new_tokens:] = embed_tokens_weight
else:
raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.")
elif model_args.mm_use_im_patch_token: # False
if model_args.tune_mm_mlp_adapter:
====REPOSITORY====
# Repo Name: RobertCsordas/moe_attention
# Path: tasks/simple/language_model/wikitext103_sp_transformer.py
class Wikitext103SPTransformer(Enwik8Transformer):
helper: framework.helpers.TrainingHelper
def create_datasets(self):
self.batch_dim = 1
self.train_set = dataset.Wikitext103SentencePiece("train", self.helper.args.lm.unroll, n_pieces=self.helper.args.sentencepiece.n_pieces)
self.valid_sets.val = dataset.Wikitext103SentencePiece("valid", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll, n_pieces=self.helper.args.sentencepiece.n_pieces)
self.valid_sets.test = dataset.Wikitext103SentencePiece("test", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll, n_pieces=self.helper.args.sentencepiece.n_pieces)
# Path: tasks/simple/language_model/enwik8_transformer.py
class Enwik8Transformer(TransformerLMMixin, SimpleTask):
VALID_NUM_WORKERS = 1
TRAIN_NUM_WORKERS = 2
def create_state(self):
self.helper.state.epoch = 0
def create_model_interface(self):
self.model_interface = LanguageModelInterface(
self.model, drop_state_prob=self.helper.args.lm.state_drop_probability, dist_env=self.helper.dist_env,
n_ubatches=self.helper.args.n_microbatch)
self.helper.saver["interface"] = self.model_interface
def validate_on(self, set: torch.utils.data.Dataset, loader: torch.utils.data.DataLoader) -> Tuple[Any, float]:
state = self.model_interface.state
self.model_interface.reset_state()
res = super().validate_on(set, loader)
self.model_interface.state = state
return res
def log_epoch(self):
self.helper.log({"epoch": self.helper.state.epoch})
def start_next_epoch(self):
self.model_interface.reset_state()
self.helper.state.epoch += 1
self.log_epoch()
def get_train_batch(self) -> Dict[str, Any]:
try:
return next(self.data_iter)
except StopIteration:
self.start_next_epoch()
self.data_iter = iter(self.train_loader)
return next(self.data_iter)
def create_sampler(self, loader: torch.utils.data.Dataset, batch_size: int) -> \
framework.loader.sampler.MultibatchSequentialSampler:
return framework.loader.sampler.MultibatchSequentialSampler(loader, batch_size,
world_size=self.helper.dist_env.world_size, rank=self.helper.dist_env.rank)
def create_valid_loader(self, vset: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:
return torch.utils.data.DataLoader(vset,
batch_sampler=self.create_sampler(vset, self.test_batch_size),
collate_fn=framework.loader.collate.VarLengthCollate(batch_dim=self.batch_dim),
num_workers=self.VALID_NUM_WORKERS)
def create_train_loader(self, loader: torch.utils.data.Dataset) -> torch.utils.data.DataLoader:
sampler = self.create_sampler(loader, self.helper.args.batch_size)
self.helper.saver.register("sampler", sampler, replace=True)
return torch.utils.data.DataLoader(loader, batch_sampler=sampler, num_workers=self.TRAIN_NUM_WORKERS,
pin_memory=True, collate_fn=framework.loader.collate.VarLengthCollate(
batch_dim=self.batch_dim))
def create_datasets(self):
self.batch_dim = 1
self.train_set = dataset.Enwik8("train", self.helper.args.lm.unroll)
self.valid_sets.val = dataset.Enwik8("valid", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll)
self.valid_sets.test = dataset.Enwik8("test", self.helper.args.lm.unroll_eval or self.helper.args.lm.unroll)
def train(self):
self.log_epoch()
super().train()
# Path: tasks/task_db.py
def task(name: Optional[str] = None):
def wrapper(cls):
n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))
assert n not in TASKS, f"Task {n} already exists"
TASKS[n] = cls
return cls
return wrapper
# Path: tasks/task_db.py
def args(fn):
global ARGS_REGISTERS
ARGS_REGISTERS.append(fn)
return fn
# Path: framework/helpers/training_helper.py
class TrainingHelper:
args: DotDict
find_slash = re.compile(r'/+')
remove_firstlast_slash = re.compile(r'^/|/$')
class Dirs:
pass
def __init__(self, register_args: Optional[Callable[[ArgumentParser], None]],
wandb_project_name: Optional[str] = None,
log_async: bool = False, extra_dirs: List[str] = [], restore: Optional[str] = None):
self.dist_env = get_dist_env()
self.dist_env.init_env()
self.is_sweep = False
self.log_async = log_async
self.wandb_project_name = wandb_project_name
self.all_dirs = ["checkpoint", "tensorboard"] + extra_dirs
self.create_parser()
self.last_saved = -1
if register_args is not None:
register_args(self.arg_parser)
self.start(restore)
def print_env_info(self):
try:
import pkg_resources
print("---------------- Environment information: ----------------")
installed_packages = pkg_resources.working_set
print(list(sorted(["%s==%s" % (i.key, i.version) for i in installed_packages])))
print("----------------------------------------------------------")
except: # noqa: E722
pass
try:
git = subprocess.run(["git", "rev-parse", "--verify", "HEAD"], stderr=subprocess.DEVNULL,
stdout=subprocess.PIPE)
if git.returncode == 0:
print(f"Git hash: {git.stdout.decode().strip()}")
except: # noqa: E722
pass
def create_parser(self):
self.arg_parser = ArgumentParser(get_train_dir=lambda x: os.path.join("save", x.name) if x.name is not None
else None)
self.arg_parser.add_argument("-name", type=str, help="Train dir name")
self.arg_parser.add_argument("-reset", default=False, help="reset training - ignore saves", save=False)
self.arg_parser.add_argument("-log", default="tb")
self.arg_parser.add_argument("-save_interval", default="5000", parser=self.arg_parser.int_or_none_parser)
self.arg_parser.add_argument("-wandb_save_interval", default="None", parser=self.arg_parser.int_or_none_parser)
self.arg_parser.add_argument("-seed", default="none", parser=self.arg_parser.int_or_none_parser)
self.arg_parser.add_argument("-gpu", default="auto", help="use this gpu")
self.arg_parser.add_argument("-keep_alive", default=False)
self.arg_parser.add_argument("-sweep_id_for_grid_search", default=0,
help="Doesn't do anything, just to run multiple W&B iterations.")
self.arg_parser.add_argument("-restore", default="")
self.arg_parser.add_argument("-wandb_bug_workaround", default=False)
@master
def create_dirs(self):
self.dirs = self.Dirs()
self.dirs.base = self.summary.save_dir
for d in self.all_dirs:
assert d not in self.dirs.__dict__, f"Directory {d} already exists"
self.dirs.__dict__[d] = os.path.join(self.dirs.base, d)
if self.args.reset:
print("Resetting training state...")
for d in self.all_dirs:
shutil.rmtree(self.dirs.__dict__[d], ignore_errors=True)
for d in self.all_dirs:
os.makedirs(self.dirs.__dict__[d], exist_ok=True)
@master
def save_startup_log(self):
self.arg_parser.save(os.path.join(self.summary.save_dir, "args.json"))
with open(os.path.join(self.summary.save_dir, "startup_log.txt"), "a+") as f:
f.write(f"{str(datetime.now())} {socket.gethostname()}: {' '.join(sys.argv)}\n")
@master
def start_tensorboard(self):
if self.use_tensorboard:
os.makedirs(self.dirs.tensorboard, exist_ok=True)
framework.visualize.tensorboard.start(log_dir=self.dirs.tensorboard)
def use_cuda(self) -> bool:
return torch.cuda.is_available() and self.args.gpu.lower() != "none"
def setup_environment(self):
use_gpu(self.args.gpu)
if self.args.seed is not None:
assert not self.dist_env.is_distributed
seed.fix(self.args.seed)
self.device = torch.device(f"cuda:{torch.cuda.current_device()}") if self.use_cuda() else torch.device("cpu")
def get_batch_size(self, full_batch_size: Optional[int] = None) -> int:
batch_size = full_batch_size or self.args.batch_size
if self.dist_env.is_distributed:
bs = batch_size // self.dist_env.world_size
if self.dist_env.rank == 1:
bs = bs + batch_size % self.dist_env.world_size
return bs
else:
return batch_size
def get_loss_scaling(self) -> float:
# Scale that accounts for uneven world sizes. For mean reduction
return self.get_batch_size() / self.args.batch_size
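    # Worked example for the two helpers above (illustrative numbers, not from the repo):
    # with batch_size = 10 and world_size = 3, get_batch_size() yields 3, 4 and 3 samples
    # (rank 1 absorbs the remainder), and get_loss_scaling() returns 0.3, 0.4 and 0.3, so
    # mean-reduced losses weighted by these factors sum to 1.0, matching one 10-sample batch.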
def start(self, restore: Optional[str]):
self.args = self.arg_parser.parse_and_try_load()
self.restore_pending = None
self.wandb_bug_found = False
if self.dist_env.is_master():
if restore or self.args.restore:
# Restore args first such that the rest of the config is loaded correctly. Do not restore the GPU settings.
gpu_backup = self.args.gpu
reset_backup = self.args.reset
self.restore_pending = Saver.do_load(restore or self.args.restore)
self.args = self.arg_parser.from_dict(self.restore_pending["run_invariants"]["args"])
self.args.gpu = gpu_backup
self.args.reset = reset_backup
if self.dist_env.is_distributed:
torch.distributed.broadcast_object_list([self.arg_parser.to_dict()], src=0)
else:
a = [None]
torch.distributed.broadcast_object_list(a, src=0)
self.args = self.arg_parser.from_dict(a[0])
self.use_tensorboard, self.use_wandb = get_plot_config(self.args)
self.state = DotDict()
self.state.iter = 0
self.run_invariants = {
"args": self.arg_parser.to_dict()
}
if self.dist_env.is_master():
constructor = plot.AsyncLogger if self.log_async else plot.Logger
assert (not self.use_wandb) or (self.wandb_project_name is not None), \
'Must specify wandb project name if logging to wandb.'
assert self.args.name is not None or self.use_wandb, "Either name must be specified or W&B should be used"
if self.args.restore and self.restore_pending["run_invariants"]["wandb_id"]:
wandb_args = {
"project": self.restore_pending["run_invariants"]["wandb_id"]["project"],
"id": self.restore_pending["run_invariants"]["wandb_id"]["run_id"],
"resume": "must"
}
else:
wandb_args = {
"project": self.wandb_project_name,
"config": self.arg_parser.to_dict()
}
self.summary = constructor(save_dir=os.path.join("save", self.args.name) if self.args.name is not None else None,
use_tb=self.use_tensorboard,
use_wandb=self.use_wandb,
wandb_init_args=wandb_args,
wandb_extra_config={
"experiment_name": self.args.name,
"n_nodes": self.dist_env.world_size or 1,
},
get_global_step = lambda: self.state.iter)
self.run_invariants["wandb_id"] = self.summary.wandb_id
if self.summary.wandb_id:
self.wandb_project_name = self.summary.wandb_id["project"]
if self.use_wandb:
self.print_env_info()
print(self.dist_env)
self.create_dirs()
self.save_startup_log()
self.start_tensorboard()
self.saver = Saver(self.dirs.checkpoint if self.dist_env.is_master() else None, self.args.save_interval,
keep_every_n_hours=None if self.use_wandb else 4)
self.saver["state"] = self.state
self.saver["run_invariants"] = deepcopy(self.run_invariants)
self.setup_environment()
@master
def wait_for_termination(self):
if self.args.keep_alive and self.use_tensorboard and not self.use_wandb:
print("Done. Waiting for termination")
while True:
time.sleep(100)
@master
def save(self):
if not self.dist_env.is_master():
return
self.saver.save(iter=self.state.iter)
self.saver.cleanup()
@master
def tick(self):
self.saver.tick(iter=self.state.iter)
@master
def finish(self):
self.summary.finish()
if self.is_sweep or self.saver.last_saved_iter != self.state.iter:
self.save()
self.wait_for_termination()
def to_device(self, data: Any) -> Any:
return U.apply_to_tensors(data, lambda d: d.to(self.device))
def restore(self):
if self.dist_env.is_master():
if self.restore_pending is not None:
assert self.saver.load_data(self.restore_pending), "Restoring failed."
self.restore_pending = None
restored = True
else:
restored = self.saver.load()
if restored:
# Do not restore these things
self.saver.register("run_invariants", deepcopy(self.run_invariants), replace=True)
            # if distributed, send the full state to all workers
if self.dist_env.is_distributed:
# PyTorch bug: there is an int32 conversion in the distributed code that overflows if the data is
# > 2G. So send it in pieces.
for k, v in self.saver.get_data().items():
torch.distributed.broadcast_object_list([k, v], src=0)
else:
            # if distributed and worker, restore state from master
ckpt = {}
            # Stitch the pieces together
for _ in range(len(self.saver.get_data())):
a = [None, None]
torch.distributed.broadcast_object_list(a, src=0)
ckpt[a[0]] = a[1]
ckpt = self.to_device(ckpt)
self.saver.load_data(ckpt)
def get_storage_path(self, path: str) -> str:
assert self.dist_env.is_master()
path = os.path.join(self.dirs.export, path)
os.makedirs(os.path.dirname(path), exist_ok=True)
return path
@master
def export_tensor(self, rel_path: str, data: Union[torch.Tensor, np.ndarray]):
data = U.apply_to_tensors(data, lambda x: x.detach().cpu().numpy())
torch.save(data, self.get_storage_path(rel_path + ".pth"))
def fix_names(self, plotlist: Dict[str, Any]) -> Dict[str, Any]:
def fix_name(s: str) -> str:
s = self.find_slash.sub('/', s)
s = self.remove_firstlast_slash.sub('', s)
return s
return {fix_name(k): v for k, v in plotlist.items()}
@master
def log(self, plotlist, step=None):
if self.args.wandb_bug_workaround and self.use_wandb:
filtered = {k: v for k, v in plotlist.items() if not isinstance(v, framework.visualize.plot.TextTable)}
if len(filtered) != len(plotlist) and not self.wandb_bug_found:
print("WARNING: wandb_bug_workaround enabled. Refusing to log tables")
self.wandb_bug_found = True
plotlist = filtered
plotlist = self.fix_names(plotlist)
if plotlist:
self.summary.log(plotlist, step)
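# A tiny illustration of the name normalization done by fix_names above (the metric name
# is an illustrative assumption): repeated slashes collapse and leading/trailing slashes go.
import re
find_slash = re.compile(r'/+')
remove_firstlast_slash = re.compile(r'^/|/$')
print(remove_firstlast_slash.sub('', find_slash.sub('/', "/loss//train/")))  # loss/train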
# Path: layers/transformer/relative_preln_transformer.py
class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):
is_preln = True
def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,
activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,
drop_expand: bool = True, head_projection_size: Optional[int] = None):
super().__init__(
d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,
activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,
drop_expand=drop_expand, head_projection_size=head_projection_size)
reset_prenorm_params(self, n_layers)
def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,
pos_offset: Optional[int] = None) -> torch.Tensor:
src2 = self.norm1(src)
src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,
pos_offset=pos_offset)
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
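# A small pure-PyTorch sketch of the pre-LN residual pattern used by the layer above,
# with plain nn.MultiheadAttention standing in for the repo's relative attention; all
# sizes and names here are illustrative assumptions, not the repo's API.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyPrelnBlock(nn.Module):
    def __init__(self, d_model=64, nhead=4, d_ff=128, dropout=0.1):
        super().__init__()
        self.norm1, self.norm2 = nn.LayerNorm(d_model), nn.LayerNorm(d_model)
        self.attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=True)
        self.linear1, self.linear2 = nn.Linear(d_model, d_ff), nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        h = self.norm1(x)                                                 # normalize first ...
        x = x + self.dropout(self.attn(h, h, h, need_weights=False)[0])   # ... then attend, then add the residual
        h = self.norm2(x)
        return x + self.dropout(self.linear2(self.dropout(F.relu(self.linear1(h)))))

print(ToyPrelnBlock()(torch.randn(2, 10, 64)).shape)  # torch.Size([2, 10, 64])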
# Path: tasks/simple/language_model/transformer_relu_analyze.py
from typing import Any, Dict
from .wikitext103_sp_transformer import Wikitext103SPTransformer
from .enwik8_transformer import Enwik8Transformer
from ... import task, args
from framework.helpers import TrainingHelper
from layers.transformer import PrelnRelativeTransformerEncoderLayer
import torch
import torch.nn
import dataset
import framework
class TransformerReluCountAnalyzeMixin:
helper: framework.helpers.TrainingHelper
def __init__(self, helper: TrainingHelper):
super().__init__(helper)
if not isinstance(self.model.layers[0], PrelnRelativeTransformerEncoderLayer):
raise ValueError("Expected PrelnRelativeTransformerEncoderLayer")
self.counts = {}
self.sq_counts = {}
self.norm = {}
self.mod_id_to_layer = {}
def fw_pre_hook(m, inputs):
i = self.mod_id_to_layer[id(m)]
inp = inputs[0]
print("hook", i, inp.shape)
cl = (inp > 0).float().sum(-1)
self.counts[i] = self.counts.get(i, 0) + cl.sum()
self.sq_counts[i] = self.sq_counts.get(i, 0) + (cl**2).sum()
self.norm[i] = self.norm.get(i, 0) + cl.numel()
for i, l in enumerate(self.model.layers):
self.mod_id_to_layer[id(l.linear2)] = i
l.linear2.register_forward_pre_hook(fw_pre_hook)
def validate(self) -> Dict[str, Any]:
self.counts = {}
self.norm = {}
res = super().validate()
means = {k: (c/self.norm[k]).item() for k, c in self.counts.items()}
stds = {k: ((self.sq_counts[k]/self.norm[k] - means[k]**2)**0.5).item() for k in self.counts.keys()}
torch.save({
"means": means,
"stds": stds,
}, "counts.pth")
@task()
class Wikitext103SPTransformerAnalyze(TransformerReluCountAnalyzeMixin, Wikitext103SPTransformer):
    pass
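# A self-contained sketch of the forward-pre-hook counting idea from
# TransformerReluCountAnalyzeMixin above: measure how many inputs of a Linear layer are
# positive, i.e. survived the preceding ReLU. The toy MLP is an illustrative assumption.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
stats = {"active": 0.0, "numel": 0}

def pre_hook(module, inputs):
    x = inputs[0]                                   # input of the last Linear = ReLU output
    stats["active"] += (x > 0).float().sum().item()
    stats["numel"] += x.numel()

model[2].register_forward_pre_hook(pre_hook)
model(torch.randn(32, 8))
print("fraction of active units:", stats["active"] / stats["numel"])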
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Q-Future/Q-Align
# Path: q_align/model/configuration_mplug_owl2.py
class MPLUGOwl2Config(LlamaConfig):
model_type = "mplug_owl2"
def __init__(self, visual_config=None, **kwargs):
if visual_config is None:
self.visual_config = DEFAULT_VISUAL_CONFIG
else:
self.visual_config = visual_config
super().__init__(
**kwargs,
)
# Path: q_align/model/configuration_mplug_owl2.py
class MplugOwlVisionConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`MplugOwlVisionModel`]. It is used to
    instantiate an mPLUG-Owl vision encoder according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the mPLUG-Owl
    [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
```"""
model_type = "mplug_owl_vision_model"
def __init__(
self,
hidden_size=1024,
intermediate_size=4096,
projection_dim=768,
num_hidden_layers=24,
num_attention_heads=16,
num_channels=3,
image_size=448,
patch_size=14,
hidden_act="quick_gelu",
layer_norm_eps=1e-6,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
use_flash_attn=False,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.use_flash_attn = use_flash_attn
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the vision config dict if we are loading from MplugOwlConfig
if config_dict.get("model_type") == "mplug-owl":
config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
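# A minimal usage sketch for the vision config documented above, assuming the q_align
# package of this repository is importable; the values shown are its documented defaults.
from q_align.model.configuration_mplug_owl2 import MplugOwlVisionConfig

cfg = MplugOwlVisionConfig(image_size=448, patch_size=14, hidden_size=1024)
print(cfg.num_hidden_layers, (cfg.image_size // cfg.patch_size) ** 2)  # 24 layers, 32 * 32 = 1024 patches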
# Path: q_align/model/configuration_mplug_owl2.py
class MplugOwlVisualAbstractorConfig(PretrainedConfig):
model_type = "mplug_owl_visual_abstract"
def __init__(
self,
num_learnable_queries=64,
hidden_size=1024,
num_hidden_layers=6,
num_attention_heads=16,
intermediate_size=2816,
attention_probs_dropout_prob=0.,
initializer_range=0.02,
layer_norm_eps=1e-6,
encoder_hidden_size=1024,
grid_size=None,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_learnable_queries = num_learnable_queries
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.encoder_hidden_size = encoder_hidden_size
self.grid_size = grid_size if grid_size else 32
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
# get the visual_abstractor config dict if we are loading from MplugOwlConfig
if config_dict.get("model_type") == "mplug-owl":
config_dict = config_dict["abstractor_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
)
return cls.from_dict(config_dict, **kwargs)
# Path: q_align/model/visual_encoder.py
class MplugOwlVisionModel(PreTrainedModel):
main_input_name = "pixel_values"
_no_split_modules = ["MplugOwlVisionEncoderLayer"]
def __init__(self, config):
super().__init__(config)
self.config = config
self.hidden_size = config.hidden_size
self.embeddings = MplugOwlVisionEmbeddings(config)
self.encoder = MplugOwlVisionEncoder(config)
self.post_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
self.post_init()
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Returns:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.embeddings
# Path: q_align/model/visual_encoder.py
class MplugOwlVisualAbstractorModel(PreTrainedModel):
_no_split_modules = ["MplugOwlVisualAbstractorLayer"]
def __init__(self, config, language_hidden_size):
super().__init__(config)
self.config = config
self.encoder = MplugOwlVisualAbstractorEncoder(config)
self.visual_fc = torch.nn.Linear(config.hidden_size, language_hidden_size)
self.query_embeds = torch.nn.Parameter(torch.randn(1, config.num_learnable_queries, config.hidden_size))
self.vit_eos = torch.nn.Parameter(torch.randn(1, 1, language_hidden_size))
self.post_init()
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def get_extended_attention_mask(
self,
attention_mask: torch.Tensor,
input_shape: Tuple[int],
device: torch.device,
) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`Tuple[int]`):
The shape of the input to the model.
device: (`torch.device`):
The device of the input to the model.
Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(
self,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:
shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and
value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are
used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key
value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape
`(batch_size, sequence_length)`.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
query_embeds = self.query_embeds.repeat(encoder_hidden_states.shape[0], 1, 1)
embedding_output = query_embeds
input_shape = embedding_output.size()[:-1]
batch_size, seq_length = input_shape
device = embedding_output.device
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask is None:
attention_mask = torch.ones(
(query_embeds.shape[0], query_embeds.shape[1]), dtype=torch.long, device=query_embeds.device
)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
if type(encoder_hidden_states) == list:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()
else:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if type(encoder_attention_mask) == list:
encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]
elif encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = sequence_output[:, 0, :]
sequence_output = self.visual_fc(sequence_output)
sequence_output = torch.cat([sequence_output, self.vit_eos.repeat(sequence_output.shape[0], 1, 1)], dim=1)
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
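# A small pure-PyTorch sketch of the additive-mask trick from get_extended_attention_mask
# above: a 1/0 padding mask becomes a 0/-10000 bias added to the raw attention scores, so
# softmax assigns (almost) zero probability to masked positions. Sizes are illustrative.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])           # (batch, seq); last token is padding
extended = attention_mask[:, None, None, :].float()     # broadcastable to (batch, heads, q_len, k_len)
extended = (1.0 - extended) * -10000.0

scores = torch.zeros(1, 1, 4, 4)                        # uniform raw scores for clarity
print(torch.softmax(scores + extended, dim=-1)[0, 0, 0])  # ~[0.3333, 0.3333, 0.3333, 0.0000]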
# Path: q_align/model/modeling_llama2.py
def replace_llama_modality_adaptive():
transformers.models.llama.configuration_llama.LlamaConfig = LlamaConfig
transformers.models.llama.modeling_llama.LlamaAttention = LlamaAttention
transformers.models.llama.modeling_llama.LlamaFlashAttention2 = LlamaFlashAttention2
transformers.models.llama.modeling_llama.LlamaSdpaAttention = LlamaSdpaAttention
transformers.models.llama.modeling_llama.LlamaDecoderLayer = LlamaDecoderLayer
transformers.models.llama.modeling_llama.LlamaModel.forward = model_forward
transformers.models.llama.modeling_llama.LlamaForCausalLM.forward = causal_model_forward
# Path: q_align/model/modeling_mplug_owl2.py
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, CLIPImageProcessor, LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel
from .modeling_llama2 import replace_llama_modality_adaptive
from icecream import ic
from PIL import Image
from icecream import ic
import torch
import torch.nn as nn
import copy
import os
import sys
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device)
new_modality_indicators.append(cur_modality_indicators)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
cur_modality_indicators = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
# Add modality indicator
assert image_token_start == len(cur_input_ids[:image_token_start])
cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long())
cur_modality_indicators.append(torch.ones(len(cur_image_features)).long())
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long())
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
# Modality
cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators]
cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0)
new_modality_indicators.append(cur_modality_indicators)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
# Embedding
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
# Modality
new_modality_indicators_align = []
for cur_modality_indicator in new_modality_indicators:
cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0)
new_modality_indicators_align.append(cur_new_embed)
new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0)
# Label
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
# Attention Mask
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
new_modality_indicators = torch.stack(new_modality_indicators, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels
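# An illustrative pure-PyTorch sketch of the splicing above: the text embeddings are cut
# at the image-token position and the visual features are inserted in between, with a 0/1
# modality indicator built alongside. The token ids and tiny sizes are assumptions made
# for this sketch only, not values taken from the repository.
import torch
import torch.nn as nn

IMAGE_TOKEN_INDEX = -200
embed = nn.Embedding(100, 8)

input_ids = torch.tensor([5, 7, IMAGE_TOKEN_INDEX, 9])
image_features = torch.randn(3, 8)                                    # three visual tokens

start = int(torch.where(input_ids == IMAGE_TOKEN_INDEX)[0][0])
spliced = torch.cat([embed(input_ids[:start]), image_features, embed(input_ids[start + 1:])], dim=0)
modality = torch.cat([torch.zeros(start), torch.ones(len(image_features)), torch.zeros(len(input_ids) - start - 1)])
print(spliced.shape, modality.tolist())                               # torch.Size([6, 8]) [0., 0., 1., 1., 1., 0.]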
class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel):
config_class = MPLUGOwl2Config
def __init__(self, config: MPLUGOwl2Config):
super(MPLUGOwl2LlamaModel, self).__init__(config)
class MPLUGOwl2LlamaForCausalLM(LlamaForCausalLM, MPLUGOwl2MetaForCausalLM):
config_class = MPLUGOwl2Config
def __init__(self, config):
super(LlamaForCausalLM, self).__init__(config)
        self.model = MPLUGOwl2LlamaModel(config)
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: berlino/gated_linear_attention
# Path: kernels/inter_chunk_contribution/preprocess_cumsum_gk.py
class PreprocessCumSum_GK(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, gk, normalizer_gk=8, clamp_min=-3):
q = q.contiguous()
k = k.contiguous()
gk = gk.contiguous()
B, H, NUM_CHUNK, CHUNK_SIZE, D = q.shape
D_k = k.shape[-1]
# D_v = v.shape[-1]
# (B, H, L, D_K, D_V)
# , memory_format=torch.contiguous_format)
# o = torch.empty_like(v).contiguous()
        # shared memory's limit.
# BLOCK_MODEL_K = 128
# BLOCK_MODEL_V = 128
        # split k
grid = (B * H, NUM_CHUNK)
ctx.grid = grid
k_reduce = torch.empty_like(k)
q_exp = torch.empty_like(q)
gk_cumsum = torch.empty_like(gk)
gk_last_exp = torch.empty_like(gk[:, :, :, 0], dtype=torch.float32)
_fwd_preprocess_cumsum_gk[grid](
q, k, gk, gk_cumsum,
q_exp, k_reduce, gk_last_exp,
CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=normalizer_gk, clamp_min=clamp_min,
D_MODEL_K=D_k, num_warps=8 if D_k >= 512 else 4
)
ctx.grid = grid
ctx.save_for_backward(q, k, gk, gk_cumsum)
ctx.normalizer_gk = normalizer_gk
ctx.clamp_min = clamp_min
return gk_cumsum, k_reduce, q_exp, gk_last_exp
@staticmethod
def backward(ctx, dgk_cumsum, dk_reduce, dq_exp, dgk_last_exp):
dgk_cumsum = dgk_cumsum.contiguous()
dk_reduce = dk_reduce.contiguous()
dq_exp = dq_exp.contiguous()
dgk_last_exp = dgk_last_exp.contiguous()
q, k, gk, gk_cumsum = ctx.saved_tensors
grid = ctx.grid
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dgk = torch.empty_like(gk)
B, H, NUM_CHUNK, CHUNK_SIZE, D_k = q.shape
# D_v = v.shape[-1]
_bwd_preprocess_cumsum_gk[grid](
q, k, gk, gk_cumsum,
dq_exp, dk_reduce, dgk_last_exp, dgk_cumsum,
dq, dk, dgk,
CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=ctx.normalizer_gk, clamp_min = ctx.clamp_min,
D_MODEL_K=D_k, num_warps=8 if D_k >= 512 else 4
)
return dq, dk, dgk, None, None, None
# Path: kernels/inter_chunk_contribution/preprocess_cumsum_gv.py
class PreprocessCumSum_GV(torch.autograd.Function):
@staticmethod
def forward(ctx, v, gv, normalizer_gv=8, clamp_min=-3):
v = v.contiguous()
gv = gv.contiguous()
B, H, NUM_CHUNK, CHUNK_SIZE, D_v = v.shape
# D_k = k.shape[-1]
# D_v = v.shape[-1]
# (B, H, L, D_K, D_V)
# , memory_format=torch.contiguous_format)
# o = torch.empty_like(v).contiguous()
        # shared memory's limit.
# BLOCK_MODEL_K = 128
# BLOCK_MODEL_V = 128
        # split k
grid = (B * H, NUM_CHUNK)
ctx.grid = grid
gv_cumsum = torch.empty_like(gv, dtype=torch.float32)
gv_cumsum_exp = torch.empty_like(gv)
v_reduce = torch.empty_like(v)
gv_last_exp = torch.empty_like(gv[:, :, :, 0], dtype=torch.float32)
_fwd_preprocess_cumsum_gv[grid](
v, gv, gv_cumsum, gv_cumsum_exp,
v_reduce, gv_last_exp,
CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=normalizer_gv, clamp_min=clamp_min,
D_MODEL_V=D_v, num_warps=8 if D_v >= 512 else 4
)
ctx.grid = grid
ctx.save_for_backward(v, gv, gv_cumsum)
ctx.normalizer_gv = normalizer_gv
ctx.clamp_min = clamp_min
return gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp
@staticmethod
def backward(ctx, dgv_cumsum, dv_reduce, dgv_cumsum_exp, dgv_last_exp):
dgv_cumsum = dgv_cumsum.contiguous()
dv_reduce = dv_reduce.contiguous()
dgv_cumsum_exp = dgv_cumsum_exp.contiguous()
dgv_last_exp = dgv_last_exp.contiguous()
v, gv, gv_cumsum = ctx.saved_tensors
grid = ctx.grid
B, H, NUM_CHUNK, CHUNK_SIZE, D_v = v.shape
dv = torch.empty_like(v)
dgv = torch.empty_like(gv)
_bwd_preprocess_cumsum_gv[grid](
v, gv, gv_cumsum, dgv_cumsum_exp, dv_reduce, dgv_last_exp, dgv_cumsum,
dv, dgv,
CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=ctx.normalizer_gv, clamp_min = ctx.clamp_min,
D_MODEL_V=D_v, num_warps=8 if D_v >= 512 else 4
)
return dv, dgv, None, None, None
# Path: kernels/inter_chunk_contribution/chunk_scan_triton_full.py
class Chunk_memory_update_full(torch.autograd.Function):
@staticmethod
def forward(ctx, decay_key_last, decay_value_last, to_add):
decay_key_last = decay_key_last.contiguous()
decay_value_last = decay_value_last.contiguous()
to_add = to_add.contiguous()
B, H, N, D_k, D_v = to_add.shape
output = torch.empty_like(to_add)
BLOCK_MODEL = 32
assert D_k % 32 == 0
assert D_v % 32 == 0
assert D_k == decay_key_last.shape[-1]
assert D_v == decay_value_last.shape[-1]
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
ctx.grid = grid
ctx.BLOCK_MODEL = BLOCK_MODEL
_fwd_recurrence[grid](
to_add,
decay_key_last,
decay_value_last,
output,
D_MODEL_K=D_k, D_MODEL_V=D_v,
NUM_BLOCK=N,
BLOCK_MODEL=BLOCK_MODEL
)
output[:, :, 0] = 0
ctx.save_for_backward(output, decay_key_last, decay_value_last)
return output
@staticmethod
def backward(ctx, DO):
DO = DO.contiguous()
output, decay_key_last, decay_value_last = ctx.saved_tensors
B, H, N, D_k, D_v = output.shape
num_block = N
BLOCK_MODEL = 32
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
# I don't want atomic_add to be used in the backward pass
# so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)
# afterward, I sum over this dimension to get the correct gradient
D_p1 = torch.empty(B, H, N, D_v // BLOCK_MODEL, D_k, device=DO.device, dtype=torch.float32)
D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)
_bwd_recurrence[grid](
output, decay_key_last, decay_value_last,
DO, D_p1, D_p2,
NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL,
D_MODEL_K = D_k,
D_MODEL_V = D_v,
BLOCK_MODEL = BLOCK_MODEL
)
output[:, :, -1] = 0
D_p1[:, :, 0] = 0
D_p1[:, :, -1] = 0
D_p2[:, :, 0] = 0
D_p2[:, :, -1] = 0
return D_p1.sum(-2), D_p2.sum(-2), output
# Path: kernels/inter_chunk_contribution/chunk_scan_triton_only_gk.py
class Chunk_memory_update_only_gk(torch.autograd.Function):
@staticmethod
def forward(ctx, decay_key_last, to_add):
decay_key_last = decay_key_last.contiguous()
to_add = to_add.contiguous()
B, H, N, D_k, D_v = to_add.shape
output = torch.empty_like(to_add)
BLOCK_MODEL = 32
assert D_k % 32 == 0
assert D_v % 32 == 0
assert D_k == decay_key_last.shape[-1]
# assert D_v == to_add.shape[-1]
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
ctx.grid = grid
ctx.BLOCK_MODEL = BLOCK_MODEL
_fwd_recurrence[grid](
to_add,
decay_key_last,
output,
D_MODEL_K=D_k, D_MODEL_V=D_v,
NUM_BLOCK=N,
BLOCK_MODEL=BLOCK_MODEL
)
output[:, :, 0] = 0
ctx.save_for_backward(output, decay_key_last)
return output
@staticmethod
def backward(ctx, DO):
DO = DO.contiguous()
output, decay_key_last = ctx.saved_tensors
B, H, N, D_k, D_v = output.shape
num_block = N
BLOCK_MODEL = 32
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
# I don't want atomic_add to be used in the backward pass
# so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)
# afterward, I sum over this dimension to get the correct gradient
D_p1 = torch.empty(B, H, N, D_v // BLOCK_MODEL, D_k, device=DO.device, dtype=torch.float32)
# D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)
_bwd_recurrence[grid](
output, decay_key_last,
DO, D_p1,
NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL,
D_MODEL_K = D_k,
D_MODEL_V = D_v,
BLOCK_MODEL = BLOCK_MODEL
)
output[:, :, -1] = 0
D_p1[:, :, 0] = 0
D_p1[:, :, -1] = 0
return D_p1.sum(-2), output
# Path: kernels/inter_chunk_contribution/chunk_scan_triton_only_gv.py
class Chunk_memory_update_only_gv(torch.autograd.Function):
@staticmethod
def forward(ctx, decay_value_last, to_add):
decay_value_last = decay_value_last.contiguous()
to_add = to_add.contiguous()
B, H, N, D_k, D_v = to_add.shape
output = torch.empty_like(to_add)
BLOCK_MODEL = 32
assert D_k % 32 == 0
assert D_v % 32 == 0
# assert D_k == decay_key_last.shape[-1]
assert D_v == decay_value_last.shape[-1]
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
ctx.grid = grid
ctx.BLOCK_MODEL = BLOCK_MODEL
_fwd_recurrence[grid](
to_add,
decay_value_last,
output,
D_MODEL_K=D_k, D_MODEL_V=D_v,
NUM_BLOCK=N,
BLOCK_MODEL=BLOCK_MODEL
)
output[:, :, 0] = 0
ctx.save_for_backward(output, decay_value_last)
return output
@staticmethod
def backward(ctx, DO):
DO = DO.contiguous()
output, decay_value_last = ctx.saved_tensors
B, H, N, D_k, D_v = output.shape
num_block = N
BLOCK_MODEL = 32
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
# I don't want atomic_add to be used in the backward pass
# so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)
# afterward, I sum over this dimension to get the correct gradient
D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)
_bwd_recurrence[grid](
output, decay_value_last,
DO, D_p2,
NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL,
D_MODEL_K = D_k,
D_MODEL_V = D_v,
BLOCK_MODEL = BLOCK_MODEL
)
output[:, :, -1] = 0
# D_p1[:, :, 0] = 0
# D_p1[:, :, -1] = 0
D_p2[:, :, 0] = 0
D_p2[:, :, -1] = 0
return D_p2.sum(-2), output
# Path: kernels/inter_chunk_contribution/chunk_scan_triton_no_decay.py
class Chunk_memory_update_no_decay(torch.autograd.Function):
@staticmethod
def forward(ctx, to_add):
# decay_key_last = decay_key_last.contiguous()
# decay_value_last = decay_value_last.contiguous()
to_add = to_add.contiguous()
B, H, N, D_k, D_v = to_add.shape
output = torch.empty_like(to_add)
BLOCK_MODEL = 32
assert D_k % 32 == 0
assert D_v % 32 == 0
# assert D_k == decay_key_last.shape[-1]
# assert D_v == decay_value_last.shape[-1]
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
ctx.grid = grid
ctx.BLOCK_MODEL = BLOCK_MODEL
_fwd_recurrence[grid](
to_add,
# decay_key_last,
# decay_value_last,
output,
D_MODEL_K=D_k, D_MODEL_V=D_v,
NUM_BLOCK=N,
BLOCK_MODEL=BLOCK_MODEL
)
output[:, :, 0] = 0
ctx.save_for_backward(output)
return output
@staticmethod
def backward(ctx, DO):
DO = DO.contiguous()
output, = ctx.saved_tensors
B, H, N, D_k, D_v = output.shape
num_block = N
BLOCK_MODEL = 32
grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)
# I don't want atomic_add to be used in the backward pass
# so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)
# afterward, I sum over this dimension to get the correct gradient
# D_p1 = torch.empty(B, H, N, D_v // BLOCK_MODEL, D_k, device=DO.device, dtype=torch.float32)
# D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)
_bwd_recurrence[grid](
output,
DO,
NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL,
D_MODEL_K = D_k,
D_MODEL_V = D_v,
BLOCK_MODEL = BLOCK_MODEL
)
output[:, :, -1] = 0
return output
# Path: kernels/inter_chunk_contribution/fn.py
from .preprocess_cumsum_gk import PreprocessCumSum_GK
from .preprocess_cumsum_gv import PreprocessCumSum_GV
from .chunk_scan_triton_full import Chunk_memory_update_full
from .chunk_scan_triton_only_gk import Chunk_memory_update_only_gk
from .chunk_scan_triton_only_gv import Chunk_memory_update_only_gv
from .chunk_scan_triton_no_decay import Chunk_memory_update_no_decay
def inter_chunk_onc(query, key, value, gk, gv, normalizer_gk=16, normalizer_gv=16, clam_min=-3):
if gk is not None:
g_key_cumsum, reduce_key, q_exp, g_key_last_exp = PreprocessCumSum_GK.apply(query, key, gk, normalizer_gk, clam_min)
else:
reduce_key = key
q_exp = None
g_key_cumsum = None
g_key_last_exp = None
# gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp
if gv is not None:
g_value_cumsum, reduce_value, g_value_cumsum_exp, g_value_last_exp = PreprocessCumSum_GV.apply( value, gv, normalizer_gv, clam_min)
else:
reduce_value = value
g_value_cumsum = None
g_value_last_exp = None
to_add = reduce_key.transpose(-1, -2) @ reduce_value
if gk is not None and gv is not None:
memory_cache = Chunk_memory_update_full.apply(g_key_last_exp, g_value_last_exp, to_add)
inter_chunk_contribution = ((q_exp) @ memory_cache) * g_value_cumsum_exp
elif gk is None and gv is not None:
memory_cache = Chunk_memory_update_only_gv.apply(g_value_last_exp, to_add)
inter_chunk_contribution = ((query) @ memory_cache) * g_value_cumsum_exp
elif gk is not None and gv is None:
memory_cache = Chunk_memory_update_only_gk.apply(g_key_last_exp, to_add)
inter_chunk_contribution = ((q_exp) @ memory_cache)
else:
memory_cache = Chunk_memory_update_no_decay.apply(to_add)
inter_chunk_contribution = ((query) @ memory_cache)
    return g_key_cumsum, g_value_cumsum, inter_chunk_contribution
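# A small pure-PyTorch reference for the no-decay branch above (Chunk_memory_update_no_decay):
# the running memory S accumulates K_j^T V_j chunk by chunk, the inter-chunk term is Q @ S,
# and adding a causal intra-chunk term (not part of this file; included here only to close
# the check) reproduces full unnormalized causal linear attention. Sizes are illustrative.
import torch

L, C, Dk, Dv = 8, 4, 3, 5
q, k, v = torch.randn(L, Dk), torch.randn(L, Dk), torch.randn(L, Dv)

# Reference: o_t = q_t @ sum_{s<=t} k_s^T v_s
ref = torch.stack([q[t] @ (k[: t + 1].T @ v[: t + 1]) for t in range(L)])

out, S = [], torch.zeros(Dk, Dv)
mask = torch.tril(torch.ones(C, C))
for i in range(0, L, C):
    Qc, Kc, Vc = q[i:i + C], k[i:i + C], v[i:i + C]
    inter = Qc @ S                           # contribution of all previous chunks
    intra = ((Qc @ Kc.T) * mask) @ Vc        # causal contribution within the current chunk
    out.append(inter + intra)
    S = S + Kc.T @ Vc                        # memory update, mirroring "to_add" above
print(torch.allclose(torch.cat(out), ref, atol=1e-5))  # True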
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kakaobrain/honeybee
# Path: serve/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_index(self, num_frames, num_segments):
def load_video(self, path, num_frames=4):
def get_images(self, log_dir=None):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: serve/gradio_css.py
# Path: serve/model_utils.py
def post_process_code(code):
sep = "\n```"
if sep in code:
blocks = code.split(sep)
if len(blocks) % 2 == 1:
for i in range(1, len(blocks), 2):
blocks[i] = blocks[i].replace("\\_", "_")
code = sep.join(blocks)
return code
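# A tiny illustration of post_process_code above, assuming the function defined just above
# is in scope; the sample string is an illustrative assumption. Escaped underscores inside
# a fenced block are restored, while text outside the fences is untouched.
sample = "Use it like this:\n```\nmy\\_func(x)\n```"
print(post_process_code(sample))  # the fenced body becomes my_func(x)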
# Path: serve/model_worker.py
class Honeybee_Server:
def __init__(
self,
base_model="checkpoints/7B-C-Abs-M144/last",
log_dir="./",
load_in_8bit=False,
bf16=True,
device="cuda",
io=None,
):
self.log_dir = log_dir
self.model, self.tokenizer, self.processor = get_model(
base_model,
use_bf16=bf16,
load_in_8bit=load_in_8bit,
)
self.model.to(device)
self.bf16 = bf16
self.load_in_8bit = load_in_8bit
if not load_in_8bit:
if bf16:
self.model.bfloat16()
else:
self.model.half()
self.model.eval()
self.io = io
def evaluate(
self,
pixel_values=None,
input_ids=None,
temperature=1.0,
top_p=0.9,
top_k=5,
num_beams=3,
max_new_tokens=256,
stream_output=True,
length_penalty=1.0,
no_repeat_ngram_size=2,
do_sample=False,
early_stopping=True,
**kwargs
):
generation_config = {
"temperature": temperature,
"top_p": top_p,
"top_k": top_k,
"num_beams": num_beams,
"no_repeat_ngram_size": no_repeat_ngram_size,
"do_sample": do_sample,
"early_stopping": early_stopping,
"length_penalty": length_penalty,
}
generate_params = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"return_dict_in_generate": True,
"output_scores": True,
"max_new_tokens": max_new_tokens,
}
generate_params.update(generation_config)
if stream_output:
# Stream the reply 1 token at a time.
# This is based on the trick of using 'stopping_criteria' to create an iterator,
# from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243.
def generate_with_callback(callback=None, **kwargs):
kwargs.setdefault("stopping_criteria", transformers.StoppingCriteriaList())
kwargs["stopping_criteria"].append(Stream(callback_func=callback))
with torch.no_grad():
self.model.generate(**kwargs)
def generate_with_streaming(**kwargs):
return Iteratorize(generate_with_callback, kwargs, callback=None)
with generate_with_streaming(**generate_params) as generator:
for output in generator:
# new_tokens = len(output) - len(input_ids[0])
decoded_output = self.tokenizer.decode(output)
if output[-1] in [self.tokenizer.eos_token_id]:
break
yield post_process_output(decoded_output)
return # early return for stream_output
with torch.no_grad():
generation_output = self.model.generate(
pixel_values=pixel_values,
input_ids=input_ids,
return_dict_in_generate=True,
output_scores=True,
max_new_tokens=max_new_tokens,
**generation_config
)
s = generation_output.sequences[0].cpu()
output = self.tokenizer.decode(s)
yield post_process_output(output)
def predict(self, data):
prompt = [data["text_input"]]
images = data["images"] if len(data["images"]) > 0 else None
if images:
images = [Image.open(BytesIO(base64.b64decode(image))) for image in images]
inputs = self.processor(text=prompt, images=images, return_tensors="pt")
input_ids = inputs["input_ids"].to(self.model.device)
if "pixel_values" in inputs:
if self.load_in_8bit:
pixel_values = inputs["pixel_values"].half().to(self.model.device)
elif self.bf16:
pixel_values = inputs["pixel_values"].bfloat16().to(self.model.device)
else:
pixel_values = inputs["pixel_values"].half().to(self.model.device)
else:
pixel_values = None
cache = None
try:
for x in self.evaluate(
pixel_values, input_ids, stream_output=True, **data["generation_config"]
):
cache = x # noqa: F841
yield (x, True)
except ValueError as e:
print("Caught ValueError:", e)
yield (server_error_msg, False)
except torch.cuda.CudaError as e:
print("Caught torch.cuda.CudaError:", e)
yield (server_error_msg, False)
return
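# A minimal sketch of the stopping-criteria-as-callback trick used by evaluate() above:
# a StoppingCriteria that never stops generation but forwards every partial sequence to a
# callback (the repo's Stream/Iteratorize wrap this same idea into an iterator). The class
# name below is an illustrative assumption, not the repo's API.
import torch
from transformers import StoppingCriteria

class CallbackCriteria(StoppingCriteria):
    def __init__(self, callback):
        self.callback = callback

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        self.callback(input_ids[0])  # invoked once per generated token
        return False                 # never request early stopping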
# Path: serve/serve_utils.py
def add_text(state, text, image, video, request: gr.Request):
if len(text) <= 0 and (image is None or video is None):
state.skip_next = True
return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5
if image is not None:
if "<image>" not in text:
text = text + "\n<image>"
text = (text, image)
if video is not None:
num_frames = 4
if "<image>" not in text:
text = text + "\n<image>" * num_frames
text = (text, video)
state.append_message(state.roles[0], text)
state.append_message(state.roles[1], None)
state.skip_next = False
return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
# Path: serve/serve_utils.py
def regenerate(state, request: gr.Request):
state.messages[-1][-1] = None
state.skip_next = False
return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
# Path: serve/serve_utils.py
class _IOWrapper:
def __init__(self):
def set_io(self, new_io):
def __getattr__(self, name):
def __str__(self):
def init():
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
def upvote_last_response(state, model_selector, request: gr.Request):
def downvote_last_response(state, model_selector, request: gr.Request):
def flag_last_response(state, model_selector, request: gr.Request):
def regenerate(state, request: gr.Request):
def clear_history(request: gr.Request):
def add_text(state, text, image, video, request: gr.Request):
def after_process_image(prompt):
# Path: serve/web_server.py
import argparse
import json
import os
import time
import gradio as gr
import requests
import torch
from .conversation import default_conversation
from .gradio_css import code_highlight_css
from .model_utils import post_process_code
from .model_worker import Honeybee_Server
from .serve_utils import add_text # noqa: F401
from .serve_utils import regenerate # noqa: F401
from .serve_utils import (
after_process_image,
clear_history,
disable_btn,
downvote_last_response,
enable_btn,
flag_last_response,
get_window_url_params,
init,
no_change_btn,
upvote_last_response,
)
output = chunk[0].strip()
output = post_process_code(output)
state.messages[-1][-1] = output + "▌"
yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
else:
output = chunk[0].strip()
state.messages[-1][-1] = output
yield (state, state.to_gradio_chatbot(), "", None, None) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
time.sleep(0.03)
except requests.exceptions.RequestException as e: # noqa: F841
state.messages[-1][
-1
] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
yield (state, state.to_gradio_chatbot(), "", None, None) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
state.messages[-1][-1] = state.messages[-1][-1][:-1]
yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5
def regenerate_http_bot(
state,
max_output_tokens,
temperature,
top_k,
top_p,
num_beams,
no_repeat_ngram_size,
length_penalty,
do_sample,
request: gr.Request,
):
state.messages[-1][-1] = None
state.skip_next = False
yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
prompt = after_process_image(state.get_prompt())
images = state.get_images()
data = {
"text_input": prompt,
"images": images if len(images) > 0 else [],
"generation_config": {
"top_k": int(top_k),
"top_p": float(top_p),
"num_beams": int(num_beams),
"no_repeat_ngram_size": int(no_repeat_ngram_size),
"length_penalty": float(length_penalty),
"do_sample": bool(do_sample),
"temperature": float(temperature),
"max_new_tokens": min(int(max_output_tokens), 1536),
},
}
state.messages[-1][-1] = "▌"
yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
try:
for chunk in model.predict(data):
if chunk:
if chunk[1]:
output = chunk[0].strip()
output = post_process_code(output)
state.messages[-1][-1] = output + "▌"
yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
else:
output = chunk[0].strip()
state.messages[-1][-1] = output
yield (state, state.to_gradio_chatbot(), "", None, None) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
time.sleep(0.03)
except requests.exceptions.RequestException as e: # noqa: F841
state.messages[-1][
-1
] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
yield (state, state.to_gradio_chatbot(), "", None, None) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
)
return
state.messages[-1][-1] = state.messages[-1][-1][:-1]
yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5
title_markdown = """
**Notice**: The output is generated by a top-k sampling scheme and may involve some randomness.
"""
tos_markdown = """
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
**Copyright 2023 Alibaba DAMO Academy.**
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: taikinman/langrila
# Path: src/langrila/base.py
class BaseConversationLengthAdjuster(ABC):
@abstractmethod
def run(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
raise NotImplementedError
def __call__(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
return self.run(messages)
# Path: src/langrila/base.py
class BaseFilter(ABC):
@abstractmethod
def apply(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
raise NotImplementedError
@abstractmethod
def restore(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
raise NotImplementedError
# Path: src/langrila/base.py
class BaseModule(ABC):
@abstractmethod
def run(self, *args, **kwargs):
raise NotImplementedError
async def arun(self, *args, **kwargs):
raise NotImplementedError
def stream(self, *args, **kwargs):
raise NotImplementedError
async def astream(self, *args, **kwargs):
raise NotImplementedError
def __call__(self, *args, **kwargs):
_async = kwargs.pop("arun", False)
_stream = kwargs.pop("stream", False)
if _async:
if _stream:
return self.astream(*args, **kwargs)
else:
return asyncio.create_task(self.arun(*args, **kwargs))
else:
if _stream:
return self.stream(*args, **kwargs)
else:
return self.run(*args, **kwargs)
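# Hedged usage sketch for the dispatch in BaseModule.__call__ above. EchoModule is a
# made-up subclass used only for illustration; it is not part of langrila.
import asyncio

class EchoModule(BaseModule):
    def run(self, text: str) -> str:
        return text.upper()

    async def arun(self, text: str) -> str:
        return text.upper()

module = EchoModule()
print(module("hello"))  # default path calls run() -> "HELLO"

async def _demo():
    task = module("hello", arun=True)  # arun=True wraps arun() in an asyncio.Task
    print(await task)                  # -> "HELLO"

asyncio.run(_demo())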
# Path: src/langrila/conversation_adjuster/truncate.py
class OldConversationTruncationModule(BaseConversationLengthAdjuster):
"""
Adjust the number of tokens to be less than or equal to context_length, starting from the oldest message forward
"""
def __init__(self, model_name: str, context_length: int):
if model_name in MODEL_POINT.keys():
print(f"{model_name} is automatically converted to {MODEL_POINT[model_name]}")
model_name = MODEL_POINT[model_name]
assert (
model_name in MODEL_CONFIG.keys()
), f"model_name must be one of {', '.join(sorted(MODEL_CONFIG.keys()))}."
self.model_name = model_name
self.context_length = context_length
def run(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:
adjusted_messages: list[dict[str, str]] = []
total_n_tokens: int = 0
for message in messages[::-1]:
if total_n_tokens <= self.context_length:
message, total_n_tokens = self.adjust_message_length_and_update_total_tokens(
message, total_n_tokens
)
if message is not None:
adjusted_messages.append(message)
return adjusted_messages[::-1]
def adjust_message_length_and_update_total_tokens(
self, message: dict[str, Any], total_n_tokens: int = 0
) -> str:
n_tokens = get_n_tokens(message, self.model_name)
if total_n_tokens + n_tokens["total"] <= self.context_length:
total_n_tokens += n_tokens["total"]
return message, total_n_tokens
else:
available_n_tokens = max(
self.context_length - total_n_tokens - n_tokens["other"], 0
) # available_n_tokens for content
if available_n_tokens > 0:
if isinstance(message["content"], str):
message["content"] = self.truncate(message["content"], available_n_tokens)
total_n_tokens += available_n_tokens + n_tokens["other"]
print(
"Input message is truncated because total length of messages exceeds context length."
)
return message, total_n_tokens
elif "vision" in self.model_name and isinstance(message["content"], list):
return None, total_n_tokens # truncate whole image
else:
raise ValueError(
f"message['content'] must be str or list, but {type(message['content'])} is given."
)
else:
return None, total_n_tokens
def truncate(self, text: str, n_tokens: int) -> str:
try:
TOKENIZER = tiktoken.encoding_for_model(self.model_name)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
TOKENIZER = tiktoken.get_encoding("cl100k_base")
if n_tokens > 0:
return TOKENIZER.decode(TOKENIZER.encode(text)[-n_tokens:])
else:
return ""
# Path: src/langrila/message.py
class Message(BaseModel):
content: str
images: Any | list[Any] | None = None
image_resolution: str | None = None
@property
def as_system(self):
return {"role": "system", "content": self.content}
@property
def as_user(self):
if self.images:
content = [{"type": "text", "text": self.content}]
if not isinstance(self.images, list):
images = [self.images]
else:
images = self.images
for image in images:
content.append(
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{encode_image(image)}",
"detail": self.image_resolution if self.image_resolution else "low",
},
}
)
return {"role": "user", "content": content}
else:
return {"role": "user", "content": self.content}
@property
def as_assistant(self):
return {"role": "assistant", "content": self.content}
@property
def as_tool(self):
return {"role": "tool", "content": self.content}
@property
def as_function(self):
return {"role": "function", "content": self.content}
@field_validator("image_resolution")
def check_image_resolution_value(cls, val):
if val not in ["low", "high"]:
raise ValueError(
"image_resolution must be either 'low' or 'high' due to token management."
)
return val
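# Hedged usage sketch for the Message payloads above. The text-only path needs no image
# encoding; the image path (not exercised here) builds an OpenAI vision-style content list
# via encode_image, which is defined elsewhere in the package.
msg = Message(content="Describe this picture, please.")
print(msg.as_user)       # {'role': 'user', 'content': 'Describe this picture, please.'}
print(msg.as_assistant)  # {'role': 'assistant', 'content': 'Describe this picture, please.'}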
# Path: src/langrila/model_config.py
_NEWER_MODEL_CONFIG = {
"gpt-4-1106-preview": {
"max_tokens": 128000,
"prompt_cost_per_token": 0.00001,
"completion_cost_per_token": 0.00003,
},
"gpt-4-vision-preview": {
"max_tokens": 128000,
"prompt_cost_per_token": 0.00001,
"completion_cost_per_token": 0.00003,
},
"gpt-3.5-turbo-1106": {
"max_tokens": 4096,
"prompt_cost_per_token": 0.000001,
"completion_cost_per_token": 0.000002,
},
}
# Path: src/langrila/model_config.py
_OLDER_MODEL_CONFIG = {
"gpt-4-0314": {
"max_tokens": 8192,
"prompt_cost_per_token": 0.00003,
"completion_cost_per_token": 0.00006,
},
"gpt-4-0613": {
"max_tokens": 8192,
"prompt_cost_per_token": 0.00003,
"completion_cost_per_token": 0.00006,
},
"gpt-4-32k-0314": {
"max_tokens": 32768,
"prompt_cost_per_token": 0.00006,
"completion_cost_per_token": 0.00012,
},
"gpt-4-32k-0613": {
"max_tokens": 32768,
"prompt_cost_per_token": 0.00006,
"completion_cost_per_token": 0.00012,
},
"gpt-3.5-turbo-0301": {
"max_tokens": 4096,
"prompt_cost_per_token": 0.0000015,
"completion_cost_per_token": 0.000002,
},
"gpt-3.5-turbo-0613": {
"max_tokens": 4096,
"prompt_cost_per_token": 0.0000015,
"completion_cost_per_token": 0.000002,
},
"gpt-3.5-turbo-16k-0613": {
"max_tokens": 16384,
"prompt_cost_per_token": 0.000003,
"completion_cost_per_token": 0.000004,
},
"gpt-3.5-turbo-instruct": {
"max_tokens": 8192,
"prompt_cost_per_token": 0.0000015,
"completion_cost_per_token": 0.000002,
},
}
# Path: src/langrila/model_config.py
MODEL_CONFIG = {}
# Path: src/langrila/model_config.py
MODEL_POINT = {
"gpt-4": "gpt-4-0613",
"gpt-4-32k": "gpt-4-32k-0613",
"gpt-4-128k": "gpt-4-1106-preview",
"gpt-4-vision": "gpt-4-vision-preview",
"gpt-3.5-turbo": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
}
# Path: src/langrila/result.py
class FunctionCallingResults(BaseModel):
usage: Usage
results: list[ToolOutput]
prompt: Optional[str | dict[str, str] | list[dict[str, str]]] = None
# Path: src/langrila/result.py
class ToolOutput(BaseModel):
call_id: str | None
funcname: str | None
args: str | None
output: Any
# Path: src/langrila/usage.py
class Usage(BaseModel):
prompt_tokens: int = 0
completion_tokens: int = 0
def __add__(self, other: __class__ | dict | CompletionUsage):
if isinstance(other, dict):
other = Usage(**other)
if hasattr(other, 'prompt_tokens'):
prompt_tokens = self.prompt_tokens + other.prompt_tokens
else:
prompt_tokens = self.prompt_tokens
if hasattr(other, 'completion_tokens'):
completion_tokens = self.completion_tokens + other.completion_tokens
else:
completion_tokens = self.completion_tokens
return Usage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
)
def __sub__(self, other: __class__ | dict | CompletionUsage):
if isinstance(other, dict):
other = Usage(**other)
if hasattr(other, 'prompt_tokens'):
prompt_tokens = self.prompt_tokens - other.prompt_tokens
else:
prompt_tokens = self.prompt_tokens
if hasattr(other, 'completion_tokens'):
completion_tokens = self.completion_tokens - other.completion_tokens
else:
completion_tokens = self.completion_tokens
return Usage(
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
)
@property
def total_tokens(self):
return self.prompt_tokens + self.completion_tokens
@field_validator('prompt_tokens')
def check_prompt_tokens(cls, v):
if v < 0:
raise ValueError('prompt_tokens must be greater or equal to 0')
return v
@field_validator('completion_tokens')
def check_completion_tokens(cls, v):
if v < 0:
raise ValueError('completion_tokens must be greater or equal to 0')
return v
def __repr__(self):
return f'Usage(prompt_tokens={self.prompt_tokens}, completion_tokens={self.completion_tokens}, total_tokens={self.total_tokens})'
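# Hedged sketch of the token bookkeeping above: Usage can be added to another Usage or a
# plain dict (converted via Usage(**other)) and exposes the derived total_tokens property.
u = Usage(prompt_tokens=120, completion_tokens=30)
u = u + {"prompt_tokens": 80, "completion_tokens": 20}
print(u.prompt_tokens, u.completion_tokens, u.total_tokens)  # 200 50 250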
# Path: src/langrila/utils.py
def get_async_client(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
api_type: Optional[str] = "openai",
timeout: int = 60,
max_retries: int = 5,
):
if api_type == "azure":
return AsyncAzureOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
api_version=api_version,
endpoint_env_name=endpoint_env_name,
deployment_id_env_name=deployment_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
elif api_type == "openai":
return AsyncOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
else:
raise ValueError(f"api_type must be 'azure' or 'openai'. Got {api_type}.")
# Path: src/langrila/utils.py
def get_client(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
api_type: Optional[str] = "openai",
timeout: int = 60,
max_retries: int = 5,
):
if api_type == "azure":
assert (
api_version and endpoint_env_name and deployment_id_env_name
), "api_version, endpoint_env_name, and deployment_id_env_name must be specified when api_type is 'azure'."
return AzureOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
api_version=api_version,
endpoint_env_name=endpoint_env_name,
deployment_id_env_name=deployment_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
elif api_type == "openai":
return OpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
else:
raise ValueError(f"api_type must be 'azure' or 'openai'. Got {api_type}.")
# Path: src/langrila/utils.py
def get_token_limit(model_name: str):
if model_name in MODEL_ZOO:
return MODEL_CONFIG[model_name]["max_tokens"]
else:
raise NotImplementedError(
f"get_token_limit() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
# Path: src/langrila/utils.py
def make_batch(iterable, batch_size=1):
length = len(iterable)
for ndx in range(0, length, batch_size):
yield iterable[ndx : min(ndx + batch_size, length)]
# Path: src/langrila/chat_module/function_calling.py
import asyncio
import json
from typing import Callable, Optional
from pydantic import BaseModel, field_validator
from ..base import BaseConversationLengthAdjuster, BaseFilter, BaseModule
from ..conversation_adjuster.truncate import OldConversationTruncationModule
from ..message import Message
from ..model_config import _NEWER_MODEL_CONFIG, _OLDER_MODEL_CONFIG, MODEL_CONFIG, MODEL_POINT
from ..result import FunctionCallingResults, ToolOutput
from ..usage import Usage
from ..utils import get_async_client, get_client, get_token_limit, make_batch
type: str
description: str
def model_dump(self):
return {self.name: super().model_dump(exclude=["name"])}
@field_validator("type")
def check_type_value(cls, v):
if v not in {"string", "number", "boolean"}:
raise ValueError("type must be one of string or number.")
return v
class ToolParameter(BaseModel):
type: str = "object"
properties: list[ToolProperty]
required: Optional[list[str]] = None
def model_dump(self):
dumped = super().model_dump(exclude=["properties", "required"])
_properties = {}
for p in self.properties:
_properties.update(p.model_dump())
dumped["properties"] = _properties
if self.required is not None:
dumped["required"] = self.required
return dumped
@field_validator("type")
def check_type_value(cls, v):
if v not in {"object"}:
raise ValueError("supported type is only object")
return v
@field_validator("required")
def check_required_value(cls, required, values):
properties = values.data["properties"]
property_names = {p.name for p in properties}
if required is not None:
for r in required:
if r not in property_names:
raise ValueError(f"required property '{r}' is not defined in properties.")
return required
class ToolConfig(BaseModel):
name: str
type: str = "function"
description: str
parameters: ToolParameter
def model_dump(self):
dumped = super().model_dump(exclude=["parameters", "type"])
dumped["parameters"] = self.parameters.model_dump()
return {"type": self.type, self.type: dumped}
@field_validator("type")
def check_type_value(cls, v):
if v not in {"function"}:
raise ValueError("supported type is only function")
return v
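# Hedged sketch of how the models above serialize into an OpenAI-style tool definition.
# get_weather and its single parameter are made up for illustration; ToolProperty's name
# field is declared just above the start of this excerpt.
weather_tool = ToolConfig(
    name="get_weather",
    description="Look up the current weather for a city.",
    parameters=ToolParameter(
        properties=[ToolProperty(name="city", type="string", description="City name to query.")],
        required=["city"],
    ),
)
print(weather_tool.model_dump())
# -> {'type': 'function', 'function': {'name': 'get_weather', 'description': '...',
#     'parameters': {'type': 'object', 'properties': {'city': {...}}, 'required': ['city']}}}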
class BaseFunctionCallingModule(BaseModule):
def __init__(
self,
api_key_env_name: str,
model_name: str,
tools: list[Callable],
tool_configs: list[ToolConfig],
tool_choice: str = "auto",
api_type: str = "openai",
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
timeout: int = 30,
max_retries: int = 2,
max_tokens: int = 2048,
seed: Optional[int] = None,
) -> None:
assert api_type in ["openai", "azure"], "api_type must be 'openai' or 'azure'."
if api_type == "azure":
assert (
api_version and endpoint_env_name and deployment_id_env_name
), "api_version, endpoint_env_name, and deployment_id_env_name must be specified for Azure API."
self.api_key_env_name = api_key_env_name
self.organization_id_env_name = organization_id_env_name
self.api_type = api_type
self.api_version = api_version
self.endpoint_env_name = endpoint_env_name
self.deployment_id_env_name = deployment_id_env_name
self.model_name = model_name
self.timeout = timeout
self.max_retries = max_retries
self.tools = {f.__name__: f for f in tools}
_tool_names_from_config = {f.name for f in tool_configs}
assert (
len(_tool_names_from_config ^ set(self.tools.keys())) == 0
), f"tool names in tool_configs must be the same as the function names in tools. tool names in tool_configs: {_tool_names_from_config}, function names in tools: {set(self.tools.keys())}"
self.tool_choice = tool_choice
self.max_tokens = max_tokens
self.additional_inputs = {}
if model_name in _NEWER_MODEL_CONFIG.keys():
self.seed = seed
self.additional_inputs["seed"] = seed
self.tool_configs = [f.model_dump() for f in tool_configs]
self.additional_inputs["tools"] = self.tool_configs
self.additional_inputs["tool_choice"] = self.tool_choice
else:
if seed:
| print( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Open-All-Scale-Causal-Engine/OpenASCE
# Path: openasce/inference/tree/tree_node.py
class GradientCausalTreeNode(CausalTreeNode):
"""A class for a node in a Gradient Boosting Causal Tree."""
def __init__(self, conf: ConfigTree = None, **kwargs):
super().__init__(conf, **kwargs)
self.eta = kwargs.get('eta', None)
def estimate(self, G, H, **kwargs):
"""
Estimate the treatment effect given the gradients and hessians.
Arguments:
G: The gradients.
H: The hessians.
**kwargs: Additional keyword arguments.
Returns:
ndarray: The estimated treatment effect.
"""
lambd = kwargs.get('lambd', 0)
return -G / (H + lambd)
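# Worked example of the Newton-style leaf estimate above: with summed gradient G, summed
# hessian H and L2 regularisation lambd, the estimated leaf value is -G / (H + lambd).
# Numbers are arbitrary.
import numpy as np

G, H, lambd = np.array([6.0]), np.array([2.0]), 1.0
print(-G / (H + lambd))  # -> [-2.]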
# Path: openasce/inference/tree/dataset.py
class Dataset(object):
"""Abstract interface of class dataset"""
def __init__(self):
pass
def __len__(self):
return self.features.shape[0]
@staticmethod
def new_instance(conf):
data_conf = conf.get('dataset', conf)
cls_name = data_conf.get('type', 'dataset.CSVDataset')
return get_class(cls_name).new_instance(conf)
def read(self, filename):
pass
def sub_dataset(self, index=None):
"""
Abstract interface of sub-sampling
Arguments:
index (_type_, optional): _description_. Defaults to None.
Raises:
NotImplementedError: _description_
"""
raise NotImplementedError
def description(self, detail: bool = False) -> None:
"""
        Describe the dataset.
Arguments:
detail (bool, optional): [description]. Defaults to False.
"""
n_ins, n_feat = self.features.shape
n_y_len = self.targets.shape[1]
# calculate treatment distinct count
treats = np.unique(self.treatment)
logger.info(f'#inst: {n_ins}')
logger.info(f'#feat: {n_feat}')
        logger.info(f'#time series length: {n_y_len}')
logger.info(f'#treatments : {len(treats)}')
@property
def targets(self):
raise NotImplementedError
@property
def features(self):
raise NotImplementedError
@property
def treatment(self):
raise NotImplementedError
@property
def feature_columns(self):
if hasattr(self, 'used_features'):
return getattr(self, 'used_features')
elif isinstance(self.features, pd.DataFrame):
return self.features.columns
else:
raise RuntimeError('There is no attribute `feature_columns`!')
# Path: openasce/inference/tree/histogram.py
class Histogram(object):
def __init__(self, conf: ConfigTree):
hist_conf = conf.get('histogram', conf)
self.conf = conf
self.info = CausalDataInfo(conf)
self.tr_dts = []
self.max_bin_num = hist_conf.max_bin_num # Maximum number of bins
self.min_point_per_bin = hist_conf.min_point_per_bin # Minimum number of points for binning
# [leaf, feature, treatment, bin, target]
self.bin_counts = None
self.bin_hists = {}
self._data = None
def update_hists(self, target, index, leaves_range, treatment, bin_features, is_gradient, is_splitting, threads):
"""
Update histograms for all nodes in the same level of a tree
Arguments:
target (_type_): _description_
index (_type_): _description_
leaves_range (_type_): _description_
treatment (_type_): _description_
bin_features (_type_): _description_
is_gradient (bool): _description_
is_splitting (bool): _description_
threads (_type_): _description_
Raises:
ValueError: _description_
Returns:
_type_: _description_
"""
n, m = bin_features.shape
n_w = self.info.n_treatment
l = len(leaves_range)
leaves = list(range(0, l, 2))
n_bins = self.max_bin_num
if is_gradient:
assert isinstance(target, (dict, )), f'target should be a dict!'
keys = [k for k in target.keys()]
outs = [np.zeros([l, m, n_bins, n_w, target[k].shape[1]], target[k].dtype) for k in keys]
targets = [target[k] for k in keys]
# update histogram of target
update_histograms(targets, bin_features, index, leaves_range, treatment, outs, leaves, n_w, n_bins, threads)
for i, k in enumerate(keys):
if l > 1:
outs[i][1::2] = self.bin_hists[k][is_splitting] - outs[i][::2]
self.bin_hists[k] = outs[i]
else:
            assert isinstance(target, (dict, )), 'target should be a dict!'
keys = target.keys()
outs = [np.zeros([l, m, n_bins, n_w, target[k].shape[1]], target[k].dtype) for k in keys]
targets = [target[k] for k in keys]
# update histogram of target
update_histograms(targets, bin_features, index, leaves_range, treatment, outs, leaves, n_w, n_bins, threads)
for i, k in enumerate(keys):
if l > 1:
outs[i][1::2] = self.bin_hists[k][is_splitting] - outs[i][::2]
self.bin_hists[k] = outs[i]
# update counts
out = np.zeros([l, m, n_bins, n_w, 1], np.int32)
update_histogram(np.ones([n, 1], np.int32), bin_features, index, leaves_range, treatment, out, leaves, n_w,
n_bins, threads)
if l > 1:
out[1::2] = np.expand_dims(self.bin_counts[is_splitting], -1) - out[::2]
self.bin_counts = out[:, :, :, :, 0]
return self
def __getattr__(self, __name: str):
"""
Get the attribute value.
Arguments:
__name (str): The name of the attribute.
Returns:
ndarray: The attribute value.
Raises:
AttributeError: If the attribute is not found.
"""
if __name in self.bin_hists:
return self.bin_hists[__name]
raise AttributeError()
@classmethod
def new_instance(cls, dataset: Dataset, conf: ConfigTree = None, **kwargs):
"""
Create a new instance of the histogram.
Arguments:
dataset (Dataset): The dataset.
conf (ConfigTree): The configuration tree.
kwargs: Additional keyword arguments.
Returns:
Histogram: The new instance of the histogram.
"""
hist = cls(conf, dataset.treatment, dataset.targets)
hist.binning(dataset)
return hist
# Path: openasce/inference/tree/information.py
class CausalDataInfo(object):
def __init__(self, conf, **kwargs):
data_conf = conf.get('dataset', conf)
self.n_treatment = data_conf.get('n_treatment')
self.feature_columns = data_conf.get('feature', None)
self.treatment_column = data_conf.get('treatment', None)
self.feature_ratio = conf.get('feature_ratio', None)
self.instance_ratio = conf.get('instance_ratio', None)
self.n_period = data_conf.get('n_period')
self.treat_dt = data_conf.get('treat_dt')
hist_conf = conf.get('histogram', {})
self.n_bins = hist_conf.get('max_bin_num', 64)
self.min_point_per_bin = hist_conf.get('min_point_per_bin', 10)
tree_conf = conf.get('tree', {})
self.lambd = tree_conf.get('lambd', None)
self.gamma = tree_conf.get('gamma', None)
self.coef = tree_conf.get('coefficient', None)
self.parallel_l2 = tree_conf.get('parallel_l2', None)
self.min_point_num_node = tree_conf.get('min_point_num_node', None)
self.max_depth = tree_conf.get('max_depth', None)
# Path: openasce/inference/tree/bin.py
class BinMapper(KBinsDiscretizer):
"""A class for binning numerical features."""
def __init__(self, conf: ConfigTree):
self.info = CausalDataInfo(conf)
self._binmaper_cpp: list = None
def transform(self, X):
"""
Transform the input features using the bin mapper.
Arguments:
X: Input features.
Returns:
The transformed features.
"""
return value_bin_parallel(X, self._binmaper_cpp)
def fit(self, X, y=None):
"""
Fit the bin mapper on the input features.
Arguments:
X: Input features.
y: The target variable (not used).
Returns:
The fitted bin mapper object.
"""
xshape = X.shape
        assert len(xshape) == 2, f'`X` must be 2-dimensional!'
self._binmaper_cpp = find_bin_parallel(X, self.info.n_bins, self.info.min_point_per_bin,
self.info.min_point_per_bin, True)
self.description()
return self
def description(self):
"""Print the description of the bin mapper."""
TRACE(f'{"*"*43}description bin{"*"*43}')
TRACE(f'*{len(self._binmaper_cpp)} features*')
TRACE(f'*number of bins:{[len(b.GetUpperBoundValue()) for b in self._binmaper_cpp]}')
TRACE(f'{"*"*100}')
def inverse_transform(self, Xt, index: int = None):
"""
Inverse transform the transformed features to the original values.
Arguments:
Xt: Transformed features.
index: Index of the feature to inverse transform.
Returns:
The inverse transformed features.
"""
if index is not None:
assert len(self._binmaper_cpp) > index and index >= 0, f'index must between [0, {len(self._binmaper_cpp)})!'
return self._binmaper_cpp[index].BinToValue(Xt)
raise NotImplementedError
def fit_transform(self, X, y=None, **fit_params):
"""
Fit the bin mapper on the input features and transform them.
Arguments:
X: Input features.
y: The target variable (not used).
fit_params: Additional parameters for fitting.
Returns:
The transformed features.
"""
self.fit(X)
return self.transform(X)
def fit_dataset(self, data):
"""
Fit the bin mapper on the dataset.
Arguments:
data: Dataset object containing the input features.
"""
x = to_row_major(data.features)
if self.is_fit is False:
self.fit(x)
bin_features = self.transform(x)
bin_features = pd.DataFrame(bin_features, columns=data.feature_columns)
data.bin_features = bin_features
@property
def is_fit(self):
"""
Check if the bin mapper is fit.
Returns:
True if the bin mapper is fit, False otherwise.
"""
return self._binmaper_cpp is not None
@property
def upper_bounds(self):
"""
Get the upper bounds of the bins.
Returns:
The upper bounds of the bins.
"""
return np.asfarray([m.GetUpperBoundValue() for m in self._binmaper_cpp])
# Path: openasce/inference/tree/cppnode.py
def create_didnode_from_dict(info):
"""
Create a CppDebiasNode from a dictionary.
Arguments:
info (Dict): The node information.
Returns:
CppDebiasNode: The CppDebiasNode instance.
"""
# basic information for tree node
assert len(info['children']) == 2, f'children should be 2!'
node = common.CppDebiasNode()
basic_keys = node.get_property_keys()
for k in info.keys():
if k in basic_keys:
setattr(node, k, info[k])
else:
node.set_info(k, info[k])
return node
# Path: openasce/inference/tree/cppnode.py
def predict(nodes: List[common.CppDiDNode], x, out, key, threads=20):
"""
Predict using the tree nodes.
Arguments:
nodes (List): The list of tree nodes.
x (ndarray): The input data.
out (ndarray): The output array.
key (ndarray): The prediction key.
threads (int): The number of threads.
Returns:
ndarray: The predicted values.
Raises:
RuntimeError: If the number of nodes is less than or equal to 0.
ValueError: If the node type is not supported.
"""
if len(nodes) <= 0:
raise RuntimeError(f'The number of nodes must be greater than 0!')
elif isinstance(nodes[0], list) is False:
nodes = [nodes]
if isinstance(nodes[0][0], common.CppDiDNode):
return common.predict_did(nodes, out, x, key, threads)
elif isinstance(nodes[0][0], common.CppDebiasNode):
return common.predict_debias(nodes, out, x, key, threads)
else:
raise ValueError(f'{type(nodes[0][0])} is not supported!')
# Path: openasce/inference/tree/losses.py
class Loss(metaclass=ABCMeta):
"""Abstract base class for loss functions."""
def __init__(self, **kwargs):
self._name = kwargs.get('name', self.__class__.__name__)
self.classification = True
@staticmethod
def new_instance(conf):
"""
Create a new instance of the loss function.
Arguments:
conf: Configuration.
Returns:
An instance of the loss function.
"""
conf = conf.get('tree', conf)
loss_cls = conf.get('loss_cls', None)
return new_instance(loss_cls)
@abstractmethod
def loss(self, target, prediction, *args):
"""
Calculate the loss.
Arguments:
target: Target values.
prediction: Predicted values.
args: Additional arguments.
Raises:
NotImplementedError: If the method is not implemented.
Returns:
The loss value.
"""
raise NotImplementedError
# Path: openasce/inference/tree/gradient_causal_tree.py
from typing import Dict, List
from collections import OrderedDict
from pyhocon import ConfigTree
from .tree_node import GradientCausalTreeNode
from .dataset import Dataset
from .histogram import Histogram
from .information import CausalDataInfo
from .bin import BinMapper
from .splitting_losses import *
from .cppnode import create_didnode_from_dict, predict
from .losses import Loss
from .utils import *
import numpy as np
tr_data: The training dataset.
hist (Histogram): The histogram object.
idx_map: The index mapping.
leaves (List[GradientCausalTreeNode]): The list of leaves.
leaves_range: The range of leaves.
eta: The parallel interval between the treated and control group.
Returns:
leaves_new (List[GradientCausalTreeNode]): The new leaves.
leaves_range_new: The new range of leaves.
"""
leaves, leaves_range, is_splitting = _filter(leaves, leaves_range)
n_leaf = len(split_conds)
if len(leaves) == 0 or len(split_conds) == 0:
return leaves, leaves_range
x_binned = to_row_major(tr_data.bin_features[self.feature_used], np.int32)
treatment = to_row_major(tr_data.treatment, np.int32)
sorted_split = OrderedDict(sorted(split_conds.items()))
split_info = np.asarray([[info['feature'], info['threshold']] for _, info in sorted_split.items()]).astype(
np.int32
)
out = np.zeros([n_leaf * 2, 2], np.int32)
update_x_map(x_binned, idx_map, split_info, leaves_range, out)
leaves_range_new = out
hist.update_hists(
{
'bin_grad_hist': gradients[0],
'bin_hess_hist': gradients[1],
'bin_cgrad_hist': cgradients[0],
'bin_chess_hist': cgradients[1],
'bin_eta_hist': eta,
},
idx_map,
leaves_range_new,
treatment,
x_binned,
is_gradient=True,
is_splitting=is_splitting,
threads=self.nthreads,
)
# create new node
leaves_new = []
for i, leaf in enumerate(leaves):
ltheta, rtheta = split_conds[leaf.level_id]['theta']
l_eta, r_eta = split_conds[leaf.level_id]['eta']
leaf._children = [
GradientCausalTreeNode(
self.conf, leaf_id=leaf.leaf_id * 2 + 1, level_id=i * 2, theta=ltheta, eta=l_eta
),
GradientCausalTreeNode(
self.conf, leaf_id=leaf.leaf_id * 2 + 2, level_id=i * 2 + 1, theta=rtheta, eta=r_eta
),
]
leaves_new.extend(leaf._children)
fid, bin_id = split_info[i]
leaf.split_feature = self.feature_used_map[fid]
leaf.split_thresh = bin_id
leaf.split_rawthresh = self.bin_mapper.inverse_transform(bin_id, self.feature_used_map[fid])
return leaves_new, leaves_range_new
def _split_cpp(self, leaves: List[GradientCausalTreeNode], hist: Histogram):
"""
Split the leaf nodes at the current level using C++ implementation.
Arguments:
leaves: The list of leaf nodes.
hist: The histogram object.
Returns:
split_conds: The split conditions.
"""
# Step 1: Collect all the split points that need to calculate the loss
# Step 2: Perform the split
info = self.info
n_leaves, m, n_bins, n_w, n_y = hist.bin_grad_hist.shape
t0, T, n_w = info.treat_dt, info.n_period, info.n_treatment
lambd = info.lambd
coef = info.coef
min_num = self.info.min_point_num_node
configs = {leaf.level_id: {fid: [0, n_bins] for fid in range(m)} for leaf in leaves}
parameters = f"""{{
"tree": {{
"lambd": {lambd},
"coeff": {coef},
"min_point_num_node": {min_num},
"min_var_rate": {0.1},
"monotonic_constraints": {self.w_monotonic},
"parallel_l2": {self.info.parallel_l2}
}},
"threads": {self.nthreads},
"dataset": {{
"treat_dt": {t0}
}}
}}"""
res = didtree_splitting_losses(
configs,
hist.bin_grad_hist,
hist.bin_hess_hist,
hist.bin_cgrad_hist,
hist.bin_chess_hist,
hist.bin_eta_hist,
hist.bin_counts,
parameters,
)
split_conds = {}
for leaf in leaves:
level_id = leaf.level_id
if level_id not in res:
leaf.is_leaf = True
continue
# opt_feature, opt_bin_idx, opt_loss
if bool(np.isinf(res[level_id][2])) is False:
etas = res[level_id][4]
split_conds[level_id] = {
'feature': res[level_id][0],
'threshold': res[level_id][1],
'loss': res[level_id][2],
'theta': (res[level_id][3][0], res[level_id][3][1]),
| 'eta': (np.asarray([etas[0], -etas[0]]), np.asarray([etas[1], -etas[1]])), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 8none1/idealLED
# Path: custom_components/ideal_led/idealled.py
class IDEALLEDInstance:
def __init__(self, address, reset: bool, delay: int, hass) -> None:
self.loop = asyncio.get_running_loop()
self._mac = address
self._reset = reset
self._delay = delay
self._hass = hass
self._device: BLEDevice | None = None
self._device = bluetooth.async_ble_device_from_address(self._hass, address)
if not self._device:
raise ConfigEntryNotReady(
f"You need to add bluetooth integration (https://www.home-assistant.io/integrations/bluetooth) or couldn't find a nearby device with address: {address}"
)
self._connect_lock: asyncio.Lock = asyncio.Lock()
self._client: BleakClientWithServiceCache | None = None
self._disconnect_timer: asyncio.TimerHandle | None = None
self._cached_services: BleakGATTServiceCollection | None = None
self._expected_disconnect = False
self._is_on = None
self._rgb_color = None
self._brightness = 255
self._effect = None
self._effect_speed = 0x64
self._color_mode = ColorMode.RGB
self._write_uuid = None
self._write_colour_uuid = None
self._read_uuid = None
self._turn_on_cmd = None
self._turn_off_cmd = None
self._model = self._detect_model()
self._on_update_callbacks = []
LOGGER.debug(
"Model information for device %s : ModelNo %s. MAC: %s",
self._device.name,
self._model,
self._mac,
)
def _detect_model(self):
x = 0
for name in NAME_ARRAY:
if self._device.name.lower().startswith(name.lower()): # TODO: match on BLE provided model instead of name
return x
x = x + 1
async def _write(self, data: bytearray):
"""Send command to device and read response."""
await self._ensure_connected()
cipher = AES.new(SECRET_ENCRYPTION_KEY, AES.MODE_ECB)
ciphered_data = cipher.encrypt(data)
await self._write_while_connected(ciphered_data)
async def _write_colour_data(self, data: bytearray):
"""Send command to device and read response."""
await self._ensure_connected()
await self._write_colour_while_connected(data)
async def _write_while_connected(self, data: bytearray):
LOGGER.debug(f"Writing data to {self.name}: {data}")
await self._client.write_gatt_char(self._write_uuid, data, False)
async def _write_colour_while_connected(self, data: bytearray):
LOGGER.debug(f"Writing colour data to {self.name}: {data}")
await self._client.write_gatt_char(self._write_colour_uuid, data, False)
def _notification_handler(self, _sender: BleakGATTCharacteristic, data: bytearray) -> None:
# This doesn't work. I can't get the controller to send notifications.
"""Handle BLE notifications from the device. Update internal state to reflect the device state."""
LOGGER.debug("N: %s: Notification received", self.name)
#self.local_callback()
@property
def mac(self):
return self._device.address
@property
def reset(self):
return self._reset
@property
def name(self):
return self._device.name
@property
def rssi(self):
return self._device.rssi
@property
def is_on(self):
return self._is_on
@property
def brightness(self):
return self._brightness
@property
def rgb_color(self):
return self._rgb_color
@property
def effect_list(self) -> list[str]:
return EFFECT_LIST
@property
def effect(self):
return self._effect
@property
def color_mode(self):
return self._color_mode
@retry_bluetooth_connection_error
async def set_rgb_color(self, rgb: Tuple[int, int, int], brightness: int | None = None):
# TODO: Add support for brightness
self._rgb_color = rgb
if brightness is None:
if self._brightness is None:
self._brightness = 255
else:
brightness = self._brightness
brightness_percent = int(brightness * 100 / 255)
# Now adjust the RBG values to match the brightness
red = int(rgb[0] * brightness_percent / 100)
green = int(rgb[1] * brightness_percent / 100)
blue = int(rgb[2] * brightness_percent / 100)
# RGB packet
rgb_packet = bytearray.fromhex("0F 53 47 4C 53 00 00 64 50 1F 00 00 1F 00 00 32")
red = int(red >> 3) # You CAN send 8 bit colours to this thing, but you probably shouldn't for power reasons. Thanks to the good folks at Hacker News for that insight.
green = int(green >> 3)
blue = int(blue >> 3)
rgb_packet[9] = red
rgb_packet[12] = red
rgb_packet[10] = green
rgb_packet[13] = green
rgb_packet[11] = blue
rgb_packet[14] = blue
await self._write(rgb_packet)
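    # Worked example of the packing above (values are arbitrary): for rgb=(255, 128, 0) at
    # brightness 128, brightness_percent = 50, so the scaled channels are (127, 64, 0);
    # shifting each right by 3 gives the 5-bit values (15, 8, 0), which are written twice
    # into bytes 9-14 of the 16-byte packet.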
@retry_bluetooth_connection_error
# effect, reverse=0, speed=50, saturation=50, colour_data=COLOUR_DATA
async def set_effect(self, effect: str, brightness: int | None = NotImplemented):
if effect not in EFFECT_LIST:
LOGGER.error("Effect %s not supported", effect)
return
self._effect = effect
effect_id = EFFECT_MAP.get(effect)
        if effect_id > 11: effect_id = 11  # clamp to the highest supported effect id
packet = bytearray.fromhex("0A 4D 55 4C 54 08 00 64 50 07 32 00 00 00 00 00")
packet[5] = effect_id
packet[6] = 0 # reverse
packet[8] = 50 # speed
packet[10] = 50 # saturation (brightness?)
await self._write(packet)
# Now we send the colour data
await self.write_colour_data()
@retry_bluetooth_connection_error
async def write_colour_data(self):
# This is sent after switching to an effect to tell the device what sort of pattern to show.
# In the app you can edit this yourself, but HA doesn't have the UI for such a thing
# so for now I'm just going to hardcode it to a rainbow pattern. You could change this to
# whatever you want, but for an effect the maximum length is 7 colours.
colour_list = []
colour_divisions = int(360 / 7)
for i in range(7):
h = i * colour_divisions
r, g, b = colorsys.hsv_to_rgb(h / 360, 1, 1)
r = int(r * 255)
g = int(g * 255)
b = int(b * 255)
colour_list.append((r, g, b))
#print(f"Colour list: {colour_list}")
length = len(colour_list)
colour_data = []
colour_data.append(length*3) # 3 bytes per colour
colour_data.append(0) # Don't know what this is, perhaps just a separator
for colour in colour_list:
colour_data.append(colour[0])
colour_data.append(colour[1])
colour_data.append(colour[2])
await self._write_colour_data(colour_data)
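    # Worked example of the payload built above: a length byte of 21 (7 colours x 3 bytes),
    # a 0 separator, then 21 RGB bytes sweeping the hue wheel in steps of 51 degrees
    # starting at (255, 0, 0) - 23 values in total.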
@retry_bluetooth_connection_error
async def turn_on(self):
packet = bytearray.fromhex("05 54 55 52 4E 01 00 00 00 00 00 00 00 00 00 00")
packet[5] = 1
await self._write(packet)
self._is_on = True
@retry_bluetooth_connection_error
async def turn_off(self):
packet = bytearray.fromhex("05 54 55 52 4E 01 00 00 00 00 00 00 00 00 00 00")
packet[5] = 0
await self._write(packet)
self._is_on = False
@retry_bluetooth_connection_error
async def update(self):
LOGGER.debug("%s: Update in lwdnetwf called", self.name)
try:
await self._ensure_connected()
self._is_on = False
except Exception as error:
self._is_on = None # failed to connect, this should mark it as unavailable
LOGGER.error("Error getting status: %s", error)
track = traceback.format_exc()
LOGGER.debug(track)
async def _ensure_connected(self) -> None:
"""Ensure connection to device is established."""
if self._connect_lock.locked():
LOGGER.debug(
"%s: Connection already in progress, waiting for it to complete",
self.name,
)
if self._client and self._client.is_connected:
self._reset_disconnect_timer()
return
async with self._connect_lock:
# Check again while holding the lock
if self._client and self._client.is_connected:
self._reset_disconnect_timer()
return
LOGGER.debug("%s: Connecting", self.name)
client = await establish_connection(
BleakClientWithServiceCache,
self._device,
self.name,
self._disconnected,
cached_services=self._cached_services,
ble_device_callback=lambda: self._device,
)
LOGGER.debug("%s: Connected", self.name)
resolved = self._resolve_characteristics(client.services)
if not resolved:
# Try to handle services failing to load
resolved = self._resolve_characteristics(await client.get_services())
self._cached_services = client.services if resolved else None
self._client = client
self._reset_disconnect_timer()
            # Subscribing to notifications is needed for LEDnetWF devices to accept commands
self._notification_callback = self._notification_handler
await client.start_notify(self._read_uuid, self._notification_callback)
LOGGER.debug("%s: Subscribed to notifications", self.name)
def _resolve_characteristics(self, services: BleakGATTServiceCollection) -> bool:
"""Resolve characteristics."""
for characteristic in NOTIFY_CHARACTERISTIC_UUIDS:
if char := services.get_characteristic(characteristic):
self._read_uuid = char
LOGGER.debug("%s: Read UUID: %s", self.name, self._read_uuid)
break
for characteristic in WRITE_CMD_CHARACTERISTIC_UUIDS:
if char := services.get_characteristic(characteristic):
self._write_uuid = char
LOGGER.debug("%s: Write UUID: %s", self.name, self._write_uuid)
break
for characteristic in WRITE_COL_CHARACTERISTIC_UUIDS:
if char := services.get_characteristic(characteristic):
self._write_colour_uuid = char
LOGGER.debug("%s: Write colour UUID: %s", self.name, self._write_colour_uuid)
break
return bool(self._read_uuid and self._write_uuid and self._write_colour_uuid)
def _reset_disconnect_timer(self) -> None:
"""Reset disconnect timer."""
if self._disconnect_timer:
self._disconnect_timer.cancel()
self._expected_disconnect = False
if self._delay is not None and self._delay != 0:
LOGGER.debug(
"%s: Configured disconnect from device in %s seconds",
self.name,
self._delay
)
self._disconnect_timer = self.loop.call_later(self._delay, self._disconnect)
def _disconnected(self, client: BleakClientWithServiceCache) -> None:
"""Disconnected callback."""
if self._expected_disconnect:
LOGGER.debug("%s: Disconnected from device", self.name)
return
LOGGER.warning("%s: Device unexpectedly disconnected", self.name)
def _disconnect(self) -> None:
"""Disconnect from device."""
self._disconnect_timer = None
asyncio.create_task(self._execute_timed_disconnect())
async def stop(self) -> None:
"""Stop the LEDBLE."""
LOGGER.debug("%s: Stop", self.name)
await self._execute_disconnect()
async def _execute_timed_disconnect(self) -> None:
"""Execute timed disconnection."""
LOGGER.debug(
"%s: Disconnecting after timeout of %s",
self.name,
self._delay
)
await self._execute_disconnect()
async def _execute_disconnect(self) -> None:
"""Execute disconnection."""
async with self._connect_lock:
read_char = self._read_uuid
client = self._client
self._expected_disconnect = True
self._client = None
self._write_uuid = None
self._read_uuid = None
if client and client.is_connected:
                await client.stop_notify(read_char) # TODO: I don't think this is needed. Bleak docs say it isn't.
await client.disconnect()
LOGGER.debug("%s: Disconnected", self.name)
def local_callback(self):
# Placeholder to be replaced by a call from light.py
# I can't work out how to plumb a callback from here to light.py
return
# Path: custom_components/ideal_led/const.py
DOMAIN = "ideal_led"
# Path: custom_components/ideal_led/const.py
CONF_RESET = "reset"
# Path: custom_components/ideal_led/const.py
CONF_DELAY = "delay"
# Path: custom_components/ideal_led/config_flow.py
import asyncio
import voluptuous as vol
import logging
from .idealled import IDEALLEDInstance
from typing import Any
from bluetooth_data_tools import human_readable_name
from homeassistant import config_entries
from homeassistant.const import CONF_MAC
from homeassistant.helpers.device_registry import format_mac
from homeassistant.data_entry_flow import FlowResult
from homeassistant.core import callback
from homeassistant.components.bluetooth import (
BluetoothServiceInfoBleak,
async_discovered_service_info,
)
from bluetooth_sensor_state_data import BluetoothData
from home_assistant_bluetooth import BluetoothServiceInfo
from .const import DOMAIN, CONF_RESET, CONF_DELAY
LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({("host"): str})
class DeviceData(BluetoothData):
def __init__(self, discovery_info) -> None:
self._discovery = discovery_info
LOGGER.debug("Discovered bluetooth devices, DeviceData, : %s , %s", self._discovery.address, self._discovery.name)
def supported(self):
| return self._discovery.name.lower().startswith("isp-") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amirzandieh/HyperAttention
# Path: src/flash_attn_triton.py
def _fwd_kernel(
Q,
K,
V,
Bias,
Out,
Lse,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_ob,
stride_oh,
stride_om,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _bwd_preprocess_do_o_dot(
Out,
DO,
Delta,
stride_ob,
stride_oh,
stride_om,
stride_dob,
stride_doh,
stride_dom,
nheads,
seqlen_q,
seqlen_q_rounded,
headdim,
BLOCK_M: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
):
def _bwd_store_dx(
dx_ptrs,
dx,
offs_n,
offs_d,
seqlen,
headdim,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
even_headdim,
):
def _bwd_kernel_one_col_block(
start_n,
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qm,
stride_kn,
stride_vn,
stride_bm,
stride_dom,
stride_dqm,
stride_dkn,
stride_dvn,
seqlen_q,
seqlen_k,
headdim,
ATOMIC_ADD: tl.constexpr,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def init_to_zero(name):
def _bwd_kernel(
Q,
K,
V,
Bias,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_bb,
stride_bh,
stride_bm,
stride_dob,
stride_doh,
stride_dom,
stride_dqb,
stride_dqh,
stride_dqm,
stride_dkb,
stride_dkh,
stride_dkn,
stride_dvb,
stride_dvh,
stride_dvn,
nheads,
seqlen_q,
seqlen_k,
seqlen_q_rounded,
headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BIAS_TYPE: tl.constexpr,
IS_CAUSAL: tl.constexpr,
BLOCK_HEADDIM: tl.constexpr,
SEQUENCE_PARALLEL: tl.constexpr,
EVEN_M: tl.constexpr,
EVEN_N: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
def _flash_attn_backward(
do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None
):
def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):
def backward(ctx, do, dlse_use_needed=None):
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
BLOCK = 128
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
class FlashAttnFunc(torch.autograd.Function):
# Path: src/hyper_attn_triton.py
def _fwd_hyper_kernel(
Q,
K,
V,
q_sort_idx,
k_sort_idx,
Out,
Lse,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_q_sort_idxb,
stride_q_sort_idxh,
stride_q_sort_idxm,
stride_k_sort_idxb,
stride_k_sort_idxh,
stride_k_sort_idxn,
stride_ob,
stride_oh,
stride_om,
nheads,
block_size,
sample_size,
seqlen_k,
seqlen_q,
headdim,
v_headdim,
smooth_block,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BLOCK_HEADDIM: tl.constexpr,
V_BLOCK_HEADDIM: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
EVEN_V_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _bwd_preprocess_do_o_dot(
Out,
DO,
Delta,
stride_ob,
stride_oh,
stride_om,
stride_dob,
stride_doh,
stride_dom,
nheads,
seqlen_q,
v_headdim,
BLOCK_M: tl.constexpr,
V_BLOCK_HEADDIM: tl.constexpr,
):
def _bwd_store_dx(
dx_ptrs,
dx,
offs_d,
headdim,
even_headdim,
):
def _bwd_blocked_kernel_one_col(
start_n,
Q,
K,
V,
Q_idx,
K_idx,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qm,
stride_kn,
stride_vn,
stride_dom,
stride_dqm,
stride_dkn,
stride_dvn,
stride_q_idxm,
stride_k_idxn,
seqlen_q,
block_size,
headdim,
v_headdim,
smooth_block,
BLOCK_HEADDIM: tl.constexpr,
V_BLOCK_HEADDIM: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
EVEN_V_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _bwd_permuted_block_diagonal_kernel(
Q,
K,
V,
q_sort_idx,
k_sort_idx,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_q_sort_idxb,
stride_q_sort_idxh,
stride_q_sort_idxm,
stride_k_sort_idxb,
stride_k_sort_idxh,
stride_k_sort_idxn,
stride_dob,
stride_doh,
stride_dom,
stride_dqb,
stride_dqh,
stride_dqm,
stride_dkb,
stride_dkh,
stride_dkn,
stride_dvb,
stride_dvh,
stride_dvn,
nheads,
seqlen_q,
block_size,
headdim,
v_headdim,
smooth_block,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BLOCK_HEADDIM: tl.constexpr,
V_BLOCK_HEADDIM: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
EVEN_V_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _bwd_sampled_col_kernel(
Q,
K,
V,
DO,
DQ,
DK,
DV,
LSE,
D,
softmax_scale,
stride_qb,
stride_qh,
stride_qm,
stride_kb,
stride_kh,
stride_kn,
stride_vb,
stride_vh,
stride_vn,
stride_dob,
stride_doh,
stride_dom,
stride_dqb,
stride_dqh,
stride_dqm,
stride_dkb,
stride_dkh,
stride_dkn,
stride_dvb,
stride_dvh,
stride_dvn,
nheads,
seqlen_q,
headdim,
v_headdim,
CACHE_KEY_SEQLEN_Q,
CACHE_KEY_SEQLEN_K,
BLOCK_HEADDIM: tl.constexpr,
V_BLOCK_HEADDIM: tl.constexpr,
EVEN_HEADDIM: tl.constexpr,
EVEN_V_HEADDIM: tl.constexpr,
BLOCK_M: tl.constexpr,
BLOCK_N: tl.constexpr,
):
def _hyper_attn_forward(q, k, v, q_sort_idx, k_sort_idx, block_size, sample_size, softmax_scale=None,
smooth_block=False):
def _hyper_attn_backward(
do, q, k, v, q_sort_idx, k_sort_idx, o, lse, dq, dk, dv, block_size, sample_size, softmax_scale=None,
smooth_block=False):
def forward(ctx, q, k, v, q_sort_idx, k_sort_idx, block_size, sample_size=0, softmax_scale=None,
smooth_block=False):
def backward(ctx, do, dlse_use_needed=None):
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
V_BLOCK_HEADDIM = max(triton.next_power_of_2(v_headdim), 16)
BLOCK = 128
V_BLOCK_HEADDIM = max(triton.next_power_of_2(v_headdim), 16)
BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
BLOCK = 128
class HyperAttnFunc(torch.autograd.Function):
# Path: src/attn_utils.py
def add_self_attentions(attn1, lse1, attn2, lse2):
"""
inputs:
- attn1, attn2: 4d-tensors with shape [b, h, n, d]
- lse1, lse2: 4d-tensors of log-sum-exp with shape [b, h, n, 1]
output:
- attn
= (attn1 * exp(lse1) + attn2 * exp(lse2)) / (exp(lse1) + exp(lse2))
= (attn1 + attn2 * exp(lse2 - lse1)) / (1 + exp(lse2-lse1))
= attn1 * c + attn2 * (1-c), where c=1/(1 + exp(lse2-lse1)),
- lse
= log(exp(lse1) + exp(lse2))
= log(exp(lse1) * (1 + exp(lse2 - lse1)))
= lse1 + log(1 + exp(lse2 - lse1)) = lse1 - log(c)
"""
c = (1 / (1 + (lse2 - lse1).exp())).to(dtype=attn1.dtype)
attn = c * attn1 + (1-c) * attn2
lse = lse1 - (c + torch.finfo(lse1.dtype).eps).log()
return attn, lse
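# Minimal numerical check of the merging identity in the docstring above (standalone torch
# sketch, not part of the repository): splitting the keys into two halves and merging the
# partial attentions with the same formula reproduces full softmax attention.
import torch

b, h, n, d, m = 1, 1, 4, 8, 6
q, k, v = torch.randn(b, h, n, d), torch.randn(b, h, m, d), torch.randn(b, h, m, d)
s = q @ k.transpose(-1, -2)  # raw scores, shape [b, h, n, m]

def partial_attention(scores, values):
    lse = torch.logsumexp(scores, dim=-1, keepdim=True)
    return torch.softmax(scores, dim=-1) @ values, lse

attn1, lse1 = partial_attention(s[..., :3], v[..., :3, :])
attn2, lse2 = partial_attention(s[..., 3:], v[..., 3:, :])

c = 1 / (1 + (lse2 - lse1).exp())       # same merge coefficient as add_self_attentions
merged = c * attn1 + (1 - c) * attn2
full = torch.softmax(s, dim=-1) @ v
print(torch.allclose(merged, full, atol=1e-5))  # expected: True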
# Path: src/attn_utils.py
def indexing(x, indices, chunk_size=-1):
"""
inputs:
- x: 4d-tensor with shape [b, h, n, d]
- indices: 3d-tensor with shape [b, h, s] where each entry should be in [0, n-1]
output:
- out: 4d-tensor with shape [b, h, s, d] where out[i,j] = x[i,j][indices[i,j],:]
A naive implementation:
out = torch.zeros(b, h, s, d)
for i in range(b):
for j in range(h):
out[i,j] = x[i,j][idx[i,j],:]
return out
"""
if chunk_size < 0 or (chunk_size > 0 and x.shape[-2] % chunk_size == 0):
return x.gather(2, indices.unsqueeze(-1).expand(-1, -1, -1, x.shape[-1]))
else:
x = x.gather(2, indices.unsqueeze(-1).expand(-1, -1, -1, x.shape[-1]))
new_n = math.ceil(x.shape[2] / chunk_size) * chunk_size
if new_n <= 0 or new_n - x.shape[2] <= 0:
import pdb;
pdb.set_trace();
return torch.nn.functional.pad(x, (0, 0, 0, new_n - x.shape[2]), mode='constant', value=0.)
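# Quick standalone check of the gather-based indexing above against the naive loop from the
# docstring (default chunk_size, so the padding branch is not exercised).
import torch

b, h, n, s, d = 2, 3, 5, 4, 6
x = torch.randn(b, h, n, d)
idx = torch.randint(0, n, (b, h, s))
fast = x.gather(2, idx.unsqueeze(-1).expand(-1, -1, -1, d))
slow = torch.stack([torch.stack([x[i, j][idx[i, j]] for j in range(h)]) for i in range(b)])
print(torch.equal(fast, slow))  # expected: True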
# Path: unit_tests/test_hyper_attention.py
import unittest
import time
import torch
import triton
import math
import sys; sys.path.append("/home/ec2-user/workspace/hyper_attention")
from src.flash_attn_triton import flash_attn_func
from src.hyper_attn_triton import hyper_attn_func
from src.attn_utils import add_self_attentions, indexing
value = torch.randn((batch_size, head_size, seq_len, dim), device='cuda', dtype=dtype)
q_buckets_idx = torch.randint(0, 64, (batch_size, head_size, seq_len), device='cuda')
k_buckets_idx = torch.randint(0, 64, (batch_size, head_size, seq_len), device='cuda')
_, query_sort_idx = torch.sort(q_buckets_idx, dim=2, stable=True)
_, key_sort_idx = torch.sort(k_buckets_idx, dim=2, stable=True)
check_memory()
# compute attention by presorting queries and sorting back the output attention
t0 = time.time()
query_sort_idx_inv = torch.argsort(query_sort_idx, dim=2, stable=True)
query_sorted = indexing(query, query_sort_idx)
key_sorted = indexing(key, key_sort_idx)
value_sorted = indexing(value, key_sort_idx)
query_split_per_block = query_sorted.view(-1, 1, block_size, dim)
key_split_per_block = key_sorted.view(-1, 1, block_size, dim)
value_split_per_block = value_sorted.view(-1, 1, block_size, dim)
attn_block, lse_block = flash_attn_func(query_split_per_block.transpose(1, 2),
key_split_per_block.transpose(1, 2),
value_split_per_block.transpose(1, 2))
attn_sample, lse_sample = flash_attn_func(query.transpose(1, 2),
key[:, :, :sample_size, :].transpose(1, 2),
value[:, :, :sample_size, :].transpose(1, 2))
attn_block = attn_block.transpose(1, 2)
attn_block = attn_block.view(batch_size, head_size, query_sorted.shape[2], -1)
attn_sample = attn_sample.transpose(1, 2)
lse_block = lse_block[:, :, :query_sorted.shape[2]]
lse_block = lse_block.view(batch_size, head_size, query_sorted.shape[2], -1)
flash_attn_block = indexing(attn_block, query_sort_idx_inv)+attn_sample
lse_block = indexing(lse_block, query_sort_idx_inv)
attn, lse = add_self_attentions(flash_attn_block, lse_block, attn_sample, lse_sample.unsqueeze(-1))
lse = lse.squeeze(-1)
t1 = time.time()
check_memory()
print('the runtime of flash attention with permutation and indexing of queries:', t1-t0)
# torch lse computation
qk = query_split_per_block @ key_split_per_block.transpose(-1, -2) / math.sqrt(dim)
lse_block_torch = torch.logsumexp(qk, dim=-1, keepdim=True)
lse_block_torch = lse_block_torch.view(batch_size, head_size, query_sorted.shape[2], -1)
lse_block_torch = indexing(lse_block_torch, query_sort_idx_inv).squeeze(-1)
lse_sample_torch = torch.logsumexp(
query @ key[:, :, :sample_size, :].transpose(-1, -2) / math.sqrt(dim),
dim=-1,
keepdim=True
).squeeze(-1)
lse_torch = (lse_sample_torch.exp() + lse_block_torch.exp()).log().to(dtype=lse_block_torch.dtype)
print('diff between lse with sample and without: ', (lse_block_torch - lse_torch).norm(), lse_torch.norm())
print('error flash attention:', (lse - lse_torch).norm(), lse_torch.norm())
# compute attention kernel which permutes queries in triton
check_memory(0)
t2 = time.time()
attn_hyper, lse_hyper = hyper_attn_func(
query.transpose(1, 2),
key.transpose(1, 2),
value.transpose(1, 2),
query_sort_idx.transpose(1, 2),
key_sort_idx.transpose(1, 2),
block_size,
sample_size,
1./math.sqrt(dim),
smooth_block,
)
attn_hyper = attn_hyper.transpose(1, 2)
t3 = time.time()
check_memory()
print('the runtime of hyper attention:', t3 - t2)
print('diff lse hyper_attention and flash with indexing and permutation: ', (lse - lse_hyper).norm(), lse.norm())
print('error hyper attention lse: ', (lse_hyper - lse_torch).norm(), lse_torch.norm())
# check if dimension of V can be different from that of Q and K
value_small = value[:, :, :, :dim//2].clone()
attn_triton_unequal_dim, lse_triton_unequal_dim = hyper_attn_func(
query.transpose(1, 2),
key.transpose(1, 2),
value_small.transpose(1, 2),
query_sort_idx.transpose(1, 2),
key_sort_idx.transpose(1, 2),
block_size,
sample_size,
)
attn_triton_unequal_dim = attn_triton_unequal_dim.transpose(1, 2)
print('testing unequal dimension for V compared to Q, K')
print((attn_hyper[:, :, :, :dim//2] - attn_triton_unequal_dim).norm())
def test_gradient(self):
print()
print("2. this is gradients test")
dtype = torch.bfloat16
batch_size = 4
block_size = 256
dim = 64
head_size = 32
seq_len = 2048
sample_size = 128
query = torch.randn((batch_size, head_size, seq_len, dim), device='cuda', dtype=dtype, requires_grad=True)
key = torch.randn((batch_size, head_size, seq_len, dim), device='cuda', dtype=dtype, requires_grad=True)
value = torch.randn((batch_size, head_size, seq_len, dim), device='cuda', dtype=dtype, requires_grad=True)
do = torch.randn_like(value)
q_buckets_idx = torch.randint(0, 64, (batch_size, head_size, seq_len), device='cuda')
k_buckets_idx = torch.randint(0, 64, (batch_size, head_size, seq_len), device='cuda')
_, query_sort_idx = torch.sort(q_buckets_idx, dim=2, stable=True)
_, key_sort_idx = torch.sort(k_buckets_idx, dim=2, stable=True)
t0 = time.time()
query_sort_idx_inv = torch.argsort(query_sort_idx, dim=2, stable=True)
query_sorted = indexing(query, query_sort_idx)
key_sorted = indexing(key, key_sort_idx)
value_sorted = indexing(value, key_sort_idx)
query_split_per_block = query_sorted.view(-1, 1, block_size, dim)
| key_split_per_block = key_sorted.view(-1, 1, block_size, dim) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Psivant/femto
# Path: femto/fe/atm/_setup.py
def _offset_ligand(
ligand: parmed.Structure, offset: openmm.unit.Quantity
) -> parmed.Structure:
"""Offsets the coordinates of the specified ligand by a specified amount.
Args:
ligand: The ligand to offset.
offset: The amount to offset the ligand by.
Returns:
The offset ligand.
"""
# we copy in this strange way because parmed doesn't
    # copy all attrs correctly when using copy.deepcopy
with tempfile.TemporaryDirectory() as tmpdir:
ligand.save(f"{tmpdir}/ligand.parm7")
ligand.save(f"{tmpdir}/ligand.mol2")
ligand = parmed.amber.AmberParm(
f"{tmpdir}/ligand.parm7", f"{tmpdir}/ligand.mol2"
)
for atom in ligand.atoms:
atom.xx += offset[0].value_in_unit(openmm.unit.angstrom)
atom.xy += offset[1].value_in_unit(openmm.unit.angstrom)
atom.xz += offset[2].value_in_unit(openmm.unit.angstrom)
return ligand
# Path: femto/fe/atm/_setup.py
def select_displacement(
receptor: parmed.amber.AmberParm,
ligand_1: parmed.amber.AmberParm,
ligand_2: parmed.amber.AmberParm | None,
distance: openmm.unit.Quantity,
) -> openmm.unit.Quantity:
"""Attempts to automatically select a displacement vector for the ligands.
Args:
receptor: The receptor.
ligand_1: The first ligand positioned in the binding site.
ligand_2: The second ligand positioned in the binding site.
distance: The distance to translate ligands along the displacement vector by.
Returns:
The displacement vector.
"""
ligand_coords = numpy.vstack(
[ligand_1.coordinates] + ([] if ligand_2 is None else [ligand_2.coordinates])
)
receptor_coords = receptor.coordinates
directions = numpy.array(
[
[-1.0, -1.0, -1.0],
[+1.0, -1.0, -1.0],
[+1.0, +1.0, -1.0],
[-1.0, +1.0, -1.0],
[-1.0, -1.0, +1.0],
[+1.0, -1.0, +1.0],
[+1.0, +1.0, +1.0],
[-1.0, +1.0, +1.0],
]
)
directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)
closest_distances = []
for direction in directions:
displacement = direction * distance.value_in_unit(openmm.unit.angstrom)
offset_coords = ligand_coords + displacement
distances = scipy.spatial.distance.cdist(offset_coords, receptor_coords)
closest_distances.append(distances.min())
direction = directions[numpy.argmax(closest_distances)]
return direction.flatten() * distance
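# A minimal, self-contained sketch of the same diagonal search above, using synthetic
# numpy coordinates (the toy arrays and the 22 angstrom displacement are illustrative
# assumptions, not values taken from femto):
import numpy
import scipy.spatial.distance

ligand_coords = numpy.random.default_rng(0).normal(size=(10, 3))          # toy ligand
receptor_coords = numpy.random.default_rng(1).normal(size=(50, 3)) * 5.0  # toy receptor
distance = 22.0  # angstrom

# the eight unit vectors along the box diagonals
directions = numpy.array(
    [[sx, sy, sz] for sx in (-1.0, 1.0) for sy in (-1.0, 1.0) for sz in (-1.0, 1.0)]
)
directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)

# keep the direction whose displaced ligand has the largest closest-contact distance
closest = [
    scipy.spatial.distance.cdist(ligand_coords + d * distance, receptor_coords).min()
    for d in directions
]
print(directions[numpy.argmax(closest)] * distance)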
# Path: femto/fe/atm/_setup.py
def setup_system(
config: "femto.fe.atm.ATMSetupStage",
receptor: parmed.amber.AmberParm,
ligand_1: parmed.amber.AmberParm,
ligand_2: parmed.amber.AmberParm | None,
displacement: openmm.unit.Quantity,
receptor_ref_query: str | None,
ligand_1_ref_query: tuple[str, str, str] | None = None,
ligand_2_ref_query: tuple[str, str, str] | None = None,
) -> tuple[parmed.Structure, openmm.System]:
"""Prepares a system ready for running the ATM method.
Returns:
The prepared topology and OpenMM system object.
"""
_LOGGER.info(f"setting up an {'ABFE' if ligand_2 is None else 'RBFE'} calculation")
if receptor_ref_query is None:
# we need to select the receptor cavity atoms before offsetting any ligands
# as the query is distance based
_LOGGER.info("selecting receptor reference atoms")
receptor_ref_query = femto.fe.reference.select_protein_cavity_atoms(
receptor,
[ligand_1] + ([] if ligand_2 is None else [ligand_2]),
config.reference.receptor_cutoff,
)
ligand_1_ref_idxs, ligand_2_ref_idxs = None, None
# we carve out a 'cavity' where the first ligand will be displaced into during the
# ATM calculations. this should make equilibration at all states easier.
cavity_formers = [_offset_ligand(ligand_1, displacement)]
if ligand_2 is not None:
# we make sure that when placing solvent molecules we don't accidentally place
# any on top of the ligands in the cavity itself
cavity_formers.append(ligand_2)
(
ligand_1_ref_idxs,
ligand_2_ref_idxs,
) = femto.fe.reference.select_ligand_idxs(
ligand_1,
ligand_2,
config.reference.ligand_method,
ligand_1_ref_query,
ligand_2_ref_query,
)
assert ligand_2_ref_idxs is not None, "ligand 2 ref atoms were not selected"
ligand_2_ref_idxs = tuple(i + len(ligand_1.atoms) for i in ligand_2_ref_idxs)
ligand_2 = _offset_ligand(ligand_2, displacement)
_LOGGER.info("solvating system")
topology = femto.md.solvate.solvate_system(
receptor,
ligand_1,
ligand_2,
config.solvent,
displacement,
cavity_formers=cavity_formers,
)
_LOGGER.info("creating OpenMM system")
system = topology.createSystem(
nonbondedMethod=openmm.app.PME,
nonbondedCutoff=0.9 * openmm.unit.nanometer,
constraints=openmm.app.HBonds,
rigidWater=True,
)
if config.apply_hmr:
_LOGGER.info("applying HMR.")
hydrogen_mass = config.hydrogen_mass
femto.md.system.apply_hmr(system, topology, hydrogen_mass)
ligand_1_idxs = list(range(len(ligand_1.atoms)))
ligand_2_idxs = None
if ligand_2 is not None:
ligand_2_idxs = [i + len(ligand_1_idxs) for i in range(len(ligand_2.atoms))]
if config.apply_rest:
_LOGGER.info("applying REST2.")
solute_idxs = ligand_1_idxs + ([] if ligand_2_idxs is None else ligand_2_idxs)
femto.md.rest.apply_rest(system, set(solute_idxs), config.rest_config)
_LOGGER.info("applying restraints.")
ligands = [ligand_1] + ([] if ligand_2 is None else [ligand_2])
idx_offset = sum(len(ligand.atoms) for ligand in ligands)
receptor_ref_mask = parmed.amber.AmberMask(receptor, receptor_ref_query).Selection()
receptor_ref_idxs = [i + idx_offset for i, m in enumerate(receptor_ref_mask) if m]
_LOGGER.info(f"receptor ref idxs={receptor_ref_idxs}")
_apply_atm_restraints(
system,
config.restraints,
ligand_1_com_idxs=ligand_1_idxs,
ligand_1_ref_idxs=ligand_1_ref_idxs,
ligand_2_com_idxs=ligand_2_idxs,
ligand_2_ref_idxs=ligand_2_ref_idxs,
receptor_ref_idxs=receptor_ref_idxs,
offset=displacement,
)
restraint_query = config.restraints.receptor_query
restraint_mask = parmed.amber.AmberMask(receptor, restraint_query).Selection()
restraint_idxs = [i + idx_offset for i, match in enumerate(restraint_mask) if match]
_apply_receptor_restraints(
system, config.restraints, {i: topology.positions[i] for i in restraint_idxs}
)
femto.md.utils.openmm.assign_force_groups(system)
return topology, system
# Path: femto/fe/tests/systems.py
TEMOA_SYSTEM = TestSystem(
directory=TEMOA_DATA_DIR,
receptor_name="temoa",
receptor_coords=TEMOA_DATA_DIR / "temoa.rst7",
receptor_params=TEMOA_DATA_DIR / "temoa.parm7",
receptor_cavity_mask="@1-40",
receptor_ref_atoms=("@1", "@2", "@3"),
ligand_1_name="g1",
ligand_1_coords=TEMOA_DATA_DIR / "g1.rst7",
ligand_1_params=TEMOA_DATA_DIR / "g1.parm7",
ligand_1_ref_atoms=("@8", "@6", "@4"),
ligand_2_name="g4",
ligand_2_coords=TEMOA_DATA_DIR / "g4.rst7",
ligand_2_params=TEMOA_DATA_DIR / "g4.parm7",
ligand_2_ref_atoms=("@3", "@5", "@1"),
)
# Path: femto/md/constants.py
LIGAND_1_RESIDUE_NAME = "L1"
# Path: femto/md/constants.py
LIGAND_2_RESIDUE_NAME = "R1"
# Path: femto/md/constants.py
class OpenMMForceGroup(enum.IntEnum):
"""Standard force groups to assign to common OpenMM forces to make them easier to
identify."""
BOND = 0
ANGLE = 1
DIHEDRAL = 2
NONBONDED = 3
COM_RESTRAINT = 4
POSITION_RESTRAINT = 5
ALIGNMENT_RESTRAINT = 6
BAROSTAT = 7
ATM = 8
OTHER = 16
# Path: femto/md/tests/mocking.py
def build_mock_structure(smiles: list[str]) -> parmed.Structure:
"""Build a mock structure from a list of SMILES patterns
Notes:
* A conformer is generated for each molecule.
Args:
smiles: A list of SMILES patterns.
Returns:
The mock structure.
"""
molecules = [Chem.MolFromSmiles(pattern) for pattern in smiles]
for molecule, pattern in zip(molecules, smiles, strict=True):
assert molecule is not None, f"{pattern} is not a valid SMILES pattern"
complex = Chem.Mol()
for i, molecule in enumerate(molecules):
molecule = Chem.AddHs(molecule)
AllChem.EmbedMolecule(molecule)
is_water = Chem.MolToSmiles(Chem.RemoveHs(molecule)) == "O"
residue_name = (
"WAT"
if is_water
else (
f"{molecule.GetAtomWithIdx(0).GetSymbol()}"
if molecule.GetNumAtoms() == 1
else "UNK"
)
)
symbol_count = collections.defaultdict(int)
for atom in molecule.GetAtoms():
atom_name = f"{atom.GetSymbol()}{symbol_count[atom.GetSymbol()] + 1}"
atom_info = Chem.AtomPDBResidueInfo(
atom_name.ljust(4, " "), atom.GetIdx(), "", residue_name, i
)
atom.SetMonomerInfo(atom_info)
symbol_count[atom.GetSymbol()] += 1
complex = Chem.CombineMols(complex, molecule)
with tempfile.NamedTemporaryFile(suffix=".pdb") as tmp_file:
Chem.MolToPDBFile(complex, tmp_file.name)
structure = parmed.load_file(tmp_file.name, structure=True)
return structure
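# A quick way to exercise the helper (assuming rdkit, parmed and the femto test package
# are importable; the SMILES strings below are arbitrary illustrative inputs):
from femto.md.tests.mocking import build_mock_structure

mock = build_mock_structure(["c1ccccc1", "O"])  # one benzene + one water, each embedded in 3D
print(len(mock.residues), len(mock.atoms))      # 2 residues, 12 + 3 atoms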
# Path: femto/md/utils/openmm.py
def all_close(
v1: openmm.unit.Quantity,
v2: openmm.unit.Quantity,
rtol=1.0e-5,
atol=1.0e-8,
equal_nan=False,
) -> bool:
"""Compares if all values in two unit wrapped array are close using
``numpy.allclose``
"""
if not v1.unit.is_compatible(v2.unit):
return False
if v1.shape != v2.shape:
return False
return numpy.allclose(
v1.value_in_unit(v1.unit),
v2.value_in_unit(v1.unit),
atol=atol,
rtol=rtol,
equal_nan=equal_nan,
)
# Path: femto/fe/tests/atm/test_setup.py
import collections
import numpy
import openmm.app
import openmm.unit
import parmed
import pytest
import femto.md.config
import femto.md.system
import femto.fe.atm
from femto.fe.atm._setup import _offset_ligand, select_displacement, setup_system
from femto.fe.tests.systems import TEMOA_SYSTEM
from femto.md.constants import (
LIGAND_1_RESIDUE_NAME,
LIGAND_2_RESIDUE_NAME,
OpenMMForceGroup,
)
from femto.md.tests.mocking import build_mock_structure
from femto.md.utils.openmm import all_close
assert all_close(displacement, expected_displacement)
def test_offset_ligand():
ligand = build_mock_structure(["[Ar]"])
system = openmm.System()
system.addParticle(1.0)
force = openmm.NonbondedForce()
force.addParticle(0.0, 1.0, 0.0)
system.addForce(force)
ligand = parmed.openmm.load_topology(ligand.topology, system, ligand.coordinates)
coords_0 = ligand.coordinates
offset = numpy.array([5.0, 4.0, 3.0])
ligand_offset = _offset_ligand(ligand, offset * openmm.unit.angstrom)
assert numpy.allclose(ligand.coordinates, coords_0)
assert numpy.allclose(ligand_offset.coordinates, coords_0 + offset)
def test_setup_system_abfe(temoa_ligand_1, temoa_receptor, mock_setup_config, mocker):
n_ligand_atoms = len(temoa_ligand_1.atoms)
n_receptor_atoms = len(temoa_receptor.atoms)
def mock_solvate_fn(receptor, lig_1, lig_2, *_, **__):
assert lig_2 is None
complex = lig_1 + receptor
complex.box = [100, 100, 100, 90, 90, 90]
return complex
mock_solvate = mocker.patch(
"femto.md.solvate.solvate_system",
autospec=True,
side_effect=mock_solvate_fn,
)
mock_apply_hmr = mocker.patch("femto.md.system.apply_hmr", autospec=True)
mock_apply_rest = mocker.patch("femto.md.rest.apply_rest", autospec=True)
mock_setup_config.apply_rest = True
expected_h_mass = 2.0 * openmm.unit.amu
mock_setup_config.hydrogen_mass = expected_h_mass
topology, system = setup_system(
mock_setup_config,
temoa_receptor,
temoa_ligand_1,
None,
numpy.ones(3) * 22.0 * openmm.unit.angstrom,
receptor_ref_query=":1",
ligand_1_ref_query=None,
ligand_2_ref_query=None,
)
assert isinstance(topology, parmed.Structure)
assert isinstance(system, openmm.System)
mock_solvate.assert_called_once()
mock_apply_hmr.assert_called_once_with(mocker.ANY, mocker.ANY, expected_h_mass)
mock_apply_rest.assert_called_once()
assert mock_apply_rest.call_args.args[1] == set(range(n_ligand_atoms))
assert len(topology.atoms) == system.getNumParticles()
assert len(topology[f":{LIGAND_1_RESIDUE_NAME}"].residues) == 1
forces = collections.defaultdict(list)
for force in system.getForces():
forces[force.getForceGroup()].append(force)
assert len(forces[OpenMMForceGroup.NONBONDED]) == 1
nonbonded_force = forces[OpenMMForceGroup.NONBONDED][0]
assert nonbonded_force.getNumParticles() == len(topology.atoms)
assert len(forces[OpenMMForceGroup.COM_RESTRAINT]) == 1
com_force = forces[OpenMMForceGroup.COM_RESTRAINT][0]
assert isinstance(com_force, openmm.CustomCentroidBondForce)
assert com_force.getNumBonds() == 1 # between ligand 1 com and receptor com
expected_ligand_com_idxs = tuple(range(n_ligand_atoms))
ligand_com_idxs, _ = com_force.getGroupParameters(0)
assert ligand_com_idxs == expected_ligand_com_idxs
expected_receptor_com_idxs = tuple(
range(n_ligand_atoms, n_ligand_atoms + n_receptor_atoms)
)
receptor_com_idxs, _ = com_force.getGroupParameters(1)
assert receptor_com_idxs == expected_receptor_com_idxs
# make sure the receptor position restraints are applied to the right atoms
restraint_forces = forces[OpenMMForceGroup.POSITION_RESTRAINT]
assert len(restraint_forces) == 1
restraint_force: openmm.CustomExternalForce = restraint_forces[0]
assert restraint_force.getNumParticles() == n_receptor_atoms
atom_idx, restraint_params = restraint_force.getParticleParameters(0)
assert atom_idx == n_ligand_atoms
x0, y0, z0, k, radius = restraint_params
assert numpy.isclose(x0, 1.76658000)
assert numpy.isclose(y0, 2.76679000)
assert numpy.isclose(z0, 2.33774000)
expected_k = mock_setup_config.restraints.receptor.k.value_in_unit_system(
openmm.unit.md_unit_system
)
expected_radius = (
mock_setup_config.restraints.receptor.radius
).value_in_unit_system(openmm.unit.md_unit_system)
assert numpy.isclose(k, expected_k)
| assert numpy.isclose(radius, expected_radius) |
====REPOSITORY====
# Repo Name: AIFSH/NativeDancer
# Path: nativedancer/helperdoc.py
DOC =\
{
'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',
'ffmpeg_not_installed': 'FFMpeg is not installed',
'install_dependency_help': 'select the variant of {dependency} to install',
'source_help': 'select a source image',
'target_help': 'select a target image or video',
'output_help': 'specify the output file or directory',
'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)',
'frame_processor_model_help': 'choose the model for the frame processor',
'frame_processor_blend_help': 'specify the blend factor for the frame processor',
'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)',
'keep_fps_help': 'preserve the frames per second (fps) of the target',
'keep_temp_help': 'retain temporary frames after processing',
'skip_audio_help': 'omit audio from the target',
'trim_frame_start_help': 'specify the start frame for extraction',
'trim_frame_end_help': 'specify the end frame for extraction',
'temp_frame_format_help': 'specify the image format used for frame extraction',
'temp_frame_quality_help': 'specify the image quality used for frame extraction',
'output_image_quality_help': 'specify the quality used for the output image',
'output_video_encoder_help': 'specify the encoder used for the output video',
'output_video_quality_help': 'specify the quality used for the output video',
'max_memory_help': 'specify the maximum amount of ram to be used (in gb)',
'execution_providers_help': 'choose from the available execution providers',
'execution_thread_count_help': 'specify the number of execution threads',
'execution_queue_count_help': 'specify the number of execution queries',
'skip_download_help': 'omit automate downloads and lookups',
'headless_help': 'run the program in headless mode',
'creating_temp': 'Creating temporary resources',
'extracting_frames_fps': 'Extracting frames with {fps} FPS',
'analysing': 'Analysing',
'processing': 'Processing',
'downloading': 'Downloading',
'temp_frames_not_found': 'Temporary frames not found',
'compressing_image': 'Compressing image',
'compressing_image_failed': 'Compressing image failed',
'merging_video_fps': 'Merging video with {fps} FPS',
'merging_video_failed': 'Merging video failed',
'skipping_audio': 'Skipping audio',
'restoring_audio': 'Restoring audio',
'restoring_audio_failed': 'Restoring audio failed',
'clearing_temp': 'Clearing temporary resources',
'processing_image_succeed': 'Processing to image succeed',
'processing_image_failed': 'Processing to image failed',
'processing_video_succeed': 'Processing to video succeed',
'processing_video_failed': 'Processing to video failed',
'model_download_not_done': 'Download of the model is not done',
'model_file_not_present': 'File of the model is not present',
'select_image_source': 'Select an image for source path',
'select_image_or_video_target': 'Select an image or video for target path',
'select_file_or_directory_output': 'Select an file or directory for output path',
'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded',
'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly',
'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded',
'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly',
'donate_button_label': 'DONATE',
'start_button_label': 'START ENHANCER',
'stop_button_label': 'STOP',
'clear_button_label': 'CLEAR',
'benchmark_runs_checkbox_group_label': 'BENCHMARK RUNS',
'benchmark_results_dataframe_label': 'BENCHMARK RESULTS',
'benchmark_cycles_slider_label': 'BENCHMARK CYCLES',
'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS',
'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT',
'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT',
'max_memory_slider_label': 'MAX MEMORY',
'output_image_or_video_label': 'OUTPUT',
'output_path_textbox_label': 'OUTPUT PATH',
'output_image_quality_slider_label': 'OUTPUT IMAGE QUALITY',
'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER',
'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY',
'preview_image_label': 'PREVIEW',
'preview_frame_slider_label': 'PREVIEW FRAME',
'frame_processors_checkbox_group_label': 'FRAME PROCESSORS',
'frame_enhancer_model_dropdown_label': 'FRAME ENHANCER MODEL',
'frame_enhancer_blend_slider_label': 'FRAME ENHANCER BLEND',
'common_options_checkbox_group_label': 'OPTIONS',
'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT',
'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY',
'trim_frame_start_slider_label': 'TRIM FRAME START',
'trim_frame_end_slider_label': 'TRIM FRAME END',
'source_file_label': 'SOURCE',
'target_file_label': 'TARGET',
'webcam_image_label': 'WEBCAM',
'webcam_mode_radio_label': 'WEBCAM MODE',
'webcam_resolution_dropdown': 'WEBCAM RESOLUTION',
'webcam_fps_slider': 'WEBCAM FPS',
'point': '.',
'comma': ',',
'colon': ':',
'question_mark': '?',
'exclamation_mark': '!',
'random_seed_help' : 'random seed for magicanimate generate',
'guidance_scale_help' : 'guidance scale for magicanimate generate',
'step_help' : 'step per frame for magicanimate generate',
'face_debugger_items_help': 'specify the face debugger items',
'face_analyser_order_help': 'specify the order used for the face analyser',
'face_analyser_age_help': 'specify the age used for the face analyser',
'face_analyser_gender_help': 'specify the gender used for the face analyser',
'face_detector_model_help': 'specify the model used for the face detector',
'face_detector_size_help': 'specify the size threshold used for the face detector',
'face_detector_score_help': 'specify the score threshold used for the face detector',
'face_selector_mode_help': 'specify the mode for the face selector',
'reference_face_position_help': 'specify the position of the reference face',
'reference_face_distance_help': 'specify the distance between the reference face and the target face',
'reference_frame_number_help': 'specify the number of the reference frame',
'face_mask_blur_help': 'specify the blur amount for face mask',
'face_mask_padding_help': 'specify the face mask padding (top, right, bottom, left) in percent',
'no_source_face_detected': 'No source face detected',
'face_analyser_order_dropdown_label': 'FACE ANALYSER ORDER',
'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE',
'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER',
'face_detector_model_dropdown_label': 'FACE DETECTOR MODEL',
'face_detector_size_dropdown_label': 'FACE DETECTOR SIZE',
'face_detector_score_slider_label': 'FACE DETECTOR SCORE',
'face_selector_mode_dropdown_label': 'FACE SELECTOR MODE',
'reference_face_gallery_label': 'REFERENCE FACE',
'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE',
'face_mask_blur_slider_label': 'FACE MASK BLUR',
'face_mask_padding_top_slider_label': 'FACE MASK PADDING TOP',
'face_mask_padding_bottom_slider_label': 'FACE MASK PADDING BOTTOM',
'face_mask_padding_left_slider_label': 'FACE MASK PADDING LEFT',
'face_mask_padding_right_slider_label': 'FACE MASK PADDING RIGHT',
'face_swapper_model_dropdown_label': 'FACE SWAPPER MODEL',
'face_enhancer_model_dropdown_label': 'FACE ENHANCER MODEL',
'face_enhancer_blend_slider_label': 'FACE ENHANCER BLEND',
'face_debugger_items_checkbox_group_label': 'FACE DEBUGGER ITEMS',
'animate_seed_label' : 'ANIMATE SEED',
'animate_step_label' : 'ANIMATE STEP',
'animate_scale_label': 'ANIMATE SCALE',
'animate_start_label': 'GENERATE ANIMATE',
'animate_video_label': 'ANIMATE VIDEO'
}
def get(key : str) -> str:
# Path: nativedancer/face_analyser.py
def get_many_faces(frame : Frame) -> List[Face]:
try:
faces_cache = get_faces_cache(frame)
if faces_cache:
faces = faces_cache
else:
faces = extract_faces(frame)
set_faces_cache(frame, faces)
if nativedancer.globals.face_analyser_order:
faces = sort_by_order(faces, nativedancer.globals.face_analyser_order)
if nativedancer.globals.face_analyser_age:
faces = filter_by_age(faces, nativedancer.globals.face_analyser_age)
if nativedancer.globals.face_analyser_gender:
faces = filter_by_gender(faces, nativedancer.globals.face_analyser_gender)
return faces
except (AttributeError, ValueError):
return []
# Path: nativedancer/face_analyser.py
def clear_face_analyser() -> Any:
global FACE_ANALYSER
FACE_ANALYSER = None
# Path: nativedancer/face_helper.py
def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:
normed_template = TEMPLATES.get(template) * size[1] / size[0]
affine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.LMEDS)[0]
crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)
return crop_frame, affine_matrix
# Path: nativedancer/face_helper.py
def paste_back(temp_frame : Frame, crop_frame: Frame, affine_matrix : Matrix, face_mask_blur : float, face_mask_padding : Padding) -> Frame:
inverse_matrix = cv2.invertAffineTransform(affine_matrix)
temp_frame_size = temp_frame.shape[:2][::-1]
mask_size = tuple(crop_frame.shape[:2])
mask_frame = create_static_mask_frame(mask_size, face_mask_blur, face_mask_padding)
inverse_mask_frame = cv2.warpAffine(mask_frame, inverse_matrix, temp_frame_size).clip(0, 1)
inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)
paste_frame = temp_frame.copy()
paste_frame[:, :, 0] = inverse_mask_frame * inverse_crop_frame[:, :, 0] + (1 - inverse_mask_frame) * temp_frame[:, :, 0]
paste_frame[:, :, 1] = inverse_mask_frame * inverse_crop_frame[:, :, 1] + (1 - inverse_mask_frame) * temp_frame[:, :, 1]
paste_frame[:, :, 2] = inverse_mask_frame * inverse_crop_frame[:, :, 2] + (1 - inverse_mask_frame) * temp_frame[:, :, 2]
return paste_frame
# Path: nativedancer/content_analyser.py
def clear_content_analyser() -> None:
global CONTENT_ANALYSER
CONTENT_ANALYSER = None
# Path: nativedancer/typing.py
# Path: nativedancer/utils.py
def conditional_download(download_directory_path : str, urls : List[str]) -> None:
with ThreadPoolExecutor() as executor:
for url in urls:
executor.submit(get_download_size, url)
for url in urls:
download_file_path = os.path.join(download_directory_path, os.path.basename(url))
total = get_download_size(url)
if is_file(download_file_path):
initial = os.path.getsize(download_file_path)
else:
initial = 0
if initial < total:
with tqdm(total = total, initial = initial, desc = helperdoc.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =') as progress:
subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
current = initial
while current < total:
if is_file(download_file_path):
current = os.path.getsize(download_file_path)
progress.update(current - progress.n)
# Path: nativedancer/utils.py
def resolve_relative_path(path : str) -> str:
return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
# Path: nativedancer/utils.py
def is_image(image_path : str) -> bool:
if is_file(image_path):
mimetype = filetype.guess(image_path).mime
return bool(mimetype and mimetype.startswith('image/'))
return False
# Path: nativedancer/utils.py
def is_video(video_path : str) -> bool:
if is_file(video_path):
mimetype = filetype.guess(video_path).mime
return bool(mimetype and mimetype.startswith('video/'))
return False
# Path: nativedancer/utils.py
def is_file(file_path : str) -> bool:
return bool(file_path and os.path.isfile(file_path))
# Path: nativedancer/utils.py
def is_download_done(url : str, file_path : str) -> bool:
if is_file(file_path):
return get_download_size(url) == os.path.getsize(file_path)
return False
# Path: nativedancer/utils.py
def create_metavar(ranges : List[Any]) -> str:
return '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'
# Path: nativedancer/utils.py
def update_status(message : str, scope : str = 'NATIVEDANCER.CORE') -> None:
print('[' + scope + '] ' + message)
# Path: nativedancer/vision.py
def read_image(image_path : str) -> Optional[Frame]:
if image_path:
return cv2.imread(image_path)
return None
# Path: nativedancer/vision.py
@lru_cache(maxsize = 128)
def read_static_image(image_path : str) -> Optional[Frame]:
return read_image(image_path)
# Path: nativedancer/vision.py
def write_image(image_path : str, frame : Frame) -> bool:
if image_path:
return cv2.imwrite(image_path, frame)
return False
# Path: nativedancer/processors/frame/globals.py
# Path: nativedancer/processors/frame/choices.py
# Path: nativedancer/processors/frame/modules/face_enhancer.py
from typing import Any, List, Dict, Literal, Optional
from argparse import ArgumentParser
from nativedancer import helperdoc
from nativedancer.face_analyser import get_many_faces, clear_face_analyser
from nativedancer.face_helper import warp_face, paste_back
from nativedancer.content_analyser import clear_content_analyser
from nativedancer.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from nativedancer.utils import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
from nativedancer.vision import read_image, read_static_image, write_image
from nativedancer.processors.frame import globals as frame_processors_globals
from nativedancer.processors.frame import choices as frame_processors_choices
import cv2
import threading
import numpy
import onnxruntime
import nativedancer.globals
import nativedancer.processors.frame.core as frame_processors
FRAME_PROCESSOR = None
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
THREAD_LOCK : threading.Lock = threading.Lock()
NAME = 'NATIVEDANCER.FRAME_PROCESSOR.FACE_ENHANCER'
MODELS : Dict[str, ModelValue] =\
{
'codeformer':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx',
'path': resolve_relative_path('../weights/face_enhancer/codeformer.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.2':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.2.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.3':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.3.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gfpgan_1.4':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.4.onnx'),
'template': 'ffhq',
'size': (512, 512)
},
'gpen_bfr_256':
{
'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx',
'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_256.onnx'),
| 'template': 'arcface_v2', |
====REPOSITORY====
# Repo Name: ethanweber/nerfiller
# Path: nerfiller/dreambooth/dataset.py
class EquiDataset(Dataset):
"""
A dataset that operates on an equirectangular image and masks.
"""
def __init__(
self,
instance_prompt: str,
tokenizer,
dataset_type: str,
dataset_name: Path,
fov: float = 90.0,
length: int = 100,
resolution: int = 512,
scale_factor: int = 4,
max_distance: float = 10.0,
tile_images: bool = False,
tile_images_percentage: float = 0.5,
mask_type: str = "rectangle",
device: str = "cuda:0",
):
self.instance_prompt = instance_prompt
self.tokenizer = tokenizer
self.fov = fov
self.length = length
self.resolution = resolution
self.scale_factor = scale_factor
self.max_distance = max_distance
self.tile_images = tile_images
self.tile_images_percentage = tile_images_percentage
self.mask_type = mask_type
self.device = device
image, distance = image_distance_from_dataset(
dataset_type=dataset_type,
dataset_name=dataset_name,
device=self.device,
max_distance=self.max_distance,
)
self.diameter = distance[distance != 0.0].min()
self.equi_image = EquiImage(
num_images=1,
width=image.shape[-1],
height=image.shape[-2],
device=self.device,
)
self.equi_image.image.data = image
self.equi_image.distance = distance
self.text_inputs = tokenize_prompt(self.tokenizer, self.instance_prompt, tokenizer_max_length=None)
self.image_transforms = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self.length
def get_crop(self, index):
num_crops = 1
image_indices = torch.zeros((num_crops)).int().to(self.device)
yaws = torch.rand((num_crops), dtype=torch.float32) * 360
pitch_range = [-55, 55]
pitches = torch.rand((num_crops), dtype=torch.float32) * (pitch_range[1] - pitch_range[0]) + pitch_range[0]
with torch.no_grad():
ei_output = self.equi_image.forward(
image_indices,
yaws,
pitches,
self.fov,
self.fov,
self.resolution,
self.resolution,
)
image, distance = ei_output.image, ei_output.distance
return image, distance
def get_info(self, index):
image, distance = self.get_crop(index)
if self.mask_type == "rectangle":
mask = random_mask_custom((self.resolution, self.resolution), max_np=20, min_np=10)[None, None]
elif self.mask_type == "depth-aware":
depth = distance_to_depth(distance, self.fov, self.fov)
with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=False):
mask, rendered_image = create_depth_aware_mask(
image,
distance,
self.fov,
max_distance=float(self.diameter),
resolution=self.resolution,
scale_factor=self.scale_factor,
)
mask = mask[None, None]
rendered_image = rendered_image.permute(2, 0, 1)[None]
return image, distance, depth, mask, rendered_image
def __getitem__(self, index):
example = {}
if self.tile_images and torch.rand(1).item() < self.tile_images_percentage:
# tile 4 images
image1, distance1, depth1, mask1, rendered_image1 = self.get_info(index)
image2, distance2, depth2, mask2, rendered_image2 = self.get_info(index)
image3, distance3, depth3, mask3, rendered_image3 = self.get_info(index)
image4, distance4, depth4, mask4, rendered_image4 = self.get_info(index)
image = torch.cat(
[
torch.cat([image1, image2], dim=-1),
torch.cat([image3, image4], dim=-1),
],
dim=-2,
)
image = torch.nn.functional.interpolate(image, size=(self.resolution, self.resolution))
distance = torch.cat(
[
torch.cat([distance1, distance2], dim=-1),
torch.cat([distance3, distance4], dim=-1),
],
dim=-2,
)
distance = torch.nn.functional.interpolate(distance, size=(self.resolution, self.resolution))
depth = torch.cat(
[
torch.cat([depth1, depth2], dim=-1),
torch.cat([depth3, depth4], dim=-1),
],
dim=-2,
)
depth = torch.nn.functional.interpolate(depth, size=(self.resolution, self.resolution))
mask = torch.cat(
[
torch.cat([mask1, mask2], dim=-1),
torch.cat([mask3, mask4], dim=-1),
],
dim=-2,
)
mask = torch.nn.functional.interpolate(mask, size=(self.resolution, self.resolution), mode="nearest")
rendered_image = torch.cat(
[
torch.cat([rendered_image1, rendered_image2], dim=-1),
torch.cat([rendered_image3, rendered_image4], dim=-1),
],
dim=-2,
)
rendered_image = torch.nn.functional.interpolate(rendered_image, size=(self.resolution, self.resolution))
else:
image, distance, depth, mask, rendered_image = self.get_info(index)
# shape is [3,H,W] or [1,H,W]
example["image"] = image[0]
example["distance"] = distance[0]
example["depth"] = depth[0]
example["rendered_image"] = rendered_image[0]
example["mask"] = mask[0]
# text stuff
example["input_ids"] = self.text_inputs.input_ids[0] # shape (seq_len,)
example["attention_mask"] = self.text_inputs.attention_mask[0] # shape (seq_len,)
return example
# Path: nerfiller/dreambooth/dataset.py
class NerfbustersDataset(Dataset):
"""
A dataset that operates on a folder of images.
"""
def __init__(
self,
instance_prompt: str,
tokenizer,
dataset_type: str,
dataset_name: Path,
fov: float = 90.0,
path_prefix: Path = Path("data"),
tile_images: bool = False,
tile_images_percentage: float = 0.5,
length: int = 100,
resolution: int = 512,
scale_factor: int = 4,
mask_type: str = "rectangle",
device: str = "cuda:0",
):
assert dataset_type == "nerfbusters"
self.instance_prompt = instance_prompt
self.tokenizer = tokenizer
self.path_prefix = path_prefix
self.dataset_type = dataset_type
self.dataset_name = dataset_name
self.fov = fov
self.resolution = resolution
self.scale_factor = scale_factor
self.mask_type = mask_type
self.device = device
self.image_folder = self.path_prefix / self.dataset_type / self.dataset_name / "images"
self.image_filenames = sorted(glob.glob(str(self.image_folder / "*")))
if self.mask_type == "train-dist":
self.mask_filenames = sorted(
glob.glob(str(self.path_prefix / self.dataset_type / self.dataset_name / "masks" / "*"))
)
if len(self.mask_filenames) == 0:
raise ValueError("no filenames in mask folder")
self.mask_transforms = v2.Compose(
[
v2.RandomResizedCrop(
size=(512, 512),
antialias=True,
interpolation=InterpolationMode.NEAREST,
),
v2.RandomHorizontalFlip(p=0.5),
v2.RandomVerticalFlip(p=0.5),
]
)
self.text_inputs = tokenize_prompt(self.tokenizer, self.instance_prompt, tokenizer_max_length=None)
self.image_transforms = v2.Compose(
[
v2.RandomResizedCrop(size=(512, 512), antialias=True),
v2.RandomHorizontalFlip(p=0.5),
]
)
if self.mask_type == "depth-aware":
self.depth_inpainter = DepthInpainter(depth_method="zoedepth", device=self.device)
self.idx_to_depth = {}
def __len__(self):
return len(self.image_filenames)
def get_crop(self, index):
idx = random.randint(0, len(self.image_filenames) - 1)
filename = self.image_filenames[idx]
image = torch.from_numpy(mediapy.read_image(filename)) / 255.0
image = image.permute(2, 0, 1)
if self.mask_type == "depth-aware":
if idx not in self.idx_to_depth:
with torch.no_grad():
depth = (
self.depth_inpainter.get_depth(image=image[None].to(self.depth_inpainter.device))[0]
.detach()
.cpu()
)
self.idx_to_depth[idx] = depth
else:
depth = self.idx_to_depth[idx]
else:
depth = torch.zeros_like(image[:1])
image, depth = torch.split(self.image_transforms(torch.cat([image, depth])), [3, 1])
return image, depth
def __getitem__(self, index):
example = {}
image, depth = self.get_crop(index)
if self.mask_type == "rectangle":
mask = random_mask_custom((self.resolution, self.resolution), max_np=20, min_np=10)[None]
elif self.mask_type == "depth-aware":
with torch.autocast(device_type="cuda", dtype=torch.float16, enabled=False):
mask, rendered_image = create_depth_aware_mask(
image[None].to(self.device),
depth[None].to(self.device),
self.fov,
max_distance=depth.median().item(),
resolution=self.resolution,
scale_factor=self.scale_factor,
)
mask = mask[None].detach().cpu()
elif self.mask_type == "train-dist":
idx = random.randint(0, len(self.mask_filenames) - 1)
filename = self.mask_filenames[idx]
mask = 1.0 - torch.from_numpy(mediapy.read_image(filename))[None] / 255.0
mask = self.mask_transforms(mask)
# shape is [3,H,W] or [1,H,W]
example["image"] = image
example["mask"] = mask
example["depth"] = torch.zeros_like(mask)
# text stuff
example["input_ids"] = self.text_inputs.input_ids[0] # shape (seq_len,)
example["attention_mask"] = self.text_inputs.attention_mask[0] # shape (seq_len,)
return example
# Path: nerfiller/utils/diff_utils.py
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=max_length,
return_tensors="pt",
)
return text_inputs
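# Usage sketch (assumes the transformers CLIP tokenizer that Stable Diffusion checkpoints
# ship with; the model id below is an illustrative assumption, not taken from nerfiller):
from transformers import AutoTokenizer
from nerfiller.utils.diff_utils import tokenize_prompt

clip_tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-large-patch14")
inputs = tokenize_prompt(clip_tokenizer, "a photo of an empty room")
print(inputs.input_ids.shape)  # torch.Size([1, 77]) -- padded to the tokenizer's max length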
# Path: nerfiller/utils/diff_utils.py
def encode_prompt(text_encoder, input_ids, attention_mask, text_encoder_use_attention_mask=None):
text_input_ids = input_ids.to(text_encoder.device)
if text_encoder_use_attention_mask:
attention_mask = attention_mask.to(text_encoder.device)
else:
attention_mask = None
prompt_embeds = text_encoder(
text_input_ids,
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
return prompt_embeds
# Path: nerfiller/utils/image_utils.py
def get_inpainted_image_row(
image: Float[Tensor, "B 3 H W"],
mask: Float[Tensor, "B 1 H W"],
inpainted_image: Optional[Float[Tensor, "B 3 H W"]] = None,
color: Tuple[float, float, float] = Colors.NEON_PINK.value,
show_original: bool = False,
):
"""Returns an image concatenated along the x-axis. It has the following form:
image with inpaint regions highlighted | image with inpainted regions
Inpaint where mask == 1.
The default color is neon pink.
If the inpainted image is None, then just show the `image with inpaint regions highlighted`.
"""
device = image.device
c = torch.tensor(color, device=device).view(1, 3, 1, 1)
color_image = torch.ones_like(image) * c
image_with_highlights = torch.where(mask == 1, color_image, image)
image_list = [image_with_highlights]
if inpainted_image is not None:
image_list = image_list + [inpainted_image]
if show_original:
image_list = [image] + image_list
im = torch.cat(image_list, dim=-2)
return im
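# Smoke test with random tensors (illustrative only; note that, as written above, the
# concatenation runs along dim=-2, so the panels are stacked vertically):
import torch
from nerfiller.utils.image_utils import get_inpainted_image_row

image = torch.rand(1, 3, 64, 64)
mask = (torch.rand(1, 1, 64, 64) > 0.5).float()
inpainted = torch.rand(1, 3, 64, 64)
row = get_inpainted_image_row(image, mask, inpainted_image=inpainted, show_original=True)
print(row.shape)  # torch.Size([1, 3, 192, 64]) -- original, highlighted, inpainted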
# Path: nerfiller/scripts/train_dreambooth_lora.py
import argparse
import gc
import itertools
import logging
import math
import multiprocessing
import os
import shutil
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import transformers
import diffusers
import wandb
import xformers
import bitsandbytes as bnb
from pathlib import Path
from typing import Dict
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
StableDiffusionInpaintPipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.loaders import LoraLoaderMixin, text_encoder_lora_state_dict
from diffusers.models.attention_processor import (
AttnAddedKVProcessor,
AttnAddedKVProcessor2_0,
LoRAAttnAddedKVProcessor,
LoRAAttnProcessor,
LoRAAttnProcessor2_0,
SlicedAttnAddedKVProcessor,
)
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from nerfiller.dreambooth.dataset import EquiDataset, NerfbustersDataset
from nerfiller.utils.diff_utils import tokenize_prompt, encode_prompt
from nerfiller.utils.image_utils import get_inpainted_image_row
from datetime import datetime
from transformers import CLIPTextModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesModelWithTransformation,
)
from transformers import T5EncoderModel
#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.18.0.dev0")
logger = get_logger(__name__)
def prepare_mask_and_masked_image(image, mask):
image = np.array(image.convert("RGB"))
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32) / 255.0
| mask = mask[None, None] |
====REPOSITORY====
# Repo Name: nnanhuang/Customize-it-3D
# Path: nerf/utils.py
def custom_meshgrid(*args):
# ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid
if pver.parse(torch.__version__) < pver.parse('1.10'):
return torch.meshgrid(*args)
else:
return torch.meshgrid(*args, indexing='ij')
# Path: nerf/utils.py
def safe_normalize(x, eps=1e-20):
return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps, max=1e32))
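# Tiny check of the epsilon clamp (uses the helper defined above): all-zero rows
# normalize to zero instead of producing NaNs.
import torch

v = torch.tensor([[3.0, 4.0, 0.0],
                  [0.0, 0.0, 0.0]])
print(safe_normalize(v))  # [[0.6, 0.8, 0.0], [0.0, 0.0, 0.0]]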
# Path: nerf/renderer.py
import os
import math
import cv2
import trimesh
import open3d as o3d
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import mcubes
import raymarching
import xatlas
import nvdiffrast.torch as dr
from .utils import custom_meshgrid, safe_normalize
from meshutils import decimate_mesh, clean_mesh, poisson_mesh_reconstruction
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import binary_dilation, binary_erosion
n_step = max(min(N // n_alive, 8), 1)
xyzs, dirs, deltas = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, 128, perturb if step == 0 else False, dt_gamma, max_steps)
sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)
normals = (normals + 1) / 2
raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, normals, deltas, weights_sum, depth, image, normal, T_thresh)
rays_alive = rays_alive[rays_alive >= 0]
step += n_step
# mix background color
if bg_color is None:
bg_color = 1
image = image + (1 - weights_sum).unsqueeze(-1) * bg_color
image = image.view(*prefix, 3)
if not self.training:
normal = normal + (1 - weights_sum).unsqueeze(-1) * bg_color
normal = normal.view(*prefix, 3)
bg_depth = self.opt.max_depth
depth = depth + (1 - weights_sum) * bg_depth
if depth_scale is not None:
depth = depth.view(*prefix, 1) * depth_scale.view(*prefix, 1)
else:
depth = depth.view(*prefix, 1)
weights_sum = weights_sum.reshape(*prefix)
mask = (nears < fars).reshape(*prefix)
results['image'] = image
results['depth'] = depth
results['weights_sum'] = weights_sum
results['mask'] = mask
if not self.training:
results['normal'] = normal
return results
@torch.no_grad()
def update_extra_state(self, decay=0.95, S=128):
# call before each epoch to update extra states.
if not self.cuda_ray:
return
### update density grid
tmp_grid = - torch.ones_like(self.density_grid)
X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)
Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)
Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)
for xs in X:
for ys in Y:
for zs in Z:
# construct points
xx, yy, zz = custom_meshgrid(xs, ys, zs)
coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)
indices = raymarching.morton3D(coords).long() # [N]
xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]
# cascading
for cas in range(self.cascade):
bound = min(2 ** cas, self.bound)
half_grid_size = bound / self.grid_size
# scale to current cascade's resolution
cas_xyzs = xyzs * (bound - half_grid_size)
# add noise in [-hgs, hgs]
cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size
# query density
sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()
# assign
tmp_grid[cas, indices] = sigmas
# ema update
valid_mask = self.density_grid >= 0
self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])
self.mean_density = torch.mean(self.density_grid[valid_mask]).item()
self.iter_density += 1
# convert to bitfield
density_thresh = min(self.mean_density, self.density_thresh)
self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)
### update step counter
total_step = min(16, self.local_step)
if total_step > 0:
self.mean_count = int(self.step_counter[:total_step, 0].sum().item() / total_step)
self.local_step = 0
# print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f} | [step counter] mean={self.mean_count}')
def render(self, rays_o, rays_d, depth_scale=None, staged=False, max_ray_batch=4096, **kwargs):
# rays_o, rays_d: [B, N, 3], assumes B == 1
# return: pred_rgb: [B, N, 3]
if self.cuda_ray:
_run = self.run_cuda
else:
_run = self.run
B, N = rays_o.shape[:2]
device = rays_o.device
# never stage when cuda_ray
if staged and not self.cuda_ray:
depth = torch.empty((B, N, 1), device=device)
image = torch.empty((B, N, 3), device=device)
weights_sum = torch.empty((B, N), device=device)
for b in range(B):
head = 0
while head < N:
tail = min(head + max_ray_batch, N)
results_ = _run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)
| depth[b:b+1, head:tail] = results_['depth'] |
====REPOSITORY====
# Repo Name: TaoHuang13/diffusion_reward
# Path: diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
# Path: diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py
def is_primary():
return get_rank() == 0
# Path: diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py
def reduce_dict(input_dict, average=True):
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
keys = []
values = []
for k in sorted(input_dict.keys()):
keys.append(k)
values.append(input_dict[k])
values = torch.stack(values, 0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
values /= world_size
reduced_dict = {k: v for k, v in zip(keys, values)}
return reduced_dict
# Path: diffusion_reward/models/video_models/vqdiffusion/engine/ema.py
class EMA(object):
def __init__(self,
model,
decay=0.99,
update_interval=1,
device=torch.device('cpu')):
self.decay = decay
self.update_iterval = update_interval
self.device = device
self.model = model
with torch.no_grad():
if hasattr(model, 'get_ema_model') and callable(model.get_ema_model):
self.ema_model = copy.deepcopy(model.get_ema_model())
self.cur_state_dict = model.get_ema_model().state_dict()
else:
self.ema_model = copy.deepcopy(model)
self.cur_state_dict = model.state_dict()
self.ema_model.to(self.device)
self.cur_state_dict = {k: v.clone().to(self.device) for k, v in self.cur_state_dict.items()}
def update(self, iteration):
if (iteration + 1) % self.update_iterval == 0:
# print('{} Update ema'.format(iteration))
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
cur_state_dict = self.model.get_ema_model().state_dict()
else:
cur_state_dict = self.model.state_dict()
ema_state_dict = self.ema_model.state_dict()
for k in ema_state_dict.keys():
ema_state_dict[k] = ema_state_dict[k] * self.decay + cur_state_dict[k].clone().to(self.device) * (1-self.decay)
self.ema_model.load_state_dict(ema_state_dict)
def state_dict(self):
return self.ema_model.state_dict()
def load_state_dict(self, state_dict, strict=True):
state_dict_ = {k: v.clone().to(self.device) for k, v in state_dict.items()}
self.ema_model.load_state_dict(state_dict_, strict=strict)
def modify_to_inference(self):
# get current model
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
self.cur_state_dict = self.model.get_ema_model().state_dict()
else:
self.cur_state_dict = self.model.state_dict()
self.cur_state_dict = {k: v.clone().to(self.device) for k, v in self.cur_state_dict.items()}
ema_state_dict = self.ema_model.state_dict()
ema_state_dict = {k: v.to(self.model.device) for k, v in ema_state_dict.items()}
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
self.model.get_ema_model().load_state_dict(ema_state_dict)
else:
self.model.load_state_dict(ema_state_dict)
def modify_to_train(self):
self.cur_state_dict = {k: v.clone().to(self.model.device) for k, v in self.cur_state_dict.items()}
if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):
self.model.get_ema_model().load_state_dict(self.cur_state_dict)
else:
self.model.load_state_dict(self.cur_state_dict)
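# Minimal training-loop sketch for the EMA wrapper above (toy model and optimizer,
# purely illustrative):
import torch

net = torch.nn.Linear(4, 2)
ema = EMA(net, decay=0.99, update_interval=1, device=torch.device("cpu"))
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)

for iteration in range(20):
    loss = net(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(iteration)  # blend the live weights into the shadow copy

# evaluate with the smoothed weights without touching the training model
eval_net = torch.nn.Linear(4, 2)
eval_net.load_state_dict(ema.state_dict())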
# Path: diffusion_reward/models/video_models/vqdiffusion/engine/lr_scheduler.py
class ReduceLROnPlateauWithWarmup(object):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This scheduler reads a metrics
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Args:
optimizer (Optimizer): Wrapped optimizer.
mode (str): One of `min`, `max`. In `min` mode, lr will
be reduced when the quantity monitored has stopped
decreasing; in `max` mode it will be reduced when the
quantity monitored has stopped increasing. Default: 'min'.
factor (float): Factor by which the learning rate will be
reduced. new_lr = lr * factor. Default: 0.1.
patience (int): Number of epochs with no improvement after
which learning rate will be reduced. For example, if
`patience = 2`, then we will ignore the first 2 epochs
with no improvement, and will only decrease the LR after the
3rd epoch if the loss still hasn't improved then.
Default: 10.
threshold (float): Threshold for measuring the new optimum,
to only focus on significant changes. Default: 1e-4.
threshold_mode (str): One of `rel`, `abs`. In `rel` mode,
dynamic_threshold = best * ( 1 + threshold ) in 'max'
mode or best * ( 1 - threshold ) in `min` mode.
In `abs` mode, dynamic_threshold = best + threshold in
`max` mode or best - threshold in `min` mode. Default: 'rel'.
cooldown (int): Number of epochs to wait before resuming
normal operation after lr has been reduced. Default: 0.
min_lr (float or list): A scalar or a list of scalars. A
lower bound on the learning rate of all param groups
or each group respectively. Default: 0.
eps (float): Minimal decay applied to lr. If the difference
between new and old lr is smaller than eps, the update is
ignored. Default: 1e-8.
verbose (bool): If ``True``, prints a message to stdout for
each update. Default: ``False``.
warmup_lr: float or None, the learning rate to be touched after warmup
warmup: int, the number of steps to warmup
"""
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
threshold=1e-4, threshold_mode='rel', cooldown=0,
min_lr=0, eps=1e-8, verbose=False, warmup_lr=None,
warmup=0):
if factor >= 1.0:
raise ValueError('Factor should be < 1.0.')
self.factor = factor
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
self.warmup_lr = warmup_lr
self.warmup = warmup
self.best = None
self.num_bad_epochs = None
self.mode_worse = None # the worse value for the chosen mode
self.eps = eps
self.last_epoch = 0
self._init_is_better(mode=mode, threshold=threshold,
threshold_mode=threshold_mode)
self._reset()
def _prepare_for_warmup(self):
if self.warmup_lr is not None:
if isinstance(self.warmup_lr, (list, tuple)):
if len(self.warmup_lr) != len(self.optimizer.param_groups):
raise ValueError("expected {} warmup_lrs, got {}".format(
len(self.optimizer.param_groups), len(self.warmup_lr)))
self.warmup_lrs = list(self.warmup_lr)
else:
self.warmup_lrs = [self.warmup_lr] * len(self.optimizer.param_groups)
else:
self.warmup_lrs = None
if self.warmup > self.last_epoch:
curr_lrs = [group['lr'] for group in self.optimizer.param_groups]
self.warmup_lr_steps = [max(0, (self.warmup_lrs[i] - curr_lrs[i])/float(self.warmup)) for i in range(len(curr_lrs))]
else:
self.warmup_lr_steps = None
def _reset(self):
"""Resets num_bad_epochs counter and cooldown counter."""
self.best = self.mode_worse
self.cooldown_counter = 0
self.num_bad_epochs = 0
def step(self, metrics):
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
epoch = self.last_epoch + 1
self.last_epoch = epoch
if epoch <= self.warmup:
self._increase_lr(epoch)
else:
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
if self.num_bad_epochs > self.patience:
self._reduce_lr(epoch)
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def _reduce_lr(self, epoch):
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr * self.factor, self.min_lrs[i])
if old_lr - new_lr > self.eps:
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: reducing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
def _increase_lr(self, epoch):
# used for warmup
for i, param_group in enumerate(self.optimizer.param_groups):
old_lr = float(param_group['lr'])
new_lr = max(old_lr + self.warmup_lr_steps[i], self.min_lrs[i])
param_group['lr'] = new_lr
if self.verbose:
print('Epoch {:5d}: increasing learning rate'
' of group {} to {:.4e}.'.format(epoch, i, new_lr))
@property
def in_cooldown(self):
return self.cooldown_counter > 0
def is_better(self, a, best):
if self.mode == 'min' and self.threshold_mode == 'rel':
rel_epsilon = 1. - self.threshold
return a < best * rel_epsilon
elif self.mode == 'min' and self.threshold_mode == 'abs':
return a < best - self.threshold
elif self.mode == 'max' and self.threshold_mode == 'rel':
rel_epsilon = self.threshold + 1.
return a > best * rel_epsilon
else: # mode == 'max' and epsilon_mode == 'abs':
return a > best + self.threshold
def _init_is_better(self, mode, threshold, threshold_mode):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if threshold_mode not in {'rel', 'abs'}:
raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
if mode == 'min':
self.mode_worse = np.inf
else: # mode == 'max':
self.mode_worse = -np.inf
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
self._prepare_for_warmup()
def state_dict(self):
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)
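# Usage mirrors torch's ReduceLROnPlateau, except that every step() call takes the
# monitored metric and the first `warmup` steps ramp the lr linearly towards `warmup_lr`
# (toy model and numbers below are illustrative only; the class above is assumed in scope):
import torch

net = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-5)
scheduler = ReduceLROnPlateauWithWarmup(
    optimizer, mode="min", factor=0.5, patience=2, warmup_lr=1e-3, warmup=5
)
for epoch in range(15):
    val_loss = 1.0  # pretend the validation loss has plateaued
    scheduler.step(val_loss)
    print(epoch, optimizer.param_groups[0]["lr"])
# the lr rises to 1e-3 over the first 5 steps, then halves after every
# `patience + 1` consecutive non-improving steps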
# Path: diffusion_reward/models/video_models/vqdiffusion/utils/misc.py
def format_seconds(seconds):
h = int(seconds // 3600)
m = int(seconds // 60 - h * 60)
s = int(seconds % 60)
d = int(h // 24)
h = h - d * 24
if d == 0:
if h == 0:
if m == 0:
ft = '{:02d}s'.format(s)
else:
ft = '{:02d}m:{:02d}s'.format(m, s)
else:
ft = '{:02d}h:{:02d}m:{:02d}s'.format(h, m, s)
else:
ft = '{:d}d:{:02d}h:{:02d}m:{:02d}s'.format(d, h, m, s)
return ft
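# For instance (with the helper above):
print(format_seconds(3661))   # 01h:01m:01s
print(format_seconds(90061))  # 1d:01h:01m:01s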
# Path: diffusion_reward/models/video_models/vqdiffusion/utils/misc.py
def get_model_parameters_info(model):
# for mn, m in model.named_modules():
parameters = {'overall': {'trainable': 0, 'non_trainable': 0, 'total': 0}}
for child_name, child_module in model.named_children():
parameters[child_name] = {'trainable': 0, 'non_trainable': 0}
for pn, p in child_module.named_parameters():
if p.requires_grad:
parameters[child_name]['trainable'] += p.numel()
else:
parameters[child_name]['non_trainable'] += p.numel()
parameters[child_name]['total'] = parameters[child_name]['trainable'] + parameters[child_name]['non_trainable']
parameters['overall']['trainable'] += parameters[child_name]['trainable']
parameters['overall']['non_trainable'] += parameters[child_name]['non_trainable']
parameters['overall']['total'] += parameters[child_name]['total']
# format the numbers
def format_number(num):
K = 2**10
M = 2**20
G = 2**30
if num > G: # K
uint = 'G'
num = round(float(num)/G, 2)
elif num > M:
uint = 'M'
num = round(float(num)/M, 2)
elif num > K:
uint = 'K'
num = round(float(num)/K, 2)
else:
uint = ''
return '{}{}'.format(num, uint)
def format_dict(d):
for k, v in d.items():
if isinstance(v, dict):
format_dict(v)
else:
d[k] = format_number(v)
format_dict(parameters)
return parameters
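# Note that format_number above uses binary prefixes: e.g. a child module with 3_000_000
# parameters is reported as '2.86M' (3_000_000 / 2**20), not '3.00M'.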
# Path: diffusion_reward/models/video_models/vqdiffusion/utils/misc.py
def instantiate_from_config(config):
if config is None:
return None
    if "target" not in config:
raise KeyError("Expected key `target` to instantiate.")
module, cls = config["target"].rsplit(".", 1)
cls = getattr(importlib.import_module(module, package=None), cls)
return cls(**config.get("params", dict()))
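# Example usage with a hypothetical config dict (any importable class works the same way):
#   instantiate_from_config({'target': 'torch.nn.Linear',
#                            'params': {'in_features': 4, 'out_features': 2}})
# imports torch.nn, resolves Linear, and returns nn.Linear(in_features=4, out_features=2).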
# Path: diffusion_reward/models/video_models/vqdiffusion/engine/solver.py
import copy
import math
import os
import time
import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from omegaconf import OmegaConf
from PIL import Image
from torch.optim.lr_scheduler import ReduceLROnPlateau
from ..distributed.distributed import get_rank, is_primary, reduce_dict
from ..engine.ema import EMA
from ..engine.lr_scheduler import ReduceLROnPlateauWithWarmup
from ..utils.misc import (format_seconds, get_model_parameters_info,
instantiate_from_config)
from torch.cuda.amp import GradScaler, autocast
try:
self.ema.load_state_dict(state_dict['ema'])
except:
model_dict = self.ema.state_dict()
temp_state_dict = {k:v for k,v in state_dict['ema'].items() if k in model_dict.keys()}
model_dict.update(temp_state_dict)
self.ema.load_state_dict(model_dict)
if 'clip_grad_norm' in state_dict and self.clip_grad_norm is not None:
self.clip_grad_norm.load_state_dict(state_dict['clip_grad_norm'])
# handle optimizer and scheduler
for op_sc_n, op_sc in state_dict['optimizer_and_scheduler'].items():
for k in op_sc:
if k in ['optimizer', 'scheduler']:
for kk in op_sc[k]:
if kk == 'module' and load_optimizer_and_scheduler:
self.optimizer_and_scheduler[op_sc_n][k][kk].load_state_dict(op_sc[k][kk])
elif load_others: # such as step_iteration, ...
self.optimizer_and_scheduler[op_sc_n][k][kk] = op_sc[k][kk]
elif load_others: # such as start_epoch, end_epoch, ....
self.optimizer_and_scheduler[op_sc_n][k] = op_sc[k]
self.logger.log_info('Resume from {}'.format(path))
def train_epoch(self):
self.model.train()
self.last_epoch += 1
if self.args.distributed:
self.dataloader['train_loader'].sampler.set_epoch(self.last_epoch)
epoch_start = time.time()
itr_start = time.time()
itr = -1
for itr, batch in enumerate(self.dataloader['train_loader']):
if itr == 0:
print("time2 is " + str(time.time()))
data_time = time.time() - itr_start
step_start = time.time()
self.last_iter += 1
loss = self.step(batch, phase='train')
# logging info
if self.logger is not None and self.last_iter % self.args.log_frequency == 0:
info = '{}: train'.format(self.args.exp_name)
info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])
for loss_n, loss_dict in loss.items():
info += ' ||'
loss_dict = reduce_dict(loss_dict)
info += '' if loss_n == 'none' else ' {}'.format(loss_n)
# info = info + ': Epoch {}/{} iter {}/{}'.format(self.last_epoch, self.max_epochs, self.last_iter%self.dataloader['train_iterations'], self.dataloader['train_iterations'])
for k in loss_dict:
info += ' | {}: {:.4f}'.format(k, float(loss_dict[k]))
self.logger.add_scalar(tag='train/{}/{}'.format(loss_n, k), scalar_value=float(loss_dict[k]), global_step=self.last_iter)
# log lr
lrs = self._get_lr(return_type='dict')
for k in lrs.keys():
lr = lrs[k]
self.logger.add_scalar(tag='train/{}_lr'.format(k), scalar_value=lrs[k], global_step=self.last_iter)
# add lr to info
info += ' || {}'.format(self._get_lr())
# add time consumption to info
spend_time = time.time() - self.start_train_time
itr_time_avg = spend_time / (self.last_iter + 1)
info += ' || data_time: {dt}s | fbward_time: {fbt}s | iter_time: {it}s | iter_avg_time: {ita}s | epoch_time: {et} | spend_time: {st} | left_time: {lt}'.format(
dt=round(data_time, 1),
it=round(time.time() - itr_start, 1),
fbt=round(time.time() - step_start, 1),
ita=round(itr_time_avg, 1),
et=format_seconds(time.time() - epoch_start),
st=format_seconds(spend_time),
lt=format_seconds(itr_time_avg*self.max_epochs*self.dataloader['train_iterations']-spend_time)
)
self.logger.log_info(info)
itr_start = time.time()
# modify here to make sure dataloader['train_iterations'] is correct
        assert itr >= 0, "Not enough data to form a single iteration!"
self.dataloader['train_iterations'] = itr + 1
def validate_epoch(self):
if 'validation_loader' not in self.dataloader:
val = False
else:
if isinstance(self.validation_epochs, int):
val = (self.last_epoch + 1) % self.validation_epochs == 0
else:
val = (self.last_epoch + 1) in self.validation_epochs
is_best = False
if val:
if self.args.distributed:
self.dataloader['validation_loader'].sampler.set_epoch(self.last_epoch)
self.model.eval()
overall_loss = None
epoch_start = time.time()
itr_start = time.time()
itr = -1
for itr, batch in enumerate(self.dataloader['validation_loader']):
data_time = time.time() - itr_start
step_start = time.time()
loss = self.step(batch, phase='val')
for loss_n, loss_dict in loss.items():
loss[loss_n] = reduce_dict(loss_dict)
if overall_loss is None:
overall_loss = loss
else:
for loss_n, loss_dict in loss.items():
for k, v in loss_dict.items():
overall_loss[loss_n][k] = (overall_loss[loss_n][k] * itr + loss[loss_n][k]) / (itr + 1)
if self.logger is not None and (itr+1) % self.args.log_frequency == 0:
info = '{}: val'.format(self.args.exp_name)
info = info + ': Epoch {}/{} | iter {}/{}'.format(self.last_epoch, self.max_epochs, itr, self.dataloader['validation_iterations'])
| for loss_n, loss_dict in loss.items(): |
====REPOSITORY====
# Repo Name: mmathew23/improved_edm
# Path: pipeline.py
class KarrasPipeline(DiffusionPipeline):
model_cpu_offload_seq = "unet"
def __init__(self, unet, scheduler, method='euler'):
super().__init__()
# we ignore this, just having a scheduler for HF compatibility
scheduler = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=unet, scheduler=scheduler)
self.trained_image_size = unet.config.sample_size
self.method = method
# Adjust noise levels based on what's supported by the network.
self.sigma_min = 0.002
self.sigma_max = 80
self.rho = 7
def step(self, x, t, num_inference_steps=50):
if self.method == 'euler':
return self.step_euler(x, t, num_inference_steps=num_inference_steps)
elif self.method == 'rk':
return self.step_rk(x, t, num_inference_steps=num_inference_steps)
else:
raise NotImplementedError()
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
eta: float = 0.0,
num_inference_steps: int = 50,
use_clipped_model_output: Optional[bool] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
S_churn: float = 0.0,
S_min: float = 0.0,
S_max: float = float("inf"),
S_noise: float = 1.0,
to_device: Optional[torch.device] = None,
second_order: bool = True,
class_labels: Optional[torch.Tensor] = None,
) -> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size, int):
image_shape = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype)
# image += 0.1 * torch.randn(
# (image.shape[0], image.shape[1], 1, 1), device=image.device)
# set step values
self.scheduler.set_timesteps(num_inference_steps)
# Time step discretization.
step_indices = torch.arange(num_inference_steps, dtype=torch.float64, device=image.device)
t_steps = (self.sigma_max ** (1 / self.rho) + step_indices / (num_inference_steps - 1) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho))) ** self.rho
t_steps = torch.cat([torch.as_tensor(t_steps), torch.zeros_like(t_steps[:1])]).to(dtype=torch.float16)
t_steps[-1] = 1e-6
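        # The schedule above spaces sigma uniformly in sigma**(1/rho) between sigma_max and
        # sigma_min (rho=7), which concentrates steps at low noise levels; the appended final
        # entry is nudged from 0 to 1e-6, presumably for numerical stability.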
image = image * t_steps[0]
for t in self.progress_bar(range(num_inference_steps)):
t_cur = t_steps[t]
t_next = t_steps[t + 1]
gamma = min(S_churn / num_inference_steps, math.sqrt(2) - 1) if S_min <= t_cur <= S_max else 0
t_hat = torch.as_tensor(t_cur + gamma * t_cur)
x_hat = image + (t_hat ** 2 - t_cur ** 2).sqrt() * S_noise * torch.randn_like(image)
denoised = self.unet(x_hat, t_hat, class_labels=class_labels).sample
d_cur = (x_hat - denoised) / t_hat
image = x_hat + (t_next - t_hat) * d_cur
if second_order and t < num_inference_steps - 1:
denoised = self.unet(image, t_next, class_labels=class_labels).sample
d_prime = (image - denoised) / t_next
image = x_hat + (t_next - t_hat) * (0.5 * d_cur + 0.5 * d_prime)
image = (image / 2 + 0.5).clamp(0, 1)
if output_type == "pil":
image = image.cpu()
image = self.numpy_to_pil(image.permute(0, 2, 3, 1).numpy())
elif output_type == "numpy":
image = image.cpu()
image = image.permute(0, 2, 3, 1).numpy()
else:
if to_device is not None:
image = image.to(to_device)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
# Path: model.py
class UNet2DModel(ModelMixin, ConfigMixin):
r"""
A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.
    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample. Dimensions must be a multiple of `2 ** (len(block_out_channels) -
1)`.
in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
time_embedding_type (`str`, *optional*, defaults to `"positional"`): Type of time embedding to use.
freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.
flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
Whether to flip sin to cos for Fourier time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D")`):
Tuple of downsample block types.
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2D"`):
Block type for middle of UNet, it can be either `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`.
up_block_types (`Tuple[str]`, *optional*, defaults to `("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D")`):
Tuple of upsample block types.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):
Tuple of block output channels.
layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.
mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.
norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization.
num_class_embeds (`int`, *optional*, defaults to `None`):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class
conditioning with `class_embed_type` equal to `None`.
"""
@register_to_config
def __init__(
self,
sample_size: Optional[Union[int, Tuple[int, int]]] = None,
in_channels: int = 3,
out_channels: int = 3,
center_input_sample: bool = False,
down_block_types: Tuple[str] = ("DownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D"),
up_block_types: Tuple[str] = ("AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "UpBlock2D"),
block_out_channels: Tuple[int] = (224, 448, 672, 896),
layers_per_block: int = 2,
mid_block_scale_factor: float = 1,
dropout: float = 0.0,
attention_head_dim: Optional[int] = 8,
norm_eps: float = 1e-4,
add_attention: bool = True,
num_class_embeds: Optional[int] = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
        # the input has in_channels+1 channels because a channel of ones is concatenated to compensate for removing the bias
self.conv_in = Conv2d(in_channels+1, block_out_channels[0], kernel_size=3, padding=(1, 1), bias=False)
# time
self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=0.25)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# loss weighting
self.loss_mlp = nn.Sequential(GaussianFourierProjection(embedding_size=block_out_channels[0], scale=0.25), Linear(timestep_input_dim, 1, bias=False))
# class embedding
if num_class_embeds is not None:
self.class_embedding = ClassEmbedding(num_classes=num_class_embeds, embedding_size=time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
# down
resnet_out_scale_factor = 1.0
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_out_scale_factor=resnet_out_scale_factor,
attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
dropout=dropout,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = nn.ModuleList()
self.add_attention = add_attention
if add_attention:
self.mid_block.append(
AttnDownBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
dropout=dropout,
num_layers=1,
resnet_eps=norm_eps,
output_scale_factor=resnet_out_scale_factor,
attention_head_dim=attention_head_dim,
add_downsample=False
)
)
self.mid_block.append(
DownBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
dropout=dropout,
num_layers=1,
resnet_eps=norm_eps,
output_scale_factor=resnet_out_scale_factor,
add_downsample=False
)
)
# up
reversed_block_out_channels = list(reversed(block_out_channels))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
is_final_block = i == len(block_out_channels) - 1
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=not is_final_block,
resnet_eps=norm_eps,
attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,
dropout=dropout,
resnet_out_scale_factor=resnet_out_scale_factor,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_out = Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1, bias=False)
        # initialize weights from a normal distribution, since weight normalization is used
recursive_normal_init(self)
self.gain = nn.Parameter(torch.ones(1, 1, 1, 1))
def get_loss_module_weight(self, timestep):
return self.loss_mlp(timestep)
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
class_labels: Optional[torch.Tensor] = None,
return_dict: bool = True,
return_loss_mlp: bool = False,
) -> Union[UNet2DOutput, Tuple]:
r"""
The [`UNet2DModel`] forward method.
Args:
sample (`torch.FloatTensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
class_labels (`torch.FloatTensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.
Returns:
[`~models.unet_2d.UNet2DOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is
returned where the first element is the sample tensor.
"""
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)
t_emb = self.time_proj(timesteps)
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when doing class conditioning")
class_emb = self.class_embedding(class_labels, sample.device, self.dtype).to(dtype=self.dtype)
emb = emb + class_emb
elif self.class_embedding is None and class_labels is not None:
raise ValueError("class_embedding needs to be initialized in order to use class conditioning")
# 2. pre-process
skip_sample = sample
# Create a tensor of ones with the same dtype and device
b, c, h, w = sample.shape
ones_tensor = torch.ones(b, 1, h, w, dtype=sample.dtype, device=sample.device)
# Concatenate along the channel dimension
c_in = 1 / torch.sqrt(0.25+timesteps**2)
sample = torch.cat((sample*c_in[:, None, None, None], ones_tensor), dim=1)
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "skip_conv"):
sample, res_samples, skip_sample = downsample_block(
hidden_states=sample, temb=emb, skip_sample=skip_sample
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
# 4. mid
for i, block in enumerate(self.mid_block):
if i == 0 and self.add_attention and isinstance(block, Attention):
sample = block(sample)
if isinstance(sample, tuple):
sample = sample[0]
else:
sample = block(sample, emb)
if isinstance(sample, tuple):
sample = sample[0]
# 5. up
for upsample_block in self.up_blocks:
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
if hasattr(upsample_block, "skip_conv"):
sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)
else:
sample = upsample_block(sample, res_samples, emb)
# 6. post-process
c_out = (timesteps*0.5) / torch.sqrt(timesteps**2 + 0.25)
sample = self.conv_out(sample) * c_out[:, None, None, None]
if skip_sample is not None:
c_skip = 0.25 / (0.25+timesteps**2)
sample += skip_sample * c_skip[:, None, None, None]
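        # Taken together, c_in = 1/sqrt(0.25 + sigma^2), c_out = 0.5*sigma/sqrt(sigma^2 + 0.25)
        # and c_skip = 0.25/(0.25 + sigma^2) are the EDM-style preconditioning with
        # sigma_data = 0.5 (0.25 = sigma_data**2).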
if return_loss_mlp:
loss_w = self.get_loss_module_weight(timesteps)
if not return_dict:
return (sample,), loss_w
return UNet2DOutput(sample=sample), loss_w
if not return_dict:
return (sample,)
return UNet2DOutput(sample=sample)
# Path: train.py
import torch
import torch.nn.functional as F
import hydra
import os
import shutil
import math
import numpy as np
import re
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from diffusers.utils import make_image_grid
from torchvision.transforms import Compose, ToTensor, Normalize, RandomHorizontalFlip
from omegaconf import DictConfig
from hydra.core.hydra_config import HydraConfig
from diffusers.optimization import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup
from diffusers import EMAModel
from pipeline import KarrasPipeline
from accelerate import Accelerator, DistributedDataParallelKwargs
from accelerate.utils import LoggerType
from tqdm import tqdm
from datasets import load_dataset
from model import UNet2DModel
model.load_state_dict(torch_dict['model'])
latest_checkpoint = torch_dict['latest_checkpoint']
start_step = torch_dict['step']
if config.use_ema:
parent_dir = os.path.dirname(config.resume)
parent_dir_children = os.listdir(parent_dir)
numbers = [int(re.match(r"ema_checkpoints_(\d+)", f).group(1)) for f in parent_dir_children if re.match(r"ema_checkpoints_(\d+)", f)]
num = max(numbers)
print(f"Using EMA checkpoint {num}")
ema.from_pretrained(os.path.join(parent_dir, f'ema_checkpoints_{num}'), model_cls=UNet2DModel)
if accelerator.is_main_process:
        # Create the output directory if needed (config.output_dir is asserted to be non-None earlier in train)
os.makedirs(config.output_dir, exist_ok=True)
hydra_dir = os.path.join(HydraConfig.get().runtime.output_dir, '.hydra')
print(f'copying from hydra dir {hydra_dir}')
f_name = 'config.yaml'
shutil.copy2(os.path.join(hydra_dir, f_name), os.path.join(config.output_dir, f_name))
f_name = 'hydra.yaml'
shutil.copy2(os.path.join(hydra_dir, f_name), os.path.join(config.output_dir, f_name))
f_name = 'overrides.yaml'
shutil.copy2(os.path.join(hydra_dir, f_name), os.path.join(config.output_dir, f_name))
accelerator.init_trackers(
config.model_name,
config={
'resume': config.resume if 'resume' in config else '',
'batch_size': config.train_batch_size,
'num_train_kimg': config.num_train_kimg,
'lr': config.learning_rate,
'dataset': config.data.dataset.path,
},
)
# Prepare everything
# There is no specific order to remember, you just need to unpack the
# objects in the same order you gave them to the prepare method.
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, lr_scheduler
)
if config.use_ema:
ema.to(accelerator.device)
P_mean, P_std, sigma_data = config.training.P_mean, config.training.P_std, config.training.sigma_data
total_steps = get_total_steps(config)
progress_bar = tqdm(total=total_steps-start_step, disable=not accelerator.is_local_main_process)
progress_bar.set_description("Train")
train_iter = iter(train_dataloader)
loss_type = config.loss_type if hasattr(config, 'loss_type') else 'mlp'
assert loss_type in ['mlp', 'scaled'], 'loss type not supported'
for step in range(start_step, total_steps):
batch = next(train_iter)
images = batch["images"]
label = None
if "label" in batch:
label = batch["label"]
noise = torch.randn_like(images)
# Add noise to the clean images according to the noise magnitude at each timestep
sigma = get_sigma(images.shape[0], P_mean, P_std, images.device)
noisy_images = add_noise(images, noise, sigma)
loss_w = get_sigma_weight(sigma, sigma_data)
with accelerator.accumulate(model):
# Predict the noise
pred, u_sigma = model(noisy_images, sigma[:, 0, 0, 0], class_labels=label, return_dict=False, return_loss_mlp=True)
loss = F.mse_loss(pred[0], images, reduction="none")
loss = loss.mean(dim=(1,2,3))
scaled_loss = loss_w[:, 0, 0, 0] * loss
u_sigma = u_sigma[:, 0]
scaled_loss_mlp = (scaled_loss / u_sigma.exp() + u_sigma)
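            # scaled_loss_mlp is the learned per-sigma uncertainty weighting: for a given noise
            # level, loss/exp(u) + u is minimised when u equals the log of the expected weighted
            # loss, so u_sigma adapts the effective weight of each sigma during training.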
if loss_type == 'scaled':
accelerator.backward(scaled_loss.mean())
elif loss_type == 'mlp':
accelerator.backward(scaled_loss_mlp.mean())
else:
raise NotImplementedError(f'loss_type {loss_type} not supported')
replace_grad_nans(model)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
if config.use_ema:
ema.step(model.parameters())
progress_bar.update(config.gradient_accumulation_steps)
loss = accelerator.gather(loss).detach()
scaled_loss = accelerator.gather(scaled_loss).detach()
scaled_loss_mlp = accelerator.gather(scaled_loss_mlp).detach()
logs = {
"loss": loss.mean().item(),
"scaled_loss": scaled_loss.mean().item(),
"scaled_loss_std": scaled_loss.std().item(),
"mlp_loss": scaled_loss_mlp.mean().item(),
"lr": lr_scheduler.get_last_lr()[0],
"step": step+1
}
p_logs = {
"loss": f'{logs["loss"]:7.5f}',
"scaled_loss": f'{logs["scaled_loss"]:6.4f}',
"mlp_loss": f'{logs["mlp_loss"]:7.4f}',
"lr": f'{logs["lr"]:.6f}',
"step": step+1
}
progress_bar.set_postfix(**p_logs)
accelerator.log(logs, step=step+1)
if accelerator.is_main_process:
save_image = (step + 1) % (config.save_image_steps*config.gradient_accumulation_steps) == 0 or step == total_steps - 1
save_model = (step + 1) % (config.save_model_steps*config.gradient_accumulation_steps) == 0 or step == total_steps - 1
if save_image or save_model:
if is_distributed:
pipeline = KarrasPipeline(unet=accelerator.unwrap_model(model.module), scheduler=noise_scheduler)
else:
pipeline = KarrasPipeline(unet=accelerator.unwrap_model(model), scheduler=noise_scheduler)
if save_image:
if config.use_ema:
ema.store(model.parameters())
| ema.copy_to(model.parameters()) |
====REPOSITORY====
# Repo Name: youngskkim/CRN
# Path: callbacks/ema.py
class EMACallback(Callback):
def __init__(self, len_updates) -> None:
super().__init__()
self.len_updates = len_updates
def on_fit_start(self, trainer, pl_module):
# Todo (@[email protected]): delete manually specified device
from torch.nn.modules.batchnorm import SyncBatchNorm
bn_model_list = list()
bn_model_dist_group_list = list()
for model_ref in trainer.model.modules():
if isinstance(model_ref, SyncBatchNorm):
bn_model_list.append(model_ref)
bn_model_dist_group_list.append(model_ref.process_group)
model_ref.process_group = None
trainer.ema_model = ModelEMA(trainer.model.module.module.model.cuda(),
0.9990)
for bn_model, dist_group in zip(bn_model_list,
bn_model_dist_group_list):
bn_model.process_group = dist_group
trainer.ema_model.updates = self.len_updates
def on_train_batch_end(self,
trainer,
pl_module,
outputs,
batch,
batch_idx,
unused=0):
trainer.ema_model.update(trainer, trainer.model.module.module.model)
def on_train_epoch_end(self, trainer, pl_module) -> None:
state_dict = trainer.ema_model.ema.state_dict()
state_dict_keys = list(state_dict.keys())
        # TODO: Change to a more elegant way.
for state_dict_key in state_dict_keys:
new_key = 'model.' + state_dict_key
state_dict[new_key] = state_dict.pop(state_dict_key)
checkpoint = {
# the epoch and global step are saved for
# compatibility but they are not relevant for restoration
'epoch': trainer.current_epoch,
'global_step': trainer.global_step,
'state_dict': state_dict
}
torch.save(
checkpoint,
os.path.join(trainer.log_dir, f'{trainer.current_epoch}.pth'))
# Path: utils/torch_dist.py
def all_gather_object(obj):
world_size = get_world_size()
if world_size < 2:
return [obj]
output = [None for _ in range(world_size)]
dist.all_gather_object(output, obj)
return output
# Path: utils/torch_dist.py
def synchronize():
"""Helper function to synchronize (barrier)
among all processes when using distributed training"""
if not dist.is_available():
return
if not dist.is_initialized():
return
current_world_size = dist.get_world_size()
if current_world_size == 1:
return
dist.barrier()
# Path: exps/base_exp.py
class BEVDepthLightningModel(LightningModule):
MODEL_NAMES = sorted(name for name in models.__dict__
if name.islower() and not name.startswith('__')
and callable(models.__dict__[name]))
def __init__(self,
gpus: int = 1,
data_root='data/nuScenes',
eval_interval=1,
batch_size_per_device=8,
class_names=CLASSES,
backbone_img_conf=backbone_img_conf,
head_conf=head_conf,
ida_aug_conf=ida_aug_conf,
bda_aug_conf=bda_aug_conf,
rda_aug_conf=rda_aug_conf,
default_root_dir='./outputs/',
**kwargs):
super().__init__()
self.save_hyperparameters()
self.gpus = gpus
self.optimizer_config = optimizer_config
self.pretrain_config = pretrain_config
self.eval_interval = eval_interval
self.batch_size_per_device = batch_size_per_device
self.data_root = data_root
self.class_names = class_names
self.backbone_img_conf = backbone_img_conf
self.head_conf = head_conf
self.ida_aug_conf = ida_aug_conf
self.bda_aug_conf = bda_aug_conf
self.rda_aug_conf = rda_aug_conf
mmcv.mkdir_or_exist(default_root_dir)
self.default_root_dir = default_root_dir
self.evaluator = DetNuscEvaluator(class_names=self.class_names,
output_dir=self.default_root_dir)
self.model = BaseBEVDepth(self.backbone_img_conf,
self.head_conf)
self.mode = 'valid'
self.img_conf = img_conf
self.data_use_cbgs = False
self.load_interval = 1
self.num_sweeps = 1
self.sweep_idxes = list()
self.key_idxes = list()
self.data_return_depth = True
self.downsample_factor = self.backbone_img_conf['downsample_factor']
self.dbound = self.backbone_img_conf['d_bound']
self.depth_channels = int(
(self.dbound[1] - self.dbound[0]) / self.dbound[2])
self.use_fusion = False
self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl'
self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl'
self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl'
self.return_image = True
self.return_depth = True
self.return_radar_pv = False
self.remove_z_axis = True
def forward(self, sweep_imgs, mats, is_train=False, **inputs):
return self.model(sweep_imgs, mats, is_train=is_train)
def training_step(self, batch):
if self.global_rank == 0:
for pg in self.trainer.optimizers[0].param_groups:
self.log('learning_rate', pg["lr"])
(sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch
if torch.cuda.is_available():
if self.return_image:
sweep_imgs = sweep_imgs.cuda()
for key, value in mats.items():
mats[key] = value.cuda()
if self.return_radar_pv:
pts_pv = pts_pv.cuda()
gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]
gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]
preds, depth_preds = self(sweep_imgs, mats,
pts_pv=pts_pv,
is_train=True)
targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)
loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)
if len(depth_labels.shape) == 5:
            # only the key frame contributes to the depth loss
depth_labels = depth_labels[:, 0, ...].contiguous()
loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds)
self.log('train/detection', loss_detection)
self.log('train/heatmap', loss_heatmap)
self.log('train/bbox', loss_bbox)
self.log('train/depth', loss_depth)
return loss_detection + loss_depth
def get_depth_loss(self, depth_labels, depth_preds, weight=3.):
depth_labels = self.get_downsampled_gt_depth(depth_labels)
depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view(
-1, self.depth_channels)
fg_mask = torch.max(depth_labels, dim=1).values > 0.0
with autocast(enabled=False):
loss_depth = (F.binary_cross_entropy(
depth_preds[fg_mask],
depth_labels[fg_mask],
reduction='none',
).sum() / max(1.0, fg_mask.sum()))
return weight * loss_depth
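        # i.e. per-bin binary cross-entropy between the predicted depth probabilities and the
        # one-hot discretised LiDAR depth, summed over bins, averaged over pixels that have a
        # ground-truth depth (fg_mask), and scaled by `weight`.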
def get_downsampled_gt_depth(self, gt_depths):
"""
Input:
gt_depths: [B, N, H, W]
Output:
gt_depths: [B*N*h*w, d]
"""
B, N, H, W = gt_depths.shape
gt_depths = gt_depths.view(
B * N,
H // self.downsample_factor,
self.downsample_factor,
W // self.downsample_factor,
self.downsample_factor,
1,
)
gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()
gt_depths = gt_depths.view(
-1, self.downsample_factor * self.downsample_factor)
gt_depths_tmp = torch.where(gt_depths == 0.0,
1e5 * torch.ones_like(gt_depths),
gt_depths)
gt_depths = torch.min(gt_depths_tmp, dim=-1).values
gt_depths = gt_depths.view(B * N, H // self.downsample_factor,
W // self.downsample_factor)
gt_depths = (gt_depths -
(self.dbound[0] - self.dbound[2])) / self.dbound[2]
gt_depths = torch.where(
(gt_depths < self.depth_channels + 1) & (gt_depths > 0.),
gt_depths, torch.zeros_like(gt_depths))
gt_depths = F.one_hot(gt_depths.long(),
num_classes=self.depth_channels + 1).view(
-1, self.depth_channels + 1)[:, 1:]
return gt_depths.float()
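        # For intuition (assuming, for illustration, d_bound = [2.0, 58.0, 0.5], i.e. 112 bins):
        # the minimum depth inside each downsample_factor x downsample_factor patch is kept,
        # mapped to a bin index via (d - (2.0 - 0.5)) / 0.5 and one-hot encoded; patches with no
        # valid depth become an all-zero row and are excluded by fg_mask in get_depth_loss.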
def eval_step(self, batch, batch_idx, prefix: str):
(sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch
if torch.cuda.is_available():
if self.return_image:
sweep_imgs = sweep_imgs.cuda()
for key, value in mats.items():
mats[key] = value.cuda()
if self.return_radar_pv:
pts_pv = pts_pv.cuda()
preds = self(sweep_imgs, mats,
pts_pv=pts_pv,
is_train=False)
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
results = self.model.module.get_bboxes(preds, img_metas)
else:
results = self.model.get_bboxes(preds, img_metas)
for i in range(len(results)):
results[i][0] = results[i][0].tensor.detach().cpu().numpy()
results[i][1] = results[i][1].detach().cpu().numpy()
results[i][2] = results[i][2].detach().cpu().numpy()
results[i].append(img_metas[i])
return results
def validation_epoch_end(self, validation_step_outputs):
detection_losses = list()
heatmap_losses = list()
bbox_losses = list()
depth_losses = list()
for validation_step_output in validation_step_outputs:
detection_losses.append(validation_step_output[0])
heatmap_losses.append(validation_step_output[1])
bbox_losses.append(validation_step_output[2])
depth_losses.append(validation_step_output[3])
synchronize()
self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True)
self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True)
self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True)
self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True)
def validation_step(self, batch, batch_idx):
(sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch
if torch.cuda.is_available():
if self.return_image:
sweep_imgs = sweep_imgs.cuda()
for key, value in mats.items():
mats[key] = value.cuda()
if self.return_radar_pv:
pts_pv = pts_pv.cuda()
gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]
gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]
with torch.no_grad():
preds, depth_preds = self(sweep_imgs, mats,
pts_pv=pts_pv,
is_train=True)
targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)
loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)
if len(depth_labels.shape) == 5:
                # only the key frame contributes to the depth loss
depth_labels = depth_labels[:, 0, ...].contiguous()
loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.)
return loss_detection, loss_heatmap, loss_bbox, loss_depth
def test_epoch_end(self, test_step_outputs):
all_pred_results = list()
all_img_metas = list()
for test_step_output in test_step_outputs:
for i in range(len(test_step_output)):
all_pred_results.append(test_step_output[i][:3])
all_img_metas.append(test_step_output[i][3])
synchronize()
        # TODO: Change to another way.
dataset_length = len(self.val_dataloader().dataset)
all_pred_results = sum(
map(list, zip(*all_gather_object(all_pred_results))),
[])[:dataset_length]
all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),
[])[:dataset_length]
if self.global_rank == 0:
self.evaluator.evaluate(all_pred_results, all_img_metas)
def configure_optimizers(self):
optimizer = build_optimizer(self.model, self.optimizer_config)
scheduler = MultiStepLR(optimizer, [19, 23])
return [[optimizer], [scheduler]]
def train_dataloader(self):
train_dataset = NuscDatasetRadarDet(
ida_aug_conf=self.ida_aug_conf,
bda_aug_conf=self.bda_aug_conf,
rda_aug_conf=self.rda_aug_conf,
img_backbone_conf=self.backbone_img_conf,
classes=self.class_names,
data_root=self.data_root,
info_paths=self.train_info_paths,
is_train=True,
use_cbgs=self.data_use_cbgs,
img_conf=self.img_conf,
load_interval=self.load_interval,
num_sweeps=self.num_sweeps,
sweep_idxes=self.sweep_idxes,
key_idxes=self.key_idxes,
return_image=self.return_image,
return_depth=self.return_depth,
return_radar_pv=self.return_radar_pv,
remove_z_axis=self.remove_z_axis,
depth_path='depth_gt',
radar_pv_path='radar_pv_filter'
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=self.batch_size_per_device,
num_workers=4,
drop_last=True,
shuffle=False,
collate_fn=partial(collate_fn,
is_return_image=self.return_image,
is_return_depth=self.return_depth,
is_return_radar_pv=self.return_radar_pv),
sampler=None,
)
return train_loader
def val_dataloader(self):
val_dataset = NuscDatasetRadarDet(
ida_aug_conf=self.ida_aug_conf,
bda_aug_conf=self.bda_aug_conf,
rda_aug_conf=self.rda_aug_conf,
img_backbone_conf=self.backbone_img_conf,
classes=self.class_names,
data_root=self.data_root,
info_paths=self.val_info_paths,
is_train=False,
img_conf=self.img_conf,
load_interval=self.load_interval,
num_sweeps=self.num_sweeps,
sweep_idxes=self.sweep_idxes,
key_idxes=self.key_idxes,
return_image=self.return_image,
return_depth=self.return_depth,
return_radar_pv=self.return_radar_pv,
remove_z_axis=self.remove_z_axis,
radar_pv_path='radar_pv_filter',
)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=self.batch_size_per_device,
num_workers=4,
shuffle=False,
collate_fn=partial(collate_fn,
is_return_image=self.return_image,
is_return_depth=self.return_depth,
is_return_radar_pv=self.return_radar_pv),
sampler=None,
)
return val_loader
def test_dataloader(self):
return self.val_dataloader()
def predict_dataloader(self):
predict_dataset = NuscDatasetRadarDet(
ida_aug_conf=self.ida_aug_conf,
bda_aug_conf=self.bda_aug_conf,
rda_aug_conf=self.rda_aug_conf,
img_backbone_conf=self.backbone_img_conf,
classes=self.class_names,
data_root=self.data_root,
info_paths=self.val_info_paths,
is_train=False,
img_conf=self.img_conf,
load_interval=self.load_interval,
num_sweeps=self.num_sweeps,
sweep_idxes=self.sweep_idxes,
key_idxes=self.key_idxes,
return_image=self.return_image,
return_depth=self.return_depth,
return_radar_pv=self.return_radar_pv,
remove_z_axis=self.remove_z_axis,
radar_pv_path='radar_pv_filter',
)
predict_loader = torch.utils.data.DataLoader(
predict_dataset,
batch_size=self.batch_size_per_device,
num_workers=4,
shuffle=False,
collate_fn=partial(collate_fn,
is_return_image=self.return_image,
is_return_depth=self.return_depth,
is_return_radar_pv=self.return_radar_pv),
sampler=None,
)
return predict_loader
def test_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, 'test')
def predict_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, 'predict')
@staticmethod
def add_model_specific_args(parent_parser): # pragma: no-cover
return parent_parser
# Path: exps/base_cli.py
import os
import pytorch_lightning as pl
from argparse import ArgumentParser
from pytorch_lightning.callbacks.model_summary import ModelSummary
from callbacks.ema import EMACallback
from utils.torch_dist import all_gather_object, synchronize
from .base_exp import BEVDepthLightningModel
# Copyright (c) Megvii Inc. All rights reserved.
def run_cli(model_class=BEVDepthLightningModel,
exp_name='base_exp',
use_ema=False,
ckpt_path=None):
parent_parser = ArgumentParser(add_help=False)
parent_parser = pl.Trainer.add_argparse_args(parent_parser)
parent_parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parent_parser.add_argument('-p',
'--predict',
dest='predict',
action='store_true',
help='predict model on testing set')
parent_parser.add_argument('-b', '--batch_size_per_device', type=int)
parent_parser.add_argument('--seed',
type=int,
default=0,
help='seed for initializing training.')
parent_parser.add_argument('--ckpt_path', type=str)
parser = BEVDepthLightningModel.add_model_specific_args(parent_parser)
parser.set_defaults(profiler='simple',
deterministic=False,
max_epochs=24,
strategy='ddp',
# strategy='ddp_find_unused_parameters_false',
num_sanity_val_steps=0,
check_val_every_n_epoch=1,
gradient_clip_val=5,
limit_val_batches=0.25,
log_every_n_steps=50,
enable_checkpointing=True,
precision=16,
default_root_dir=os.path.join('./outputs/', exp_name))
args = parser.parse_args()
| if args.seed is not None: |
====REPOSITORY====
# Repo Name: KAIST-VICLab/From_Ground_To_Objects
# Path: networks/layers.py
class BackprojectDepth(nn.Module):
"""Layer to transform a depth image into a point cloud
"""
def __init__(self, batch_size, height, width):
super(BackprojectDepth, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy')
self.id_coords = np.stack(meshgrid, axis=0).astype(np.float32)
self.id_coords = nn.Parameter(torch.from_numpy(self.id_coords),
requires_grad=False)
self.ones = nn.Parameter(torch.ones(self.batch_size, 1, self.height * self.width),
requires_grad=False)
self.pix_coords = torch.unsqueeze(torch.stack(
[self.id_coords[0].view(-1), self.id_coords[1].view(-1)], 0), 0)
self.pix_coords = self.pix_coords.repeat(batch_size, 1, 1)
self.pix_coords = nn.Parameter(torch.cat([self.pix_coords, self.ones], 1),
requires_grad=False)
def forward(self, depth, inv_K):
b = depth.size(0)
cam_points = torch.matmul(inv_K[:, :3, :3], self.pix_coords[:b])
cam_points = depth.view(b, 1, -1) * cam_points
cam_points = torch.cat([cam_points, self.ones[:b]], 1)
return cam_points
# Path: networks/layers.py
class Project3D(nn.Module):
"""Layer which projects 3D points into a camera with intrinsics K and at position T
"""
def __init__(self, batch_size, height, width, eps=1e-7):
super(Project3D, self).__init__()
self.batch_size = batch_size
self.height = height
self.width = width
self.eps = eps
def forward(self, points, K, T):
b = points.size(0)
P = torch.matmul(K, T)[:, :3, :]
cam_points = torch.matmul(P[:b], points)
pix_coords = cam_points[:, :2, :] / (cam_points[:, 2, :].unsqueeze(1) + self.eps)
pix_coords = pix_coords.view(b, 2, self.height, self.width)
pix_coords = pix_coords.permute(0, 2, 3, 1)
pix_coords[..., 0] /= self.width - 1
pix_coords[..., 1] /= self.height - 1
pix_coords = (pix_coords - 0.5) * 2
return pix_coords
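        # The returned pix_coords are normalised to [-1, 1] in both axes, matching the coordinate
        # convention F.grid_sample expects when these projections are used to warp lookup features.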
# Path: networks/resnet_encoder.py
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import torch.utils.model_zoo as model_zoo
from .layers import BackprojectDepth, Project3D
from .transformer import feature_add_position, FeatureTransformer
from .asp_oc_block import ASP_OC_Module
encoder = resnets[num_layers](pretrained)
self.con_layer0 = nn.Sequential(encoder.conv1, encoder.bn1, encoder.relu)
self.con_layer1 = nn.Sequential(encoder.maxpool, encoder.layer1)
if num_layers > 34:
self.num_ch_enc[1:] *= 4
self.backprojector = BackprojectDepth(batch_size=self.num_depth_bins,
height=self.matching_height,
width=self.matching_width)
self.projector = Project3D(batch_size=self.num_depth_bins,
height=self.matching_height,
width=self.matching_width)
self.compute_depth_bins(min_depth_bin, max_depth_bin)
self.prematching_conv = nn.Sequential(nn.Conv2d(64, out_channels=16,
kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True)
)
self.reduce_conv = nn.Sequential(nn.Conv2d(self.num_ch_enc[1] + self.num_depth_bins,
out_channels=self.num_ch_enc[1],
kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True)
)
def compute_depth_bins(self, min_depth_bin, max_depth_bin):
"""Compute the depths bins used to build the cost volume. Bins will depend upon
self.depth_binning, to either be linear in depth (linear) or linear in inverse depth
(inverse)"""
if self.depth_binning == 'inverse':
self.depth_bins = 1 / np.linspace(1 / max_depth_bin,
1 / min_depth_bin,
self.num_depth_bins)[::-1] # maintain depth order
elif self.depth_binning == 'linear':
self.depth_bins = np.linspace(min_depth_bin, max_depth_bin, self.num_depth_bins)
else:
raise NotImplementedError
self.depth_bins = torch.from_numpy(self.depth_bins).float()
self.warp_depths = []
for depth in self.depth_bins:
depth = torch.ones((1, self.matching_height, self.matching_width)) * depth
self.warp_depths.append(depth)
self.warp_depths = torch.stack(self.warp_depths, 0).float()
if self.is_cuda:
self.warp_depths = self.warp_depths.cuda()
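        # For illustration: with min_depth_bin=0.5, max_depth_bin=10 and num_depth_bins=4,
        # 'inverse' binning yields depths of roughly [0.50, 0.73, 1.36, 10.0] (uniform in 1/d,
        # denser near the camera), whereas 'linear' yields [0.5, 3.67, 6.83, 10.0].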
def match_features(self, current_feats, lookup_feats, relative_poses, K, invK):
"""Compute a cost volume based on L1 difference between current_feats and lookup_feats.
We backwards warp the lookup_feats into the current frame using the estimated relative
pose, known intrinsics and using hypothesised depths self.warp_depths (which are either
linear in depth or linear in inverse depth).
If relative_pose == 0 then this indicates that the lookup frame is missing (i.e. we are
at the start of a sequence), and so we skip it"""
batch_cost_volume = [] # store all cost volumes of the batch
cost_volume_masks = [] # store locations of '0's in cost volume for confidence
for batch_idx in range(len(current_feats)):
volume_shape = (self.num_depth_bins, self.matching_height, self.matching_width)
cost_volume = torch.zeros(volume_shape, dtype=torch.float, device=current_feats.device)
counts = torch.zeros(volume_shape, dtype=torch.float, device=current_feats.device)
# select an item from batch of ref feats
_lookup_feats = lookup_feats[batch_idx:batch_idx + 1]
_lookup_poses = relative_poses[batch_idx:batch_idx + 1]
_K = K[batch_idx:batch_idx + 1]
_invK = invK[batch_idx:batch_idx + 1]
world_points = self.backprojector(self.warp_depths, _invK)
# loop through ref images adding to the current cost volume
for lookup_idx in range(_lookup_feats.shape[1]):
lookup_feat = _lookup_feats[:, lookup_idx] # 1 x C x H x W
lookup_pose = _lookup_poses[:, lookup_idx]
# ignore missing images
if lookup_pose.sum() == 0:
continue
lookup_feat = lookup_feat.repeat([self.num_depth_bins, 1, 1, 1])
pix_locs = self.projector(world_points, _K, lookup_pose)
warped = F.grid_sample(lookup_feat, pix_locs, padding_mode='zeros', mode='bilinear',
align_corners=True)
# mask values landing outside the image (and near the border)
# we want to ignore edge pixels of the lookup images and the current image
# because of zero padding in ResNet
# Masking of ref image border
x_vals = (pix_locs[..., 0].detach() / 2 + 0.5) * (
self.matching_width - 1) # convert from (-1, 1) to pixel values
y_vals = (pix_locs[..., 1].detach() / 2 + 0.5) * (self.matching_height - 1)
edge_mask = (x_vals >= 2.0) * (x_vals <= self.matching_width - 2) * \
(y_vals >= 2.0) * (y_vals <= self.matching_height - 2)
edge_mask = edge_mask.float()
# masking of current image
current_mask = torch.zeros_like(edge_mask)
current_mask[:, 2:-2, 2:-2] = 1.0
edge_mask = edge_mask * current_mask
diffs = torch.abs(warped - current_feats[batch_idx:batch_idx + 1]).mean(
1) * edge_mask
# integrate into cost volume
cost_volume = cost_volume + diffs
counts = counts + (diffs > 0).float()
# average over lookup images
cost_volume = cost_volume / (counts + 1e-7)
            # if a pixel location has missing values (i.e. some hypothesised depths landed
            # outside the image), set them to the max of the existing values
| missing_val_mask = (cost_volume == 0).float() |
====REPOSITORY====
# Repo Name: jinxixiang/magic_animate_unofficial
# Path: animatediff/magic_animate/unet_3d_blocks.py
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
output_states = ()
for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)[0]
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
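        # When self.gradient_checkpointing is enabled during training, each resnet / attention /
        # motion-module call above is wrapped in torch.utils.checkpoint, so activations are
        # recomputed in the backward pass, trading extra compute for lower memory use.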
# Path: animatediff/magic_animate/unet_3d_blocks.py
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
encoder_hidden_states=None,
upsample_size=None,
attention_mask=None,
):
for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)[0]
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: animatediff/magic_animate/unet_3d_blocks.py
class DownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
motion_modules = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None):
output_states = ()
for resnet, motion_module in zip(self.resnets, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: animatediff/magic_animate/unet_3d_blocks.py
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=in_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
hidden_states = resnet(hidden_states, temb)
return hidden_states
# Path: animatediff/magic_animate/unet_3d_blocks.py
class UpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_upsample=True,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
motion_modules = []
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample3D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, encoder_hidden_states=None,):
for resnet, motion_module in zip(self.resnets, self.motion_modules):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.training and self.gradient_checkpointing:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: animatediff/magic_animate/unet_3d_blocks.py
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
# Path: animatediff/magic_animate/unet_3d_blocks.py
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
# Path: animatediff/magic_animate/resnet.py
class InflatedConv3d(nn.Conv2d):
def forward(self, x):
video_length = x.shape[2]
x = rearrange(x, "b c f h w -> (b f) c h w")
x = super().forward(x)
x = rearrange(x, "(b f) c h w -> b c f h w", f=video_length)
return x
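# Illustrative sketch (not from the repository above): the "inflated" convolution trick used by
# InflatedConv3d, written as standalone code. A plain Conv2d runs frame-by-frame by folding the
# frame axis into the batch axis; the tensor sizes below are arbitrary toy values.
import torch
import torch.nn as nn
from einops import rearrange
conv2d = nn.Conv2d(4, 8, kernel_size=3, padding=1)
video = torch.randn(2, 4, 16, 32, 32)                    # (batch, channels, frames, height, width)
frames = rearrange(video, "b c f h w -> (b f) c h w")    # every frame becomes an independent image
out = rearrange(conv2d(frames), "(b f) c h w -> b c f h w", f=video.shape[2])
print(out.shape)                                         # torch.Size([2, 8, 16, 32, 32])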
# Path: animatediff/magic_animate/unet.py
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/guoyww/AnimateDiff
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
| flip_sin_to_cos: bool = True, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Chat-3D/Chat-3D-v2
# Path: models/modeling_llama.py
class LlamaForCausalLM(LlamaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.model = LlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
query_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
query_embeds=query_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.lm_head.weight.dtype == torch.float32:
hidden_states = hidden_states.float()
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
query_embeds = None
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"position_ids": position_ids,
"query_embeds": query_embeds,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
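# Illustrative sketch (not from the repository above): the shift-by-one language-modeling loss
# computed in LlamaForCausalLM.forward. Logits at position t predict the token at position t + 1,
# and labels set to -100 (the default ignore_index) are excluded; the tensors are toy values.
import torch
from torch.nn import CrossEntropyLoss
vocab_size = 11
logits = torch.randn(2, 5, vocab_size)           # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 5))
labels[:, 0] = -100                              # e.g. mask out prompt tokens
shift_logits = logits[..., :-1, :].contiguous()  # positions 0 .. T-2
shift_labels = labels[..., 1:].contiguous()      # tokens    1 .. T-1
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())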
# Path: models/transformer_vanilla/transformer_block.py
class TransformerEncoder(nn.Module):
def __init__(self, dim, num_layers=1, heads=32, dim_head=None, dropout=0.1):
super().__init__()
self.block_list = [BasicTransformerBlock(dim, heads, dim_head, dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(self.block_list)
self.output_norm = nn.LayerNorm(dim)
self.apply(self._init_weights)
def forward(self, x, mask=None, dist_attn=None):
for layer in self.layers:
x = layer(x, mask=mask, dist_attn=dist_attn)
# x = self.output_norm(x)
return x
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
# Path: models/transformer_vanilla/transformer_block.py
class CMT(nn.Module):
def __init__(self, hidden_size, num_layers=1):
super().__init__()
decoder_layer = TransformerSpatialDecoderLayer(
d_model=hidden_size, nhead=8, dim_head=64,
dim_feedforward=4096, dropout=0.1
)
self.layers = _get_clones(decoder_layer, num_layers)
loc_layer = nn.Sequential(
nn.Linear(6, hidden_size),
nn.ReLU(),
nn.LayerNorm(hidden_size)
)
self.loc_layers = _get_clones(loc_layer, 1)
self.apply(self._init_weights)
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=0.01)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def calc_pairwise_locs(self, obj_centers, eps=1e-10, pairwise_rel_type='center'):
pairwise_locs = einops.repeat(obj_centers, 'b l d -> b l 1 d') \
- einops.repeat(obj_centers, 'b l d -> b 1 l d')
pairwise_dists = torch.sqrt(torch.sum(pairwise_locs ** 2, 3) + eps) # (b, l, l)
# max_dists = torch.max(pairwise_dists.view(pairwise_dists.size(0), -1), dim=1)[0]
norm_pairwise_dists = pairwise_dists #/ einops.repeat(max_dists, 'b -> b 1 1')
pairwise_dists_2d = torch.sqrt(torch.sum(pairwise_locs[..., :2] ** 2, 3) + eps)
pairwise_locs = torch.stack(
[norm_pairwise_dists, pairwise_locs[..., 2] / pairwise_dists,
pairwise_dists_2d / pairwise_dists, pairwise_locs[..., 1] / pairwise_dists_2d,
pairwise_locs[..., 0] / pairwise_dists_2d],
dim=3
)
return pairwise_locs
def forward(
self, obj_embeds, obj_locs, obj_masks
):
pairwise_locs = self.calc_pairwise_locs(
obj_locs[:, :, :3]
)
out_embeds = obj_embeds
for i, layer in enumerate(self.layers):
# query_pos = self.loc_layers[0](obj_locs)
# out_embeds = out_embeds + query_pos
out_embeds = layer(
out_embeds, pairwise_locs,
tgt_key_padding_mask=obj_masks.logical_not(),
)
return out_embeds
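# Illustrative sketch (not from the repository above): the pairwise relative-location features
# built in CMT.calc_pairwise_locs, written with plain broadcasting instead of einops. The object
# centers are random stand-ins; the result keeps the same five features per object pair.
import torch
centers = torch.randn(2, 6, 3)                               # (batch, num_objects, xyz)
eps = 1e-10
rel = centers[:, :, None, :] - centers[:, None, :, :]        # (b, l, l, 3) pairwise offsets
dist = torch.sqrt((rel ** 2).sum(-1) + eps)                  # 3D distance between every pair
dist_2d = torch.sqrt((rel[..., :2] ** 2).sum(-1) + eps)      # distance in the horizontal plane
feats = torch.stack(
    [dist, rel[..., 2] / dist, dist_2d / dist, rel[..., 1] / dist_2d, rel[..., 0] / dist_2d],
    dim=-1,
)
print(feats.shape)                                           # torch.Size([2, 6, 6, 5])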
# Path: models/helpers.py
class GenericMLP(nn.Module):
def __init__(
self,
input_dim,
hidden_dims,
output_dim,
norm_fn_name=None,
activation="silu",
use_conv=False,
dropout=None,
hidden_use_bias=False,
output_use_bias=True,
output_use_activation=False,
output_use_norm=False,
weight_init_name=None,
weight_init_std=0.02
):
super().__init__()
activation = ACTIVATION_DICT[activation]
norm = None
if norm_fn_name is not None:
norm = NORM_DICT[norm_fn_name]
if norm_fn_name == "ln" and use_conv:
norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm
if dropout is not None:
if not isinstance(dropout, list):
dropout = [dropout for _ in range(len(hidden_dims))]
layers = []
prev_dim = input_dim
for idx, x in enumerate(hidden_dims):
if use_conv:
layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias)
else:
layer = nn.Linear(prev_dim, x, bias=hidden_use_bias)
layers.append(layer)
if norm:
layers.append(norm(x))
layers.append(activation())
if dropout is not None:
layers.append(nn.Dropout(p=dropout[idx]))
prev_dim = x
if use_conv:
layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias)
else:
layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias)
layers.append(layer)
if output_use_norm:
layers.append(norm(output_dim))
if output_use_activation:
layers.append(activation())
self.layers = nn.Sequential(*layers)
# self.weight_init_std = weight_init_std
# self.apply(self._init_weights)
def _init_weights(self, module):
std = self.weight_init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
# if weight_init_name is not None:
# self.do_weight_init(weight_init_name)
#
# def do_weight_init(self, weight_init_name):
# func = WEIGHT_INIT_DICT[weight_init_name]
# for (_, param) in self.named_parameters():
# if param.dim() > 1: # skips batchnorm/layernorm
# func(param)
def forward(self, x):
output = self.layers(x)
return output
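# Illustrative sketch (not from the repository above): a stripped-down version of the MLP-building
# loop in GenericMLP, keeping only the Linear layers and the activation between hidden dims
# (norm, dropout and the Conv1d variant are omitted). Dimensions are toy values.
import torch
import torch.nn as nn
def build_mlp(input_dim, hidden_dims, output_dim, activation=nn.SiLU):
    layers, prev_dim = [], input_dim
    for hidden in hidden_dims:
        layers += [nn.Linear(prev_dim, hidden, bias=False), activation()]
        prev_dim = hidden
    layers.append(nn.Linear(prev_dim, output_dim))
    return nn.Sequential(*layers)
mlp = build_mlp(256, [128, 128], 64)
print(mlp(torch.randn(4, 256)).shape)  # torch.Size([4, 64])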
# Path: models/position_embedding.py
class PositionEmbeddingCoordsSine(nn.Module):
def __init__(
self,
temperature=10000,
normalize=False,
scale=None,
pos_type="fourier",
d_pos=None,
d_in=3,
gauss_scale=1.0,
):
super().__init__()
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
assert pos_type in ["sine", "fourier"]
self.pos_type = pos_type
self.scale = scale
if pos_type == "fourier":
assert d_pos is not None
assert d_pos % 2 == 0
# define a gaussian matrix input_ch -> output_ch
B = torch.empty((d_in, d_pos // 2)).normal_()
B *= gauss_scale
self.register_buffer("gauss_B", B)
self.d_pos = d_pos
def get_sine_embeddings(self, xyz, num_channels, input_range):
# clone coords so that shift/scale operations do not affect original tensor
orig_xyz = xyz
xyz = orig_xyz.clone()
ncoords = xyz.shape[1]
# if self.normalize:
# xyz = shift_scale_points(xyz, src_range=input_range)
ndim = num_channels // xyz.shape[2]
if ndim % 2 != 0:
ndim -= 1
        # automatically handle the remainder by assigning it to the first dim
rems = num_channels - (ndim * xyz.shape[2])
assert (
ndim % 2 == 0
), f"Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}"
final_embeds = []
prev_dim = 0
for d in range(xyz.shape[2]):
cdim = ndim
if rems > 0:
# add remainder in increments of two to maintain even size
cdim += 2
rems -= 2
if cdim != prev_dim:
dim_t = torch.arange(cdim, dtype=torch.float32, device=xyz.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / cdim)
            # create batch x cdim x ncoords embedding
raw_pos = xyz[:, :, d]
if self.scale:
raw_pos *= self.scale
pos = raw_pos[:, :, None] / dim_t
pos = torch.stack(
(pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3
).flatten(2)
final_embeds.append(pos)
prev_dim = cdim
final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
return final_embeds
def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None):
# Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html
if num_channels is None:
num_channels = self.gauss_B.shape[1] * 2
bsize, npoints = xyz.shape[0], xyz.shape[1]
assert num_channels > 0 and num_channels % 2 == 0
d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1]
d_out = num_channels // 2
assert d_out <= max_d_out
assert d_in == xyz.shape[-1]
# clone coords so that shift/scale operations do not affect original tensor
orig_xyz = xyz
xyz = orig_xyz.clone()
ncoords = xyz.shape[1]
# if self.normalize:
# xyz = shift_scale_points(xyz, src_range=input_range)
xyz *= 2 * np.pi
xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view(
bsize, npoints, d_out
)
final_embeds = [xyz_proj.sin(), xyz_proj.cos()]
# return batch x d_pos x npoints embedding
final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
return final_embeds
def forward(self, xyz, num_channels=None, input_range=None):
assert isinstance(xyz, torch.Tensor)
assert xyz.ndim == 3
# xyz is batch x npoints x 3
if self.pos_type == "sine":
with torch.no_grad():
return self.get_sine_embeddings(xyz, num_channels, input_range)
elif self.pos_type == "fourier":
with torch.no_grad():
return self.get_fourier_embeddings(xyz, num_channels, input_range)
else:
raise ValueError(f"Unknown {self.pos_type}")
def extra_repr(self):
st = f"type={self.pos_type}, scale={self.scale}, normalize={self.normalize}"
if hasattr(self, "gauss_B"):
st += (
f", gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}"
)
return st
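# Illustrative sketch (not from the repository above): the "fourier" branch of
# PositionEmbeddingCoordsSine, i.e. random Fourier features for xyz coordinates. Points are
# projected through a fixed Gaussian matrix and sin/cos of the projections are concatenated;
# the sizes below are toy values.
import math
import torch
d_in, d_pos = 3, 64
gauss_B = torch.randn(d_in, d_pos // 2)                  # fixed Gaussian projection (cf. gauss_B buffer)
xyz = torch.rand(2, 100, d_in) * 2 * math.pi             # (batch, npoints, 3), scaled as above
proj = (xyz.reshape(-1, d_in) @ gauss_B).view(2, 100, d_pos // 2)
emb = torch.cat([proj.sin(), proj.cos()], dim=-1).permute(0, 2, 1)
print(emb.shape)                                         # torch.Size([2, 64, 100]) = (batch, d_pos, npoints)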
# Path: models/position_embedding.py
class PositionalEmbedding(nn.Module):
def __init__(self, sigma=1, dim=4096):
super().__init__()
self.sigma = sigma
self.dim = dim // 2
self.w = torch.randn((self.dim, 3)) * sigma
self.w = nn.Parameter(self.w, requires_grad=True)
def forward(self, x):
bs, obj_num, _ = x.shape
x = x.reshape(-1, 3)
v = torch.cat([torch.sin(self.w.detach() @ x.T), torch.cos(self.w.detach() @ x.T)])
v = v.T.reshape(bs, obj_num, -1)
v_norm = v / v.norm(dim=-1).unsqueeze(-1)
return v_norm
# Path: models/chat3d.py
import random
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import contextlib
from abc import ABC
from torch.cuda.amp import autocast as autocast
from .modeling_llama import LlamaForCausalLM
from transformers import LlamaTokenizer, LlamaConfig
from models.transformer_vanilla import TransformerEncoder, CMT
from models.helpers import GenericMLP
from models.position_embedding import PositionEmbeddingCoordsSine, PositionalEmbedding
from transformers import StoppingCriteria, StoppingCriteriaList
logger = logging.getLogger(__name__)
class StoppingCriteriaSub(StoppingCriteria):
def __init__(self, stops=[], encounters=1):
super().__init__()
self.stops = stops
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor):
| for stop in self.stops: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SqueezeBits/owlite
# Path: owlite/calib/percentile_calibrator.py
class PercentileCalibrator(_HistogramCalibrator):
"""Percentile Calibrator Class
Attributes:
quantizer (FakeQuantizer): The `FakeQuantizer` module to be calibrated.
percentile (float): The desired percentile value, ranging from 0 to 100.
"""
def __init__(self, quantizer, percentile: float):
"""Initializes the percentile calibrator.
Args:
quantizer (FakeQuantizer): The `FakeQuantizer` module to be calibrated.
            percentile (float): The desired percentile value, ranging from 0 to 100.
Raises:
ValueError: If the percentile is outside the valid range [0, 100].
"""
super().__init__(quantizer)
if percentile < 0 or percentile > 100:
raise ValueError("percentile must be in range [0,100]")
self.percentile = percentile
def update(self):
# update step_size using "percentile"
# cumsum_cuda_kernel does not have a deterministic implementation
_deterministic_enable = torch.are_deterministic_algorithms_enabled()
if _deterministic_enable:
torch.use_deterministic_algorithms(False)
for chn, _ in enumerate(self.quantizer.histc_bins):
total = self.quantizer.histogram[chn].data.sum()
cdf = torch.cumsum(self.quantizer.histogram[chn].data / total, 0)
idx = torch.searchsorted(cdf, self.percentile / 100)
per_max = self.quantizer.bin_edges[chn].data[idx]
self.quantizer.step_size.data[chn] = (
(per_max / self.quantizer.maxabs_bound)
.detach()
.to(self.quantizer.step_size.device)
)
        # restore the original deterministic-algorithms setting
if _deterministic_enable:
torch.use_deterministic_algorithms(True)
# delete "histogram" attritbution from quantizer
for key in self.set_attr_list:
delattr(self.quantizer, key)
        # remove the registered forward hook
self.hook_handler.remove()
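# Illustrative sketch (not from the repository above): the percentile lookup performed in
# PercentileCalibrator.update — build a histogram of absolute activations, take its CDF, and read
# the bin edge at the requested percentile to obtain a clipping value / step size. Toy data and bins.
import torch
x = torch.randn(10_000).abs()
num_bins = 2048
hist = torch.histc(x, bins=num_bins, min=0.0, max=float(x.max()))
bin_edges = torch.linspace(0.0, float(x.max()), steps=num_bins + 1)
percentile = 99.9
cdf = torch.cumsum(hist / hist.sum(), dim=0)
idx = torch.searchsorted(cdf, torch.tensor(percentile / 100))
clip_max = bin_edges[idx]             # value below which 99.9% of the mass lies
step_size = clip_max / 127            # e.g. the resulting scale for signed 8-bit quantization
print(float(clip_max), float(step_size))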
# Path: owlite/enums/ptq_calibration_type.py
class PTQCalibrationType(Enum):
"""The enum for specifying available Calibrator classes"""
absmax = 0
percentile = 1
mse = 2
minmax = 3
@property
def calibrator_class(self) -> type:
"""The Calibrator class corresponding to this enum value"""
return {
"absmax": AbsmaxCalibrator,
"percentile": PercentileCalibrator,
"mse": MSECalibrator,
"minmax": MinmaxCalibrator,
}[self.name]
def __repr__(self) -> str:
return self.name
def __str__(self) -> str:
return self.name
# Path: owlite/enums/qat_backward_type.py
class QATBackwardType(Enum):
"""The enum for specifying available QAT backward functions"""
ste = 0
clq = 1
clq_plus = 2
@property
def function(self) -> Callable:
"""The apply method of the `torch.autograd.Function` class corresponding to this enum value"""
return {
"clq": clq_function,
"clq_plus": clq_plus_function,
"ste": ste_function,
}[self.name]
def __repr__(self) -> str:
return self.name
def __str__(self) -> str:
return self.name
# Path: owlite/logger.py
class Logger(logging.Logger):
class _WarningFilterContext:
class WarningFilter(logging.Filter):
ENV_VAR = "OWLITE_LOG_LEVEL"
DEBUG_WARNING = 15
ULTRA_VERBOSE = -10
def ignore_warnings(self):
def __init__(self, logger) -> None:
def __enter__(self):
def filter(self, record):
def __exit__(self, exc_type, exc_val, exc_tb):
def debug_warning(self, msg, *args, **kwargs):
def level(self) -> int:
def level(self, value):
def suppress_owlite_warnings(cls):
def new_init(self, *args, **kwargs):
# Path: owlite/nn/functions/fake_quantize.py
def fake_quantize(
inputs: Union[Tensor, float],
step_size: torch.Tensor,
zero_point: torch.Tensor,
quant_min: Union[IntTensor, int],
quant_max: Union[IntTensor, int],
per_channel: Union[torch.BoolTensor, bool],
axis: int = 0,
) -> torch.Tensor:
# Path: owlite/options/fake_quantizer_options.py
class FakeQuantizerOptions(OptionsMixin):
"""Options required for setting up a quantizer"""
qat_backward: QATBackwardType = QATBackwardType.ste
ptq_calibration: PTQCalibrationType = PTQCalibrationType.absmax
precision: int = 8
per_channel: bool = False
symmetric: bool = True
learn_zero_point: bool = False
unsigned: bool = False
grad_scale: float = 1.000
percentile: Optional[float] = None
@staticmethod
def ste_per_channel(**kwargs: Any) -> "FakeQuantizerOptions":
"""A convenience wrapper for creating options with "ste" backward and per channel quantization"""
return FakeQuantizerOptions(qat_backward=QATBackwardType.ste, per_channel=True, **kwargs)
@staticmethod
def ste_per_tensor(**kwargs: Any) -> "FakeQuantizerOptions":
"""A convenience wrapper for creating options with "ste" backward and per tensor quantization"""
return FakeQuantizerOptions(qat_backward=QATBackwardType.ste, per_channel=False, **kwargs)
@staticmethod
def clq_per_channel(**kwargs: Any) -> "FakeQuantizerOptions":
"""A convenience wrapper for creating options with "clq" backward and per channel quantization"""
return FakeQuantizerOptions(qat_backward=QATBackwardType.clq, per_channel=True, **kwargs)
@staticmethod
def clq_per_tensor(**kwargs: Any) -> "FakeQuantizerOptions":
"""A convenience wrapper for creating options with "clq" backward and per tensor quantization"""
return FakeQuantizerOptions(qat_backward=QATBackwardType.clq, per_channel=False, **kwargs)
def check_precision(self, precision: int) -> bool:
"""precision must be one of 4, 8 or 16"""
return precision in (4, 8, 16)
def check_percentile(self, percentile: Optional[float]) -> bool:
"""
if `ptq_calibration="percentile"`, `percentile` value must be provided
and it must be between 0 and 100 (exclusive). Otherwise, its value is ignored.
"""
if self.ptq_calibration == PTQCalibrationType.percentile:
return percentile is not None and 0 < percentile < 100
if percentile is not None:
log.warning(
'`percentile` is used only when `ptq_calibration="percentile"`.'
f"The given percentile value {percentile} will be ignored",
stacklevel=2,
)
return True
def check_grad_scale(self, grad_scale: float) -> bool:
"""grad_scale value must be between 0 and 1 (inclusive)"""
return 0 <= grad_scale <= 1
def check_learn_zero_point(self, learn_zero_point: bool) -> bool:
"""`learn_zero_point` must be False if `symmetric=True`"""
return not (self.symmetric and learn_zero_point)
def check_per_channel(self, per_channel: bool) -> bool:
"""`per_channel=True` is not compatible with `symmetric=False`"""
return self.symmetric or not per_channel
def check_symmetric(self, symmetric: bool) -> bool:
"""
* `learn_zero_point` must be False if `symmetric=True`
* `ptq_calibration="absmax"` is not compatible with `symmetric=False`
* `symmetric=False` is not compatible with `per_channel=True`
"""
if not symmetric and self.per_channel:
log.warning(
"asymmetric per channel quantization is not supported.",
stacklevel=2,
)
return False
if symmetric and self.learn_zero_point:
log.warning(
"`learn_zero_point` will be automatically set to False as `symmetric` is being set to True",
stacklevel=2,
)
self.learn_zero_point = False
if not symmetric and self.ptq_calibration == PTQCalibrationType.absmax:
log.warning(
"`ptq_calibration` will be automatically set to `minmax` as `symmetric` is being set to False",
stacklevel=2,
)
self.ptq_calibration = PTQCalibrationType.minmax
return True
def check_ptq_calibration(self, ptq_calibration: PTQCalibrationType) -> bool:
"""
* if `symmetric=False`, `ptq_calibration` must not be 'absmax'
* if `ptq_calibration="percentile"` and `percentile` is None, it will be automatically set to 99.99
"""
if not self.symmetric and ptq_calibration == PTQCalibrationType.absmax:
return False
if ptq_calibration == PTQCalibrationType.percentile and self.percentile is None:
log.warning(
'`ptq_calibration="percentile"` requires a `percentile` value.'
"Will set `percentile` to 99.99 automatically.",
stacklevel=2,
)
with log.ignore_warnings():
self.percentile = 99.99
if ptq_calibration != PTQCalibrationType.percentile and self.percentile is not None:
log.warning(
'`percentile` is used only when `ptq_calibration="percentile"`.'
f"The percentile value {self.percentile} will be ignored.",
stacklevel=2,
)
return True
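# Illustrative sketch (not from the repository above): the symmetric per-tensor fake quantization
# that these options configure — an "absmax" step size, rounding to the integer grid, clamping,
# and dequantizing back to float. Precision and data are toy values.
import torch
def toy_fake_quantize(x, precision=8):
    quant_max = 2 ** (precision - 1) - 1        # 127 for 8 bits
    quant_min = -quant_max - 1                  # -128 for 8 bits
    step_size = x.abs().max() / quant_max       # absmax calibration
    q = torch.clamp(torch.round(x / step_size), quant_min, quant_max)
    return q * step_size                        # still float, but restricted to the integer grid
x = torch.randn(4, 16)
x_q = toy_fake_quantize(x)
print(float((x - x_q).abs().max()))             # error bounded by roughly step_size / 2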
# Path: owlite/nn/fake_quantizer.py
from collections import OrderedDict
from typing import Any, Optional, Union
from ..calib import PercentileCalibrator
from ..enums import PTQCalibrationType, QATBackwardType
from ..logger import log
from ..nn.functions.fake_quantize import FakeQuantFunc
from ..options.fake_quantizer_options import FakeQuantizerOptions
import torch
"""An implementation of fake quantization (a.k.a. quantization simulation)
Attributes:
step_size (torch.Tensor): The quantization scale, determining the magnitude of each quantization interval.
zero_point (torch.Tensor): The quantization zero_point. It may be expressed as a float in the context
            of asymmetric quantization, while for symmetric quantization it is fixed at a zero tensor.
precision (torch.IntTensor): The number of bits used for quantization.
symmetric (torch.BoolTensor): Whether symmetric quantization is applied.
unsigned (torch.BoolTensor): Whether unsigned quantization is applied
per_channel (torch.BoolTensor): Whether per-channel quantization or per-tensor quantization is applied
learn_zero_point (torch.BoolTensor): whether the zero point is learnable.
grad_scale (torch.FloatTensor): The gradient scaling factor of quantization parameters.
_narrow_range (torch.BoolTensor): Whether a narrow range is used in quantization.
"""
precision: torch.IntTensor
symmetric: torch.BoolTensor
unsigned: torch.BoolTensor
per_channel: torch.BoolTensor
learn_zero_point: torch.BoolTensor
grad_scale: torch.FloatTensor
_narrow_range: torch.BoolTensor
@classmethod
def create(
cls,
options: Optional[FakeQuantizerOptions],
channel_size: Optional[int] = None,
enable: bool = True,
narrow_range: bool = False,
) -> Optional["FakeQuantizer"]:
"""Creates a `FakeQuantizer` instance if options is not `None`, otherwise returns `None`
Args:
            options (Optional[FakeQuantizerOptions]): Options for the fake quantizer to return. If `None`,
                does not create a fake quantizer.
channel_size (Optional[int], optional): Channel size of per-channel quantization. Not used in
per-tensor quantization. If `None`, no channel size is set. Defaults to `None`.
            enable (bool, optional): If True, returns the quantizer enabled. If False, returns the quantizer
                disabled. Defaults to `True`.
            narrow_range (bool, optional): If True, returns the quantizer with a narrow range. If False, it
                does not have a narrow range. Defaults to `False`.
Returns:
Optional[FakeQuantizer]: If the `options` is valid for quantization returns created fake quantizer.
Otherwise return `None`.
"""
if options is None or options.precision > 8:
return None
return FakeQuantizer(options, channel_size, enable, narrow_range)
def __init__(
self,
options: FakeQuantizerOptions,
channel_size: Optional[int] = None,
enable: bool = True,
narrow_range: bool = False,
):
"""Initializes a FakeQuantizer instance.
Args:
options (QuantizerOptions): options
channel_size (Optional[int], optional): The channel size for per-channel quantization. Defaults to None.
                This value is required only when `options.per_channel` is `True`; otherwise it has no effect.
                It can be set after the object is instantiated, but must be set before calling its `forward` method.
enable (bool, optional): whether to enable this quantizer object as soon as it is initialized.
Defaults to True.
narrow_range (bool, optional): Use symmetric integer range for signed quantization
                e.g. [-127, 127] instead of [-128, 127] for num_bits=8. Defaults to False.
Raises:
ValueError: if `options.ptq_calibration` is "percentile" but `options.percentile` is `None`.
"""
super().__init__()
self.register_buffer("precision", torch.tensor(options.precision))
self.register_buffer("symmetric", torch.tensor(options.symmetric))
self.register_buffer("unsigned", torch.tensor(options.unsigned))
self.register_buffer("per_channel", torch.tensor(options.per_channel))
if not self.symmetric.item() and self.per_channel.item():
raise RuntimeError("asymmetric per_channel quantization is not available")
self.register_buffer("learn_zero_point", torch.tensor(options.learn_zero_point))
self.register_buffer("grad_scale", torch.tensor(options.grad_scale))
if narrow_range and not (self.symmetric.item() and not self.unsigned.item()):
log.warning(
"narrow_range should only be used with symmetric signed quantization.\n"
"(narrow_range, symmetric, unsigned) = "
f"({narrow_range}, {self.symmetric.item()}, {self.unsigned.item()})"
)
self.register_buffer("_narrow_range", torch.tensor(narrow_range))
if self.per_channel:
if channel_size is not None:
self.channel_size = channel_size
else:
self.step_size = torch.nn.Parameter(torch.ones(1))
self.zero_point = torch.nn.Parameter(
torch.zeros(1),
requires_grad=bool(not self.symmetric.item() and self.learn_zero_point.item()),
)
self._is_enabled = enable
self.is_zero_point_folded = False
self.qat_backward_type = options.qat_backward
self.ptq_calibration = options.ptq_calibration
calibrator_class = options.ptq_calibration.calibrator_class
if options.ptq_calibration == PTQCalibrationType.percentile:
if options.percentile is None:
raise ValueError("percentile value is required for percentile PTQ calibrator")
self.calibrator = calibrator_class(self, options.percentile)
else:
self.calibrator = calibrator_class(self)
@property
def qat_function(
self,
) -> FakeQuantFunc:
"""The autograd function providing forward and backward methods of this fake quantizer
for the quantization-aware training"""
return self.qat_backward_type.function
@property
| def channel_size(self) -> Optional[int]: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ximinng/PyTorch-SVGRender
# Path: pytorch_svgrender/diffvg_warp/diffvg_state.py
class DiffVGState(torch.nn.Module):
def __init__(self,
device: torch.device,
use_gpu: bool = torch.cuda.is_available(),
print_timing: bool = False,
canvas_width: int = None,
canvas_height: int = None):
super(DiffVGState, self).__init__()
# pydiffvg device setting
self.device = device
init_pydiffvg(device, use_gpu, print_timing)
# canvas size
self.canvas_width = canvas_width
self.canvas_height = canvas_height
# record all paths
self.shapes = []
self.shape_groups = []
# record the current optimized path
self.cur_shapes = []
self.cur_shape_groups = []
# learnable SVG params
self.point_vars = []
self.color_vars = []
self.width_vars = []
def clip_curve_shape(self, *args, **kwargs):
raise NotImplementedError
def render_warp(self, seed=0):
self.clip_curve_shape()
scene_args = pydiffvg.RenderFunction.serialize_scene(
self.canvas_width, self.canvas_height, self.shapes, self.shape_groups
)
_render = pydiffvg.RenderFunction.apply
img = _render(self.canvas_width, # width
self.canvas_height, # height
2, # num_samples_x
2, # num_samples_y
seed, # seed
None,
*scene_args)
return img
@staticmethod
def load_svg(path_svg):
canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg)
return canvas_width, canvas_height, shapes, shape_groups
def save_svg(self,
filename: Union[AnyStr, pathlib.Path],
width: int = None,
height: int = None,
shapes: List = None,
shape_groups: List = None,
use_gamma: bool = False,
background: str = None):
"""
Save an SVG file with specified parameters and shapes.
        Note: a newer version of the SVG saving function, adapted from pydiffvg.save_svg.
        The original version saved words with incomplete glyphs.
Args:
filename (str): The path to save the SVG file.
width (int): The width of the SVG canvas.
height (int): The height of the SVG canvas.
shapes (list): A list of shapes to be included in the SVG.
shape_groups (list): A list of shape groups.
use_gamma (bool): Flag indicating whether to apply gamma correction.
background (str, optional): The background color of the SVG.
Returns:
None
"""
root = etree.Element('svg')
root.set('version', '1.1')
root.set('xmlns', 'http://www.w3.org/2000/svg')
root.set('width', str(width))
root.set('height', str(height))
if background is not None:
print(f"setting background to {background}")
root.set('style', str(background))
defs = etree.SubElement(root, 'defs')
g = etree.SubElement(root, 'g')
if use_gamma:
f = etree.SubElement(defs, 'filter')
f.set('id', 'gamma')
f.set('x', '0')
f.set('y', '0')
f.set('width', '100%')
f.set('height', '100%')
gamma = etree.SubElement(f, 'feComponentTransfer')
gamma.set('color-interpolation-filters', 'sRGB')
feFuncR = etree.SubElement(gamma, 'feFuncR')
feFuncR.set('type', 'gamma')
feFuncR.set('amplitude', str(1))
feFuncR.set('exponent', str(1 / 2.2))
feFuncG = etree.SubElement(gamma, 'feFuncG')
feFuncG.set('type', 'gamma')
feFuncG.set('amplitude', str(1))
feFuncG.set('exponent', str(1 / 2.2))
feFuncB = etree.SubElement(gamma, 'feFuncB')
feFuncB.set('type', 'gamma')
feFuncB.set('amplitude', str(1))
feFuncB.set('exponent', str(1 / 2.2))
feFuncA = etree.SubElement(gamma, 'feFuncA')
feFuncA.set('type', 'gamma')
feFuncA.set('amplitude', str(1))
feFuncA.set('exponent', str(1 / 2.2))
g.set('style', 'filter:url(#gamma)')
# Store color
for i, shape_group in enumerate(shape_groups):
def add_color(shape_color, name):
if isinstance(shape_color, pydiffvg.LinearGradient):
lg = shape_color
color = etree.SubElement(defs, 'linearGradient')
color.set('id', name)
color.set('x1', str(lg.begin[0].item()))
color.set('y1', str(lg.begin[1].item()))
color.set('x2', str(lg.end[0].item()))
color.set('y2', str(lg.end[1].item()))
offsets = lg.offsets.data.cpu().numpy()
stop_colors = lg.stop_colors.data.cpu().numpy()
for j in range(offsets.shape[0]):
stop = etree.SubElement(color, 'stop')
stop.set('offset', str(offsets[j]))
c = lg.stop_colors[j, :]
stop.set('stop-color', 'rgb({}, {}, {})'.format(
int(255 * c[0]), int(255 * c[1]), int(255 * c[2])
))
stop.set('stop-opacity', '{}'.format(c[3]))
if isinstance(shape_color, pydiffvg.RadialGradient):
lg = shape_color
color = etree.SubElement(defs, 'radialGradient')
color.set('id', name)
color.set('cx', str(lg.center[0].item() / width))
color.set('cy', str(lg.center[1].item() / height))
                    # this only supports width == height
color.set('r', str(lg.radius[0].item() / width))
offsets = lg.offsets.data.cpu().numpy()
stop_colors = lg.stop_colors.data.cpu().numpy()
for j in range(offsets.shape[0]):
stop = etree.SubElement(color, 'stop')
stop.set('offset', str(offsets[j]))
c = lg.stop_colors[j, :]
stop.set('stop-color', 'rgb({}, {}, {})'.format(
int(255 * c[0]), int(255 * c[1]), int(255 * c[2])
))
stop.set('stop-opacity', '{}'.format(c[3]))
if shape_group.fill_color is not None:
add_color(shape_group.fill_color, 'shape_{}_fill'.format(i))
if shape_group.stroke_color is not None:
add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i))
for i, shape_group in enumerate(shape_groups):
shape = shapes[shape_group.shape_ids[0]]
if isinstance(shape, pydiffvg.Circle):
shape_node = etree.SubElement(g, 'circle')
shape_node.set('r', str(shape.radius.item()))
shape_node.set('cx', str(shape.center[0].item()))
shape_node.set('cy', str(shape.center[1].item()))
elif isinstance(shape, pydiffvg.Polygon):
shape_node = etree.SubElement(g, 'polygon')
points = shape.points.data.cpu().numpy()
path_str = ''
for j in range(0, shape.points.shape[0]):
path_str += '{} {}'.format(points[j, 0], points[j, 1])
if j != shape.points.shape[0] - 1:
path_str += ' '
shape_node.set('points', path_str)
elif isinstance(shape, pydiffvg.Path):
for j, id in enumerate(shape_group.shape_ids):
shape = shapes[id]
if isinstance(shape, pydiffvg.Path):
if j == 0:
shape_node = etree.SubElement(g, 'path')
node_id = shape_node.get('id')
path_str = ''
num_segments = shape.num_control_points.shape[0]
num_control_points = shape.num_control_points.data.cpu().numpy()
points = shape.points.data.cpu().numpy()
num_points = shape.points.shape[0]
path_str += 'M {} {}'.format(points[0, 0], points[0, 1])
point_id = 1
for j in range(0, num_segments):
if num_control_points[j] == 0:
p = point_id % num_points
path_str += ' L {} {}'.format(
points[p, 0], points[p, 1])
point_id += 1
elif num_control_points[j] == 1:
p1 = (point_id + 1) % num_points
path_str += ' Q {} {} {} {}'.format(
points[point_id, 0], points[point_id, 1],
points[p1, 0], points[p1, 1])
point_id += 2
elif num_control_points[j] == 2:
p2 = (point_id + 2) % num_points
path_str += ' C {} {} {} {} {} {}'.format(
points[point_id, 0], points[point_id, 1],
points[point_id + 1, 0], points[point_id + 1, 1],
points[p2, 0], points[p2, 1])
point_id += 3
if node_id is not None:
shape_node.set('id', node_id) # add id to Path
shape_node.set('d', path_str)
elif isinstance(shape, pydiffvg.Rect):
shape_node = etree.SubElement(g, 'rect')
shape_node.set('x', str(shape.p_min[0].item()))
shape_node.set('y', str(shape.p_min[1].item()))
shape_node.set('width', str(shape.p_max[0].item() - shape.p_min[0].item()))
shape_node.set('height', str(shape.p_max[1].item() - shape.p_min[1].item()))
elif isinstance(shape, pydiffvg.Ellipse):
shape_node = etree.SubElement(g, 'ellipse')
shape_node.set('cx', str(shape.center[0].item()))
shape_node.set('cy', str(shape.center[1].item()))
shape_node.set('rx', str(shape.radius[0].item()))
shape_node.set('ry', str(shape.radius[1].item()))
else:
raise NotImplementedError(f'shape type: {type(shape)} is not involved in pydiffvg.')
shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item()))
if shape_group.fill_color is not None:
if isinstance(shape_group.fill_color, pydiffvg.LinearGradient):
shape_node.set('fill', 'url(#shape_{}_fill)'.format(i))
else:
c = shape_group.fill_color.data.cpu().numpy()
shape_node.set('fill', 'rgb({}, {}, {})'.format(
int(255 * c[0]), int(255 * c[1]), int(255 * c[2])))
shape_node.set('opacity', str(c[3]))
else:
shape_node.set('fill', 'none')
if shape_group.stroke_color is not None:
if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient):
shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i))
else:
c = shape_group.stroke_color.data.cpu().numpy()
shape_node.set('stroke', 'rgb({}, {}, {})'.format(
int(255 * c[0]), int(255 * c[1]), int(255 * c[2])))
shape_node.set('stroke-opacity', str(c[3]))
shape_node.set('stroke-linecap', 'round')
shape_node.set('stroke-linejoin', 'round')
with open(filename, "w") as f:
f.write(pydiffvg.prettify(root))
@staticmethod
def save_image(img, filename, gamma=1):
        if torch.is_tensor(img):
img = img.detach().cpu()
pydiffvg.imwrite(img, filename, gamma=gamma)
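# Illustrative sketch (not from the repository above): writing a tiny SVG by hand with the standard
# library, mirroring the structure produced by save_svg (an <svg> root, a <g> group, and shape
# nodes carrying fill/stroke attributes). It does not depend on pydiffvg; the file name is arbitrary.
import xml.etree.ElementTree as ET
root = ET.Element("svg", {
    "xmlns": "http://www.w3.org/2000/svg",
    "version": "1.1",
    "width": "64",
    "height": "64",
})
g = ET.SubElement(root, "g")
ET.SubElement(g, "circle", {
    "cx": "32", "cy": "32", "r": "16",
    "fill": "rgb(200, 50, 50)", "stroke": "none",
})
ET.ElementTree(root).write("toy.svg", xml_declaration=True, encoding="utf-8")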
# Path: pytorch_svgrender/painter/wordasimage/ttf.py
def font_string_to_beziers(font, txt, size=30, spacing=1.0, merge=True, target_control=None):
"""
    Load a font and convert the outlines for a given string to cubic Bezier curves.
    If merge is True, simply return a flat list of all Bezier curves;
    otherwise return a list of lists with the Bezier curves for each glyph.
"""
face = ft.Face(font)
face.set_char_size(64 * size)
slot = face.glyph
x = 0
beziers = []
previous = 0
for c in txt:
face.load_char(c, ft.FT_LOAD_DEFAULT | ft.FT_LOAD_NO_BITMAP)
bez = glyph_to_cubics(face, x)
# Check number of control points if desired
if target_control is not None:
if c in target_control.keys():
nctrl = np.sum([len(C) for C in bez])
while nctrl < target_control[c]:
longest = np.max(
sum([[bezier.approx_arc_length(b) for b in bezier.chain_to_beziers(C)] for C in bez], []))
thresh = longest * 0.5
bez = [bezier.subdivide_bezier_chain(C, thresh) for C in bez]
nctrl = np.sum([len(C) for C in bez])
print("nctrl: ", nctrl)
if merge:
beziers += bez
else:
beziers.append(bez)
kerning = face.get_kerning(previous, c)
x += (slot.advance.x + kerning.x) * spacing
previous = c
return beziers
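# Illustrative sketch (not from the repository above): the two Bezier utilities the loop above
# relies on — approximate arc length (by sampling) and subdivision via de Casteljau's algorithm.
# The repo's `bezier` module is not used here; the control points are arbitrary.
import numpy as np
def cubic_point(p, t):
    # evaluate a cubic Bezier with control points p of shape (4, 2) at parameter t
    a = (1 - t) * p[:-1] + t * p[1:]
    b = (1 - t) * a[:-1] + t * a[1:]
    return (1 - t) * b[0] + t * b[1]
def approx_arc_length(p, samples=64):
    pts = np.stack([cubic_point(p, t) for t in np.linspace(0.0, 1.0, samples)])
    return np.linalg.norm(np.diff(pts, axis=0), axis=1).sum()
def subdivide(p, t=0.5):
    # split one cubic into two cubics that meet at the point at parameter t
    a = (1 - t) * p[:-1] + t * p[1:]
    b = (1 - t) * a[:-1] + t * a[1:]
    c = (1 - t) * b[0] + t * b[1]
    return np.stack([p[0], a[0], b[0], c]), np.stack([c, b[1], a[2], p[3]])
ctrl = np.array([[0.0, 0.0], [10.0, 20.0], [30.0, 20.0], [40.0, 0.0]])
left, right = subdivide(ctrl)
print(approx_arc_length(ctrl), approx_arc_length(left) + approx_arc_length(right))  # nearly equal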
# Path: pytorch_svgrender/painter/wordasimage/ttf.py
def write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path):
cmds = ''
svg = header
path = '<g><path d="'
for C in beziers:
if subdivision_thresh is not None:
print('subd')
C = bezier.subdivide_bezier_chain(C, subdivision_thresh)
cmds += bezier_chain_to_commands(C, True)
path += cmds + '"/>\n'
svg += path + '</g></svg>\n'
fname = f"{dest_path}/{fontname}_{c}.svg"
fname = fname.replace(" ", "_")
with open(fname, 'w') as f:
f.write(svg)
return fname, path
# Path: pytorch_svgrender/painter/wordasimage/painter_params.py
import os
import pathlib
import numpy as np
import pydiffvg
import torch
from torch.optim.lr_scheduler import LambdaLR
from pytorch_svgrender.diffvg_warp import DiffVGState
from .ttf import font_string_to_beziers, write_letter_svg
size = rb - lt
sizestr = 'width="%.1f" height="%.1f"' % (size[0], size[1])
boxstr = ' viewBox="%.1f %.1f %.1f %.1f"' % (lt[0], lt[1], size[0], size[1])
header = '''<?xml version="1.0" encoding="utf-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" baseProfile="full" '''
header += sizestr
header += boxstr
header += '>\n<defs/>\n'
svg_all = header
for i, (c, beziers) in enumerate(zip(txt, glyph_beziers)):
fname, path = write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path)
num_cp = self.count_cp(fname)
print(f"Total control point: {num_cp} -- {c}")
# Add to global svg
svg_all += path + '</g>\n'
# Save global svg
svg_all += '</svg>\n'
fname = f"{dest_path}/{fontname}_{txt}.svg"
fname = fname.replace(" ", "_")
with open(fname, 'w') as f:
f.write(svg_all)
def count_cp(self, file_name):
canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(file_name)
p_counter = 0
for path in shapes:
p_counter += path.points.shape[0]
return p_counter
def normalize_letter_size(self, dest_path, font, txt):
fontname = os.path.splitext(os.path.basename(font))[0]
for i, c in enumerate(txt):
fname = f"{dest_path}/{fontname}_{c}.svg"
fname = fname.replace(" ", "_")
self.fix_single_svg(fname)
fname = f"{dest_path}/{fontname}_{txt}.svg"
fname = fname.replace(" ", "_")
self.fix_single_svg(fname, all_word=True)
def fix_single_svg(self, svg_path, all_word=False):
target_h_letter = 360
target_canvas_width, target_canvas_height = 600, 600
canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_path)
letter_h = canvas_height
letter_w = canvas_width
if all_word:
if letter_w > letter_h:
scale_canvas_w = target_h_letter / letter_w
hsize = int(letter_h * scale_canvas_w)
scale_canvas_h = hsize / letter_h
else:
scale_canvas_h = target_h_letter / letter_h
wsize = int(letter_w * scale_canvas_h)
scale_canvas_w = wsize / letter_w
else:
scale_canvas_h = target_h_letter / letter_h
wsize = int(letter_w * scale_canvas_h)
scale_canvas_w = wsize / letter_w
for num, p in enumerate(shapes):
p.points[:, 0] = p.points[:, 0] * scale_canvas_w
p.points[:, 1] = p.points[:, 1] * scale_canvas_h + target_h_letter
w_min = min([torch.min(p.points[:, 0]) for p in shapes])
w_max = max([torch.max(p.points[:, 0]) for p in shapes])
h_min = min([torch.min(p.points[:, 1]) for p in shapes])
h_max = max([torch.max(p.points[:, 1]) for p in shapes])
for num, p in enumerate(shapes):
p.points[:, 0] = p.points[:, 0] + (target_canvas_width / 2) - int(w_min + (w_max - w_min) / 2)
p.points[:, 1] = p.points[:, 1] + (target_canvas_height / 2) - int(h_min + (h_max - h_min) / 2)
output_path = f"{svg_path[:-4]}_scaled.svg"
print("output_path: ", output_path)
self.save_svg(output_path, target_canvas_width, target_canvas_height, shapes, shape_groups)
def combine_word(self, word, letter, font, results_dir):
word_svg_scaled = results_dir / f"{font}_{word}_scaled.svg"
canvas_width_word, canvas_height_word, shapes_word, shape_groups_word = pydiffvg.svg_to_scene(word_svg_scaled)
letter_ids = []
for l in letter:
letter_ids += self.get_letter_ids(l, word, shape_groups_word)
w_min, w_max = min([torch.min(shapes_word[ids].points[:, 0]) for ids in letter_ids]), max(
[torch.max(shapes_word[ids].points[:, 0]) for ids in letter_ids])
h_min, h_max = min([torch.min(shapes_word[ids].points[:, 1]) for ids in letter_ids]), max(
[torch.max(shapes_word[ids].points[:, 1]) for ids in letter_ids])
c_w = (-w_min + w_max) / 2
c_h = (-h_min + h_max) / 2
svg_result = results_dir / "final_letter.svg"
canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(svg_result)
out_w_min, out_w_max = min([torch.min(p.points[:, 0]) for p in shapes]), max(
[torch.max(p.points[:, 0]) for p in shapes])
out_h_min, out_h_max = min([torch.min(p.points[:, 1]) for p in shapes]), max(
[torch.max(p.points[:, 1]) for p in shapes])
out_c_w = (-out_w_min + out_w_max) / 2
out_c_h = (-out_h_min + out_h_max) / 2
scale_canvas_w = (w_max - w_min) / (out_w_max - out_w_min)
scale_canvas_h = (h_max - h_min) / (out_h_max - out_h_min)
if scale_canvas_h > scale_canvas_w:
wsize = int((out_w_max - out_w_min) * scale_canvas_h)
scale_canvas_w = wsize / (out_w_max - out_w_min)
shift_w = -out_c_w * scale_canvas_w + c_w
else:
hsize = int((out_h_max - out_h_min) * scale_canvas_w)
| scale_canvas_h = hsize / (out_h_max - out_h_min) |
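The fix_single_svg and combine_word methods above both come down to the same geometric step: rescale a set of 2D control points to a target height, then recenter their bounding box on the canvas. A minimal sketch of that step, assuming only NumPy (normalize_points and the default sizes are illustrative, not the repository's API):

import numpy as np

def normalize_points(points, target_h=360.0, canvas_w=600.0, canvas_h=600.0):
    """Scale points so their height matches target_h, then center the bounding box on the canvas."""
    points = np.asarray(points, dtype=np.float64)
    h = points[:, 1].max() - points[:, 1].min()
    scale = target_h / h if h > 0 else 1.0
    scaled = points * scale
    center = (scaled.max(axis=0) + scaled.min(axis=0)) / 2.0
    return scaled + np.array([canvas_w / 2.0, canvas_h / 2.0]) - center

pts = np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 20.0], [0.0, 20.0]])
print(normalize_points(pts))  # a 10x20 box scaled to height 360 and centered at (300, 300)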
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lyhisme/DeST
# Path: libs/class_id_map.py
def get_id2class_map(dataset: str, dataset_dir: str = "./dataset") -> Dict[int, str]:
class2id_map = get_class2id_map(dataset, dataset_dir)
return {val: key for key, val in class2id_map.items()}
# Path: libs/metric.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name: str, fmt: str = ":f") -> None:
self.name = name
self.fmt = fmt
self.reset()
def reset(self) -> None:
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val: float, n: int = 1) -> None:
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self) -> str:
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
# Path: libs/metric.py
class BoundaryScoreMeter(object):
def __init__(self, tolerance=5, boundary_threshold=0.7):
# max distance of the frame which can be regarded as correct
self.tolerance = tolerance
# threshold of the boundary value which can be regarded as action boundary
self.boundary_threshold = boundary_threshold
self.tp = 0.0 # true positive
self.fp = 0.0 # false positive
self.fn = 0.0 # false negative
self.n_correct = 0.0
self.n_frames = 0.0
def update(self, preds, gts, masks):
"""
Args:
preds: np.array. the model output(N, T)
gts: np.array. boundary ground truth array (N, T)
masks: np.array. np.bool. valid length for each video (N, T)
Return:
Accuracy
Boundary F1 Score
"""
for pred, gt, mask in zip(preds, gts, masks):
# ignore invalid frames
pred = pred[mask]
gt = gt[mask]
pred_idx = argrelmax(pred, threshold=self.boundary_threshold)
gt_idx = argrelmax(gt, threshold=self.boundary_threshold)
n_frames = pred.shape[0]
tp = 0.0
fp = 0.0
fn = 0.0
hits = np.zeros(len(gt_idx))
# calculate true positive, false negative, false positive, true negative
for i in range(len(pred_idx)):
dist = np.abs(np.array(gt_idx) - pred_idx[i])
min_dist = np.min(dist)
idx = np.argmin(dist)
if min_dist <= self.tolerance and hits[idx] == 0:
tp += 1
hits[idx] = 1
else:
fp += 1
fn = len(gt_idx) - sum(hits)
tn = n_frames - tp - fp - fn
self.tp += tp
self.fp += fp
self.fn += fn
self.n_frames += n_frames
self.n_correct += tp + tn
def get_scores(self):
"""
Return:
Accuracy
Boundary F1 Score
"""
# accuracy
acc = 100 * self.n_correct / self.n_frames
# Boundary F1 Score
precision = self.tp / float(self.tp + self.fp)
recall = self.tp / float(self.tp + self.fn)
f1s = 2.0 * (precision * recall) / (precision + recall + 1e-7)
f1s = np.nan_to_num(f1s) * 100
# Accuracy, Edit Distance, F1 Score
return acc, precision * 100, recall * 100, f1s
def save_scores(self, save_path: str) -> None:
acc, precision, recall, f1s = self.get_scores()
# save log
columns = ["bound_acc", "precision", "recall", "bound_f1s"]
data_dict = {
"bound_acc": [acc],
"precision": [precision],
"recall": [recall],
"bound_f1s": [f1s],
}
df = pd.DataFrame(data_dict, columns=columns)
df.to_csv(save_path, index=False)
def reset(self):
self.tp = 0.0 # true positive
self.fp = 0.0 # false positive
self.fn = 0.0 # false negative
self.n_correct = 0.0
self.n_frames = 0.0
# Path: libs/metric.py
class ScoreMeter(object):
def __init__(
self,
id2class_map: Dict[int, str],
iou_thresholds: Tuple[float] = (0.1, 0.25, 0.5),
ignore_index: int = 255,
) -> None:
self.iou_thresholds = iou_thresholds # threshold for f score
self.ignore_index = ignore_index
self.id2class_map = id2class_map
self.edit_score = 0
self.tp = [0 for _ in range(len(iou_thresholds))] # true positive
self.fp = [0 for _ in range(len(iou_thresholds))] # false positive
self.fn = [0 for _ in range(len(iou_thresholds))] # false negative
self.n_correct = 0
self.n_frames = 0
self.n_videos = 0
self.n_classes = len(self.id2class_map)
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
def _fast_hist(self, pred: np.ndarray, gt: np.ndarray) -> np.ndarray:
mask = (gt >= 0) & (gt < self.n_classes)
hist = np.bincount(
self.n_classes * gt[mask].astype(int) + pred[mask],
minlength=self.n_classes ** 2,
).reshape(self.n_classes, self.n_classes)
return hist
def update(
self,
outputs: np.ndarray,
gts: np.ndarray,
boundaries: Optional[np.ndarray] = None,
masks: Optional[np.ndarray] = None,
) -> None:
"""
Args:
outputs: np.array. shape(N, C, T)
the model output for frame-level class prediction
gt: np.array. shape(N, T)
Ground Truth class labels
"""
if len(outputs.shape) == 3:
preds = outputs.argmax(axis=1)
elif len(outputs.shape) == 2:
preds = copy.copy(outputs)
for pred, gt in zip(preds, gts):
pred = pred[gt != self.ignore_index]
gt = gt[gt != self.ignore_index]
for lt, lp in zip(pred, gt):
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten())
self.n_videos += 1
# count the correct frame
self.n_frames += len(pred)
for i in range(len(pred)):
if pred[i] == gt[i]:
self.n_correct += 1
# calculate the edit distance
p_label, p_start, p_end = get_segments(pred, self.id2class_map)
g_label, g_start, g_end = get_segments(gt, self.id2class_map)
self.edit_score += levenshtein(p_label, g_label, norm=True)
for i, th in enumerate(self.iou_thresholds):
tp, fp, fn = get_n_samples(
p_label, p_start, p_end, g_label, g_start, g_end, th
)
self.tp[i] += tp
self.fp[i] += fp
self.fn[i] += fn
def get_scores(self) -> Tuple[float, float, float]:
"""
Return:
Accuracy
Normalized Edit Distance
F1 Score of Each Threshold
"""
# accuracy
acc = 100 * float(self.n_correct) / self.n_frames
# edit distance
edit_score = float(self.edit_score) / self.n_videos
# F1 Score
f1s = []
for i in range(len(self.iou_thresholds)):
precision = self.tp[i] / float(self.tp[i] + self.fp[i])
recall = self.tp[i] / float(self.tp[i] + self.fn[i])
f1 = 2.0 * (precision * recall) / (precision + recall + 1e-7)
f1 = np.nan_to_num(f1) * 100
f1s.append(f1)
# Accuracy, Edit Distance, F1 Score
return acc, edit_score, f1s
def return_confusion_matrix(self) -> np.ndarray:
return self.confusion_matrix
def save_scores(self, save_path: str) -> None:
acc, edit_score, segment_f1s = self.get_scores()
# save log
columns = ["cls_acc", "edit"]
data_dict = {
"cls_acc": [acc],
"edit": [edit_score],
}
for i in range(len(self.iou_thresholds)):
key = "segment f1s@{}".format(self.iou_thresholds[i])
columns.append(key)
data_dict[key] = [segment_f1s[i]]
df = pd.DataFrame(data_dict, columns=columns)
df.to_csv(save_path, index=False)
def save_confusion_matrix(self, save_path: str) -> None:
with open(save_path, "w") as file:
writer = csv.writer(file, lineterminator="\n")
writer.writerows(self.confusion_matrix)
def reset(self) -> None:
self.edit_score = 0
self.tp = [0 for _ in range(len(self.iou_thresholds))] # true positive
self.fp = [0 for _ in range(len(self.iou_thresholds))] # false positive
self.fn = [0 for _ in range(len(self.iou_thresholds))] # false negative
self.n_correct = 0
self.n_frames = 0
self.n_videos = 0
self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
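The _fast_hist helper above accumulates the confusion matrix with a single np.bincount over combined class indices. A stand-alone sketch of the same trick, assuming only NumPy (fast_hist is a hypothetical name):

import numpy as np

def fast_hist(pred, gt, n_classes):
    """Confusion matrix via one bincount: row index is the ground-truth class, column index is the prediction."""
    mask = (gt >= 0) & (gt < n_classes)
    combined = n_classes * gt[mask].astype(int) + pred[mask]
    return np.bincount(combined, minlength=n_classes ** 2).reshape(n_classes, n_classes)

gt = np.array([0, 0, 1, 2, 2, 2])
pred = np.array([0, 1, 1, 2, 0, 2])
print(fast_hist(pred, gt, n_classes=3))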
# Path: libs/postprocess.py
class PostProcessor(object):
def __init__(
self,
name: str,
boundary_th: int = 0.7,
theta_t: int = 15,
kernel_size: int = 15,
) -> None:
self.func = {
"refinement_with_boundary": self._refinement_with_boundary,
"relabeling": self._relabeling,
"smoothing": self._smoothing,
}
assert name in self.func
self.name = name
self.boundary_th = boundary_th
self.theta_t = theta_t
self.kernel_size = kernel_size
if name == "smoothing":
self.filter = GaussianSmoothing(self.kernel_size)
def _is_probability(self, x: np.ndarray) -> bool:
assert x.ndim == 3
if x.shape[1] == 1:
# sigmoid
if x.min() >= 0 and x.max() <= 1:
return True
else:
return False
else:
# softmax
_sum = np.sum(x, axis=1).astype(np.float32)
_ones = np.ones_like(_sum, dtype=np.float32)
return np.allclose(_sum, _ones)
def _convert2probability(self, x: np.ndarray) -> np.ndarray:
"""
Args: x (N, C, T)
"""
assert x.ndim == 3
if self._is_probability(x):
return x
else:
if x.shape[1] == 1:
# sigmoid
prob = 1 / (1 + np.exp(-x))
else:
# softmax
prob = np.exp(x) / np.sum(np.exp(x), axis=1)
return prob.astype(np.float32)
def _convert2label(self, x: np.ndarray) -> np.ndarray:
assert x.ndim == 2 or x.ndim == 3
if x.ndim == 2:
return x.astype(np.int64)
else:
if not self._is_probability(x):
x = self._convert2probability(x)
label = np.argmax(x, axis=1)
return label.astype(np.int64)
def _refinement_with_boundary(
self,
outputs: np.array,
boundaries: np.ndarray,
masks: np.ndarray,
) -> np.ndarray:
"""
Get segments, which are defined as the spans between two boundaries,
and decide their classes by majority vote.
Args:
outputs: numpy array. shape (N, C, T)
the model output for frame-level class prediction.
boundaries: numpy array. shape (N, 1, T)
boundary prediction.
masks: np.array. np.bool. shape (N, 1, T)
valid length for each video
Return:
preds: np.array. shape (N, T)
final class prediction considering boundaries.
"""
preds = self._convert2label(outputs)
boundaries = self._convert2probability(boundaries)
for i, (output, pred, boundary, mask) in enumerate(
zip(outputs, preds, boundaries, masks)
):
boundary = boundary[mask]
idx = argrelmax(boundary, threshold=self.boundary_th)
# add the index of the last action ending
T = pred.shape[0]
idx.append(T)
# majority vote
for j in range(len(idx) - 1):
count = np.bincount(pred[idx[j] : idx[j + 1]])
modes = np.where(count == count.max())[0]
if len(modes) == 1:
mode = modes
else:
if outputs.ndim == 3:
# if more than one majority class exist
prob_sum_max = 0
for m in modes:
prob_sum = output[m, idx[j] : idx[j + 1]].sum()
if prob_sum_max < prob_sum:
mode = m
prob_sum_max = prob_sum
else:
# decide first mode when more than one majority class
# have the same number during oracle experiment
mode = modes[0]
preds[i, idx[j] : idx[j + 1]] = mode
return preds
def _relabeling(self, outputs: np.ndarray, **kwargs: np.ndarray) -> np.ndarray:
"""
Relabeling small action segments with their previous action segment
Args:
output: the results of action segmentation. (N, T) or (N, C, T)
theta_t: the threshold of the size of action segments.
Return:
relabeled output. (N, T)
"""
preds = self._convert2label(outputs)
for i in range(preds.shape[0]):
# shape (T,)
last = preds[i][0]
cnt = 1
for j in range(1, preds.shape[1]):
if last == preds[i][j]:
cnt += 1
else:
if cnt > self.theta_t:
cnt = 1
last = preds[i][j]
else:
preds[i][j - cnt : j] = preds[i][j - cnt - 1]
cnt = 1
last = preds[i][j]
if cnt <= self.theta_t:
preds[i][j - cnt : j] = preds[i][j - cnt - 1]
return preds
def _smoothing(self, outputs: np.ndarray, **kwargs: np.ndarray) -> np.ndarray:
"""
Smoothing action probabilities with gaussian filter.
Args:
outputs: frame-wise action probabilities. (N, C, T)
Return:
predictions: final prediction. (N, T)
"""
outputs = self._convert2probability(outputs)
outputs = self.filter(torch.Tensor(outputs)).numpy()
preds = self._convert2label(outputs)
return preds
def __call__(self, outputs, **kwargs: np.ndarray) -> np.ndarray:
preds = self.func[self.name](outputs, **kwargs)
return preds
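The _relabeling post-processing above merges action segments shorter than theta_t frames into the preceding segment. A simplified re-implementation of that idea on a single 1-D label sequence, assuming only NumPy; it is not the repository's exact code:

import numpy as np

def relabel_short_segments(labels, theta_t=15):
    """Merge runs shorter than theta_t frames into the segment that precedes them."""
    labels = np.asarray(labels).copy()
    start = 0
    for i in range(1, len(labels) + 1):
        if i == len(labels) or labels[i] != labels[start]:
            if start > 0 and (i - start) < theta_t:
                labels[start:i] = labels[start - 1]
            start = i
    return labels

print(relabel_short_segments([0, 0, 0, 1, 0, 0, 0], theta_t=2))  # the single-frame '1' is absorbed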
# Path: libs/helper.py
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from typing import Optional, Tuple
from torch.utils.data import DataLoader
from libs.class_id_map import get_id2class_map
from libs.metric import AverageMeter, BoundaryScoreMeter, ScoreMeter
from libs.postprocess import PostProcessor
# compute output and loss
output_cls, output_bound = model(x, mask)
loss = 0.0
if isinstance(output_cls, list):
n = len(output_cls)
for out in output_cls:
loss += criterion_cls(out, t, x) / n
else:
loss += criterion_cls(output_cls, t, x)
if isinstance(output_bound, list):
n = len(output_bound)
for out in output_bound:
loss += lambda_bound_loss * criterion_bound(out, b, mask) / n
else:
loss += lambda_bound_loss * criterion_bound(output_bound, b, mask)
# record loss
losses.update(loss.item(), batch_size)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return losses.avg
def validate(
val_loader: DataLoader,
model: nn.Module,
criterion_cls: nn.Module,
criterion_bound: nn.Module,
lambda_bound_loss: float,
device: str,
dataset: str,
dataset_dir: str,
iou_thresholds: Tuple[float],
boundary_th: float,
tolerance: int,
refinement_method: Optional[str] = None
) -> Tuple[float, float, float, float, float, float, float, float, str]:
losses = AverageMeter("Loss", ":.4e")
postprocessor = PostProcessor(refinement_method, boundary_th)
scores_cls = ScoreMeter(
id2class_map=get_id2class_map(dataset, dataset_dir=dataset_dir),
iou_thresholds=iou_thresholds,
)
scores_bound = BoundaryScoreMeter(
tolerance=tolerance, boundary_threshold=boundary_th
)
scores_after_refinement = ScoreMeter(
id2class_map=get_id2class_map(dataset, dataset_dir=dataset_dir),
iou_thresholds=iou_thresholds,
)
# switch to evaluate mode
model.eval()
with torch.no_grad():
for sample in val_loader:
x = sample["feature"]
t = sample["label"]
b = sample["boundary"]
mask = sample["mask"]
x = x.to(device)
t = t.to(device)
b = b.to(device)
mask = mask.to(device)
batch_size = x.shape[0]
# compute output and loss
output_cls, output_bound = model(x, mask)
loss = 0.0
loss += criterion_cls(output_cls, t, x)
loss += lambda_bound_loss * criterion_bound(output_bound, b, mask)
# measure accuracy and record loss
losses.update(loss.item(), batch_size)
# calculate accuracy and f1 score
output_cls = output_cls.to("cpu").data.numpy()
output_bound = output_bound.to("cpu").data.numpy()
t = t.to("cpu").data.numpy()
b = b.to("cpu").data.numpy()
mask = mask.to("cpu").data.numpy()
refined_output_cls = postprocessor(
output_cls, boundaries=output_bound, masks=mask
)
# update score
scores_cls.update(output_cls, t, output_bound, mask)
scores_bound.update(output_bound, b, mask)
scores_after_refinement.update(refined_output_cls, t)
cls_acc, edit_score, segment_f1s = scores_after_refinement.get_scores()
bound_acc, precision, recall, bound_f1s = scores_bound.get_scores()
return (
losses.avg,
cls_acc,
edit_score,
segment_f1s,
bound_acc,
precision,
recall,
bound_f1s,
)
def evaluate(
val_loader: DataLoader,
model: nn.Module,
device: str,
boundary_th: float,
| dataset: str, |
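The train and validate loops above track a batch-size-weighted running loss through AverageMeter. A trimmed-down sketch of that bookkeeping pattern in pure Python (the class here is a reduced version written for illustration, not the quoted one):

class RunningAverage:
    """Accumulate a weighted running average of a scalar, e.g. a per-batch loss."""
    def __init__(self):
        self.sum = 0.0
        self.count = 0
    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
    @property
    def avg(self):
        return self.sum / max(self.count, 1)

losses = RunningAverage()
for batch_loss, batch_size in [(0.9, 8), (0.7, 8), (0.5, 4)]:
    losses.update(batch_loss, batch_size)
print(round(losses.avg, 4))  # 0.74, weighted by batch size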
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: bolna-ai/bolna
# Path: bolna/agent_manager/base_manager.py
class BaseManager:
def __init__(self):
self.agent = "bolna-agent"
# Path: bolna/helpers/utils.py
def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False):
metadata = copy.deepcopy(meta_info)
if meta_info is not None: #It'll be none in case we connect through dashboard playground
metadata["is_md5_hash"] = is_md5_hash
metadata["llm_generated"] = llm_generated
return {
'data': data,
'meta_info': metadata
}
# Path: bolna/helpers/utils.py
def is_valid_md5(hash_string):
return bool(re.fullmatch(r"[0-9a-f]{32}", hash_string))
# Path: bolna/helpers/utils.py
async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False):
# we are already storing pcm formatted audio in the filler config. No need to encode/decode them further
audio_data = None
if local:
file_name = f"{PREPROCESS_DIR}/{agent_name}/{audio_format}/{b64_string}.{audio_format}"
with open(file_name, 'rb') as file:
# Read the entire file content into a variable
audio_data = file.read()
else:
object_key = f"{user_id}/{assistant_id}/audio/{b64_string}.{audio_format}"
audio_data = await get_s3_file(BUCKET_NAME, object_key)
return audio_data
# Path: bolna/helpers/utils.py
def get_required_input_types(task):
input_types = dict()
for i, chain in enumerate(task['toolchain']['pipelines']):
first_model = chain[0]
if chain[0] == "transcriber":
input_types["audio"] = i
elif chain[0] == "synthesizer" or chain[0] == "llm":
input_types["text"] = i
return input_types
# Path: bolna/helpers/utils.py
def format_messages(messages):
formatted_string = ""
for message in messages:
role = message['role']
content = message['content']
if role == 'assistant':
formatted_string += "assistant: " + content + "\n"
elif role == 'user':
formatted_string += "user: " + content + "\n"
return formatted_string
# Path: bolna/helpers/utils.py
async def get_prompt_responses(agent_name, local=False, user_id=None, assistant_id = None):
filepath = f"{PREPROCESS_DIR}/{agent_name}/conversation_details.json"
data = ""
if local:
logger.info("Loading up the conversation details from the local file")
try:
with open(filepath, "r") as json_file:
data = json.load(json_file)
except Exception as e:
logger.error("Could not load up the dataset")
else:
key = f"{user_id}/{assistant_id}/conversation_details.json"
logger.info(f"Loading up the conversation details from the s3 file BUCKET_NAME {BUCKET_NAME} {key}")
try:
response = await get_s3_file(BUCKET_NAME, key)
file_content = response.decode('utf-8')
json_content = json.loads(file_content)
return json_content
except Exception as e:
traceback.print_exc()
print(f"An error occurred: {e}")
return None
return data
# Path: bolna/helpers/utils.py
def update_prompt_with_context(prompt, context_data):
if not isinstance(context_data.get('recipient_data'), dict):
return prompt
return prompt.format(**context_data.get('recipient_data', {}))
# Path: bolna/helpers/utils.py
def get_md5_hash(text):
return hashlib.md5(text.encode()).hexdigest()
# Path: bolna/helpers/utils.py
def clean_json_string(json_str):
if json_str.startswith("```json") and json_str.endswith("```"):
json_str = json_str[7:-3].strip()
return json_str
# Path: bolna/helpers/utils.py
def yield_chunks_from_memory(audio_bytes, chunk_size=512):
total_length = len(audio_bytes)
for i in range(0, total_length, chunk_size):
yield audio_bytes[i:i + chunk_size]
# Path: bolna/helpers/logger_config.py
def configure_logger(file_name, enabled=True, logging_level='INFO'):
if logging_level not in VALID_LOGGING_LEVELS:
logging_level = "INFO"
logging.basicConfig(
level=logging_level,
format="%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(file_name)
if not enabled:
logger.disabled = True
return logger
# Path: bolna/agent_manager/task_manager.py
import asyncio
import traceback
import time
import json
from .base_manager import BaseManager
from bolna.agent_types import *
from bolna.providers import *
from bolna.helpers.utils import create_ws_data_packet, is_valid_md5, get_raw_audio_bytes_from_base64, \
get_required_input_types, format_messages, get_prompt_responses, update_prompt_with_context, get_md5_hash, clean_json_string, yield_chunks_from_memory
from bolna.helpers.logger_config import configure_logger
self.conversation_ended = True
self.ended_by_assistant = True
await self.tools["input"].stop_handler()
logger.info("Stopped input handler")
if "transcriber" in self.tools and not self.connected_through_dashboard:
logger.info("Stopping transcriber")
await self.tools["transcriber"].toggle_connection()
await asyncio.sleep(5) # Making sure whatever message was passed is over
return
self.llm_processed_request_ids.add(self.current_request_id)
llm_response = ""
def _extract_sequence_and_meta(self, message):
sequence, meta_info = None, None
if isinstance(message, dict) and "meta_info" in message:
self._set_call_details(message)
sequence = message["meta_info"]["sequence"]
meta_info = message["meta_info"]
return sequence, meta_info
def _is_extraction_task(self):
return self.task_config["task_type"] == "extraction"
def _is_summarization_task(self):
return self.task_config["task_type"] == "summarization"
def _is_conversation_task(self):
return self.task_config["task_type"] == "conversation"
def _is_preprocessed_flow(self):
return self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "preprocessed"
def _is_formulaic_flow(self):
return self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "formulaic"
# This is used only in the case it's a text based chatbot
async def _listen_llm_input_queue(self):
logger.info(
f"Starting listening to LLM queue as either Connected to dashboard = {self.connected_through_dashboard} or it's a textual chat agent {self.textual_chat_agent}")
while True:
try:
ws_data_packet = await self.queues["llm"].get()
logger.info(f"ws_data_packet {ws_data_packet}")
bos_packet = create_ws_data_packet("<beginning_of_stream>", ws_data_packet['meta_info'])
await self.tools["output"].handle(bos_packet)
await self._run_llm_task(
ws_data_packet) # In case s3 is down and it's an audio processing job, this might produce blank message on the frontend of playground.
eos_packet = create_ws_data_packet("<end_of_stream>", ws_data_packet['meta_info'])
await self.tools["output"].handle(eos_packet)
except Exception as e:
traceback.print_exc()
logger.error(f"Something went wrong with LLM queue {e}")
break
async def _run_llm_task(self, message):
logger.info("running llm based agent")
sequence, meta_info = self._extract_sequence_and_meta(message)
try:
if self._is_extraction_task() or self._is_summarization_task():
await self._process_followup_task(message, sequence, meta_info)
elif self._is_conversation_task():
if self._is_preprocessed_flow():
await self._process_conversation_preprocessed_task(message, sequence, meta_info)
elif self._is_formulaic_flow():
await self._process_conversation_formulaic_task(message, sequence, meta_info)
else:
await self._process_conversation_task(message, sequence, meta_info)
else:
logger.error("unsupported task type: {}".format(self.task_config["task_type"]))
self.llm_task = None
except Exception as e:
traceback.print_exc()
logger.error(f"Something went wrong in llm: {e}")
async def process_transcriber_request(self, meta_info):
if not self.current_request_id or self.current_request_id != meta_info["request_id"]:
self.previous_request_id, self.current_request_id = self.current_request_id, meta_info["request_id"]
sequence = meta_info["sequence"]
# check if previous request id is not in transmitted request id
if self.previous_request_id is None:
is_first_message = True
elif self.previous_request_id not in self.llm_processed_request_ids:
self.llm_rejected_request_ids.add(self.previous_request_id)
else:
skip_append_to_data = False
return sequence
async def process_interruption(self):
await self.tools["output"].handle_interruption()
self.sequence_ids = set() #Remove all the sequence ids so subsequent won't be processed
if self.llm_task is not None:
self.llm_task.cancel()
self.llm_task = None
self.was_long_pause = True
# if len(self.synthesizer_tasks) > 0:
# for synth_task in self.synthesizer_tasks:
# synth_task.cancel()
# self.synthesizer_tasks = []
########################
# Transcriber task
########################
async def _handle_transcriber_output(self, next_task, transcriber_message, meta_info):
if next_task == "llm":
meta_info["origin"] = "transcriber"
self.llm_task = asyncio.create_task(
self._run_llm_task(create_ws_data_packet(transcriber_message, meta_info)))
elif next_task == "synthesizer":
self.synthesizer_tasks.append(asyncio.create_task(
self._synthesize(create_ws_data_packet(transcriber_message, meta_info))))
| else: |
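The helpers quoted above cache preprocessed audio under an MD5 hex digest of the text and validate such keys with a regular expression. A small sketch of how those two pieces compose, using only the standard library (the composition shown is an assumption for illustration, not code from the repository):

import hashlib
import re

def get_text_cache_key(text):
    """Deterministic 32-character hex key for a piece of text."""
    return hashlib.md5(text.encode()).hexdigest()

def looks_like_md5(key):
    return bool(re.fullmatch(r"[0-9a-f]{32}", key))

key = get_text_cache_key("hello, how can I help you today?")
print(key, looks_like_md5(key))  # prints the digest and True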
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: relari-ai/continuous-eval
# Path: continuous_eval/metrics/generation_deterministic_metrics.py
class DeterministicAnswerCorrectness(Metric):
def calculate(self, answer, ground_truths, **kwargs):
# calculate the max score across all ground truth answers
token_scores = [TokenOverlap().calculate(answer, gt_answer) for gt_answer in ground_truths]
rouge_scores = [RougeScore().calculate(answer, gt_answer) for gt_answer in ground_truths]
bleu_scores = [BleuScore().calculate(answer, gt_answer) for gt_answer in ground_truths]
return {
metric: max(score.get(metric, 0) for score in token_scores + rouge_scores + bleu_scores)
for metric in ["rouge_l_recall", "rouge_l_precision", "rouge_l_f1", "token_overlap_recall", "token_overlap_precision", "token_overlap_f1", "bleu_score"]
}
# Path: continuous_eval/metrics/generation_deterministic_metrics.py
class DeterministicFaithfulness(Metric):
ROUGE_PRECISION_THRESHOLD = 0.5
TOKEN_OVERLAP_PRECISION_THRESHOLD = 0.5
BLEU_SCORE_THRESHOLD = 0.5
def __init__(self):
_download_punkt()
super().__init__()
def calculate(self, answer, retrieved_contexts, **kwargs):
context = "\n".join(retrieved_contexts)
sentences = nltk.sent_tokenize(answer)
rouge_scores = [RougeScore().calculate(sentence, context)["rouge_l_precision"] for sentence in sentences]
token_overlap_scores = [
TokenOverlap().calculate(sentence, context)["token_overlap_precision"] for sentence in sentences
]
bleu_scores = [BleuScore().calculate(sentence, context)["bleu_score"] for sentence in sentences]
rouge_faithfulness = sum(score > self.ROUGE_PRECISION_THRESHOLD for score in rouge_scores) / len(sentences)
token_overlap_faithfulness = sum(
score > self.TOKEN_OVERLAP_PRECISION_THRESHOLD for score in token_overlap_scores
) / len(sentences)
bleu_faithfulness = sum(score for score in bleu_scores) / len(sentences)
return {
"rouge_faithfulness": rouge_faithfulness,
"token_overlap_faithfulness": token_overlap_faithfulness,
"bleu_faithfulness": bleu_faithfulness,
"rouge_p_by_sentence": rouge_scores,
"token_overlap_p_by_sentence": token_overlap_scores,
"blue_score_by_sentence": bleu_scores,
}
# Path: continuous_eval/metrics/generation_LLM_based_metrics.py
class LLMBasedAnswerCorrectness(LLMBasedMetric):
"""
The LLM based answer correctness metric.
Measures whether the generated answer is correct compared to the ground truths.
"""
def __init__(self, model: LLMInterface = DefaultLLM, use_few_shot: bool = True):
super().__init__(model)
self.use_few_shot = use_few_shot
def __str__(self):
return f"LLMBasedAnswerCorrectness(model={self.model}, use_few_shot={self.use_few_shot})"
def calculate(self, question, answer, ground_truths, **kwargs):
"""
Calculate the faithfulness score for the given datapoint.
"""
gt_answers = "\n".join(ground_truths)
if self.use_few_shot:
few_shot_prompt = """Example Response:
3.5
The answer is relevant to the question and similar to the ground truth answer but misses some information.
"""
else:
few_shot_prompt = ""
prompt = {
"system_prompt": (
"""
You are an expert evaluator system for a question answering system.
You need to evaluate the quality of the generated answer based on the question and reference ground truth answer.
Output a score and the reasoning for your score in a new line.
Use the following guidelines for evaluation:
* You should output a single score between 1 to 5.
* 1 means that the answer is completely irrelevant to the question.
* 2 means that the answer is relevant to the question but contains major errors.
* 3 means that the answer is relevant to the question and is partially correct.
* 4 means that the answer is relevant to the question and is correct.
* 5 means that the answer is relevant to the question and is correct and complete.
"""
+ few_shot_prompt
),
"user_prompt": (
"Question: " + question + "\nAnswer: " + answer + r"\Ground truth reference answer(s): " + gt_answers
),
}
response = self._llm.run(prompt)
score_txt, reasoning = response.split("\n", 1)
score = float(score_txt.split(":")[-1].strip())
return {
"LLM_based_answer_correctness": score,
"LLM_based_answer_correctness_reasoning": reasoning,
}
# Path: continuous_eval/metrics/generation_LLM_based_metrics.py
class LLMBasedFaithfulness(LLMBasedMetric):
"""
The LLM based faithfulness metric.
Measures whether the generated answer is faithful to the retrieved context.
"""
def __init__(
self,
model: LLMInterface = DefaultLLM,
use_few_shot: bool = True,
classify_by_statement: bool = False,
):
super().__init__(model)
self.use_few_shot = use_few_shot
self.classify_by_statement = classify_by_statement
def __str__(self):
return f"LLMBasedFaithfulness(model={self.model}, use_few_shot={self.use_few_shot}, classify_by_statement={self.classify_by_statement})"
def calculate(self, question, retrieved_contexts, answer, **kwargs):
"""
Calculate the faithfulness score for the given datapoint.
"""
if self.classify_by_statement:
# Context coverage uses the same prompt as faithfulness because it calculates what proportion of the statements in the answer can be attributed to the context.
# The difference is that faithfulness uses the generated answer, while context coverage uses ground truth answer (to evaluate context).
context_coverage = LLMBasedContextCoverage(use_few_shot=self.use_few_shot)
results = context_coverage.calculate(question, retrieved_contexts, answer)
score = results["LLM_based_context_coverage"]
reasoning = results["LLM_based_context_statements"]
else:
context = "\n".join(retrieved_contexts)
if self.use_few_shot:
few_shot_prompt = """
Example 1:
Context: The Eiffel Tower, a wrought-iron lattice tower on the Champ de Mars in Paris, France, is one of the most famous landmarks in the world. It was designed by Gustave Eiffel and completed in 1889.
Statement: The Eiffel Tower can be found in the center of London, near the Thames River.
Response:
No
The statement contradicts with the context, which states that Eiffel Tower is in Paris, as opposed to the center of London.
Example 2:
Context: Photosynthesis is a process used by plants and other organisms to convert light energy into chemical energy that can later be released to fuel the organisms' activities. This chemical energy is stored in carbohydrate molecules, such as sugars, which are synthesized from carbon dioxide and water.
Statement: Photosynthesis in plants primarily involves the conversion of light energy into chemical energy stored in forms such as sugar.
Response:
Yes
The statement is supported by the context, which states that photosynthesis converts light energy into chemical energy and that the chemical energy is stored in carbohydrate molecules, such as sugars.
"""
else:
few_shot_prompt = ""
prompt = {
"system_prompt": (
"You are tasked to evaluate whether the statement is fully supported by the context. Respond with either Yes or No, followed by your reasoning in a new line.\n"
+ few_shot_prompt
),
"user_prompt": ("Context: " + context + r"\Statement: " + answer),
}
response = self._llm.run(prompt)
score_txt, reasoning = response.split("\n", 1)
score = bool("yes" in score_txt.lower())
return {
"LLM_based_faithfulness": score,
"LLM_based_faithfulness_reasoning": reasoning,
}
# Path: tests/helpers/example_datum.py
CAPITAL_OF_FRANCE = {
"question": "What is the capital of France?",
"retrieved_contexts": [
"Paris is the largest city in France.",
"Lyon is a major city in France.",
],
"ground_truth_contexts": ["Paris is the capital of France."],
"answer": "Paris",
"ground_truths": ["Paris"],
}
ROMEO_AND_JULIET = {
"question": "Who wrote Romeo and Juliet?",
"retrieved_contexts": [
"Shakespeare was a playwright.",
"Romeo and Juliet is a play by Shakespeare.",
],
"ground_truth_contexts": [
"Shakespeare was a playwright.",
"Romeo and Juliet is a play by William Shakespeare.",
],
"answer": "William Shakespeare",
"ground_truths": ["William Shakespeare"],
}
IMPLICATIONS_GLOBAL_WARMING = {
"question": "What are the implications of global warming?",
"retrieved_contexts": [
(
"Global warming refers to the long-term rise in the average temperature of the Earth's climate system. "
"It is a major aspect of climate change, and has been demonstrated by direct temperature measurements "
"and by measurements of various effects of the warming. The terms are commonly used interchangeably, "
"though global warming is more specifically about rising surface temperatures, while climate change includes "
"global warming as well as everything else that increasing greenhouse gas amounts will affect. "
"A 2016 report stated that the Arctic is warming at a rate double that of the global average. "
"The effects of global warming include rising sea levels, regional changes in precipitation, more frequent "
"extreme weather events such as heat waves, and expansion of deserts. Surface temperature increases are "
"greatest in the Arctic, which has contributed to the retreat of glaciers, permafrost, and sea ice. "
"Overall, higher temperatures bring more rain and snowfall, but for some regions, droughts and wildfires "
"increase instead. Climate change threatens to diminish the supply of fresh water. A warming atmosphere "
"can hold, and more frequently does hold, larger quantities of water vapor, which can lead to more intense "
"rainstorms, causing destructive erosion. Warming also creates conditions that can lead to more powerful "
"hurricanes. Rising temperatures also have the potential to change the nature of global rainfall, snow, "
"and river flows. Effects significant to humans include the threat to food security from decreasing crop "
"yields and the abandonment of populated areas due to rising sea levels. Because the climate system has "
"a large inertia and greenhouse gases will remain in the atmosphere for a long time, climatic changes and "
"their effects will continue for many centuries even if greenhouse gas emissions are stopped."
),
(
"Environmental impacts of climate change might include harsher hurricanes and storms, the death of reefs "
"and forests, more frequent and severe droughts, increased heat waves, and stronger, more intense wildfires. "
"Such changes will have significant implications for human societies and the natural world. The extent of these "
"effects will depend largely on the degree of future global warming and the strategies adopted for mitigation "
"and adaptation. Some effects of climate change, such as record high temperatures and melting glaciers, are "
"already being observed. The world community has taken some steps towards addressing climate change. The "
"2015 Paris Agreement, for instance, set the goal of limiting global warming to well below 2.0 degrees Celsius "
"relative to pre-industrial levels; and to limit the increase to 1.5 degrees Celsius, recognizing that this would "
"substantially reduce the risks and impacts of climate change. This agreement is meant to signal the beginning "
"of the end of over two centuries of predominance of fossil fuels. Some experts have called for a coordinated "
"economic transition to rapid decarbonization, climate finance and 'climate justice'. The overall conclusion of "
"the Intergovernmental Panel on Climate Change (IPCC), the peak scientific body on climate change, is that it "
"is 'extremely likely' that the majority of global warming since 1950 has been caused by human activities."
),
],
"ground_truth_contexts": [
(
"Climate change threatens to diminish the supply of fresh water. A warming atmosphere "
"can hold, and more frequently does hold, larger quantities of water vapor, which can lead to more intense "
"rainstorms, causing destructive erosion. To mitigate these impacts, "
"strategies such as reducing greenhouse gas emissions and enhancing sustainability practices are vital. "
"The Paris Agreement of 2015 marks a global effort to limit warming and reduce the risks associated with "
"climate change, aiming to transition away from fossil fuels towards cleaner, renewable sources of energy."
)
],
"answer": "Reducing greenhouse gas emissions, transitioning to renewable energy",
"ground_truths": [
"Reducing greenhouse gas emissions",
"Transitioning to renewable energy",
],
}
FARGO = {
"question": "Did Fargo win the golden globe nominations for both seasons?",
"retrieved_contexts": [
"Fargo is an American black comedy crime drama television series created and primarily written by Noah Hawley. The show is inspired by the 1996 film of the same name, which was written and directed by the Coen brothers, and takes place within the same fictional universe. The Coens were impressed by Hawley's script and agreed to be named as executive producers.[3] The series premiered on April 15, 2014, on FX,[3] and follows an anthology format, with each season set in a different era and location, with a different story and mostly new characters and cast, although there is minor overlap. Each season is heavily influenced by various Coen brothers films, with each containing numerous references to them.[4]",
"The first season, set primarily in Minnesota and North Dakota from January 2006 to February 2007 and starring Billy Bob Thornton, Allison Tolman, Colin Hanks, and Martin Freeman, received wide acclaim from critics.[5] It won the Primetime Emmy Awards for Outstanding Miniseries, Outstanding Directing, and Outstanding Casting, and received 15 additional nominations including Outstanding Writing, another Outstanding Directing nomination, and acting nominations for all four leads. It also won the Golden Globe Awards for Best Miniseries or Television Film and Best Actor – Miniseries or Television Film for Thornton.",
"The second season, set in Minnesota, North Dakota, and South Dakota in March 1979 and starring Kirsten Dunst, Patrick Wilson, Jesse Plemons, Jean Smart, Allison Tolman, and Ted Danson, received widespread critical acclaim.[6] It received three Golden Globe nominations, along with several Emmy nominations including Outstanding Miniseries, and acting nominations for Dunst, Plemons, Smart, and Bokeem Woodbine.",
],
"ground_truth_contexts": [
"The first season, set primarily in Minnesota and North Dakota from January 2006 to February 2007 and starring Billy Bob Thornton, Allison Tolman, Colin Hanks, and Martin Freeman, received wide acclaim from critics.[5] It won the Primetime Emmy Awards for Outstanding Miniseries, Outstanding Directing, and Outstanding Casting, and received 15 additional nominations including Outstanding Writing, another Outstanding Directing nomination, and acting nominations for all four leads. It also won the Golden Globe Awards for Best Miniseries or Television Film and Best Actor – Miniseries or Television Film for Thornton.",
"The second season, set in Minnesota, North Dakota, and South Dakota in March 1979 and starring Kirsten Dunst, Patrick Wilson, Jesse Plemons, Jean Smart, Allison Tolman, and Ted Danson, received widespread critical acclaim.[6] It received three Golden Globe nominations, along with several Emmy nominations including Outstanding Miniseries, and acting nominations for Dunst, Plemons, Smart, and Bokeem Woodbine.",
],
"answer": "Berlin",
"ground_truths": [
"Yes, they did get a nomination in season 1 and 2.",
"Not really, they didn't win for season three.",
],
}
# Path: tests/helpers/utils.py
def all_close(
datum_1: Dict[str, Union[Number, List[Number]]],
datum_2: Dict[str, Union[Number, List[Number]]],
rel_tol: float = 1e-8,
abs_tol: float = 1e-4,
):
if set(datum_1.keys()) != set(datum_2.keys()):
return False
for key, value1 in datum_1.items():
if isinstance(value1, list):
if not all(math.isclose(v1, v2, rel_tol=rel_tol, abs_tol=abs_tol) for v1, v2 in zip(value1, datum_2[key])):
return False
else:
if not math.isclose(value1, datum_2[key], rel_tol=rel_tol, abs_tol=abs_tol):
return False
return True
# Path: tests/generation_metrics_test.py
import pytest
from continuous_eval.metrics import (
DeterministicAnswerCorrectness,
DeterministicFaithfulness,
LLMBasedAnswerCorrectness,
LLMBasedFaithfulness,
)
from tests.helpers import example_datum
from tests.helpers.utils import all_close
def test_deterministic_answer_relevance():
data = [example_datum.ROMEO_AND_JULIET, example_datum.IMPLICATIONS_GLOBAL_WARMING]
expected_results = [
{
'rouge_l_recall': 1.0,
'rouge_l_precision': 1.0,
'rouge_l_f1': 0.999999995,
'token_overlap_recall': 1.0,
'token_overlap_precision': 1.0,
'token_overlap_f1': 1.0,
'bleu_score': 1.0
},
{
'rouge_l_recall': 0.75,
'rouge_l_precision': 0.375,
'rouge_l_f1': 0.49999999555555563,
| 'token_overlap_recall': 1.0, |
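DeterministicAnswerCorrectness above keeps, for each metric, the maximum score over all ground-truth answers. A self-contained sketch of that aggregation; token_f1 below is a simplified stand-in for the library's TokenOverlap metric, not its actual implementation:

def max_over_ground_truths(score_fn, answer, ground_truths):
    """Score the answer against every reference and keep the best value per metric."""
    per_gt = [score_fn(answer, gt) for gt in ground_truths]
    return {m: max(scores[m] for scores in per_gt) for m in per_gt[0]}

def token_f1(answer, reference):
    a, r = set(answer.lower().split()), set(reference.lower().split())
    overlap = len(a & r)
    precision = overlap / len(a) if a else 0.0
    recall = overlap / len(r) if r else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return {"token_precision": precision, "token_recall": recall, "token_f1": f1}

print(max_over_ground_truths(token_f1, "William Shakespeare", ["William Shakespeare", "Shakespeare"]))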
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Seunggu0305/VLCounter
# Path: tools/models/ViT_Encoder.py
class VPTCLIPVisionTransformer(nn.Module):
def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[6,7,8,11], pretrained=None, get_embeddings=True,
num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):
super().__init__()
self.pretrained = pretrained
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.spatial_size = input_resolution // patch_size
self.ln_pre = LayerNorm(width)
self.get_embeddings = get_embeddings
self.num_layers = layers
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
self.out_indices = out_indices
if get_embeddings:
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
## Setting of visual prompt tuning
self.num_tokens = num_tokens
self.prompt_dim = prompt_dim
self.total_d_layer = total_d_layer
## Add the prompt parameters # exclude_key=prompt:
self._init_prompt(patch_size, self.num_tokens, self.prompt_dim, self.total_d_layer)
self.embed_dim = width
self.num_heads = heads
self.patch_size = patch_size
def _init_prompt(self, patch, num_tokens, prompt_dim, total_d_layer):
patch_size = []
patch_size.append(patch)
patch_size.append(patch)
val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim)) # noqa
if total_d_layer >= 0:
self.prompt_embeddings = nn.Parameter(torch.zeros(1, num_tokens, prompt_dim))
# xavier_uniform initialization
nn.init.uniform_(self.prompt_embeddings.data, -val, val)
if total_d_layer > 0: # noqa
self.deep_prompt_embeddings = nn.Parameter(torch.zeros(total_d_layer, num_tokens, prompt_dim))
# xavier_uniform initialization
nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)
self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)
nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')
self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)
self.prompt_dropout = Dropout(0.1)
else: # total_d_layer < 0
self.deep_prompt_embeddings = nn.Parameter(torch.zeros(abs(total_d_layer), num_tokens, prompt_dim))
nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)
self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)
nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')
self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)
self.prompt_dropout = Dropout(0.1)
def init_weights(self, pretrained=None):
pretrained = pretrained or self.pretrained
if isinstance(pretrained, str):
checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()
state_dict = {}
for k in checkpoint.keys():
if k.startswith('visual.'):
new_k = k.replace('visual.', '')
state_dict[new_k] = checkpoint[k]
if 'positional_embedding' in state_dict.keys():
if self.positional_embedding.shape != state_dict['positional_embedding'].shape:
# (1025, 768) (197, 768)
print(f'Resize the pos_embed shape from {state_dict["positional_embedding"].shape} to {self.positional_embedding.shape}')
cls_pos = state_dict["positional_embedding"][0:1, :]
spatial_pos = F.interpolate(state_dict["positional_embedding"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')
spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)
positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)
state_dict['positional_embedding'] = positional_embedding
assert self.positional_embedding.shape == state_dict['positional_embedding'].shape
u, w = self.load_state_dict(state_dict, False)
print(u, w, 'are misaligned params in vision transformer')
self.attn = None
if self.attn == None:
for i in range(1,2): # surgery 7, maskclip 2
self.attn = Attention(self.embed_dim, self.embed_dim, self.num_heads, True)
self.attn.qkv.weight.data = self.transformer.resblocks[-i].attn.in_proj_weight.clone()
self.attn.qkv.bias.data = self.transformer.resblocks[-i].attn.in_proj_bias.clone()
self.attn.proj.weight.data = self.transformer.resblocks[-i].attn.out_proj.weight.clone()
self.attn.proj.bias.data = self.transformer.resblocks[-i].attn.out_proj.bias.clone()
self.transformer.resblocks[-i].attn = self.attn
def forward(self, x: torch.Tensor):
x = self.conv1(x)
B, C, H, W = x.shape
x = x.reshape(x.shape[0], x.shape[1], -1)
x = x.permute(0, 2, 1)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)
pos = self.positional_embedding.to(x.dtype)
cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)
spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')
spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)
pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)
x = x + pos
x = self.ln_pre(x)
if self.total_d_layer >=0:
# concat prompt
x = torch.cat((
x[:, :1, :],
self.prompt_dropout(self.prompt_proj(self.prompt_embeddings).expand(B, -1, -1)),
x[:, 1:, :]
), dim=1)
x = x.permute(1, 0, 2)
features = []
outs = []
x, features = self.forward_deep_prompt(x, features, H, W)
if self.get_embeddings:
x = x.permute(1, 0, 2)
x = self.ln_post(x)
x = x @ self.proj
global_embedding = x[:, 0]
visual_embedding = x[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2)
visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)
features.append(visual_embedding)
outs.append(tuple(features))
global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)
outs.append(global_embedding)
return outs[0]
def forward_deep_prompt(self, embedding_output, features, H, W, out_last=False):
B = embedding_output.shape[1]
for i in range(self.num_layers):
if i == 0:
hidden_states = self.transformer.resblocks[i](embedding_output)
elif i <= self.deep_prompt_embeddings.shape[0]:
deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i-1]).expand(B, -1, -1)).permute(1, 0, 2)
hidden_states = torch.cat((
hidden_states[:1, :, :],
deep_prompt_emb,
hidden_states[(1+self.num_tokens):, :, :]
), dim=0)
hidden_states = self.transformer.resblocks[i](hidden_states)
if len(self.out_indices) > 1:
if i in self.out_indices[:-1]:
xp = hidden_states.permute(1, 0, 2)[:, -(H*W):, :].permute(0, 2, 1).reshape(B, -1, H, W)
features.append(xp.contiguous() / xp.norm(dim=1,keepdim=True))
encoded = self.prompt_norm(hidden_states)
return encoded, features
# Path: tools/models/ViT_Encoder_add.py
class SPTCLIPVisionTransformer(nn.Module):
def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5,6,7,8,11], pretrained=None, get_embeddings=True,
num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):
super().__init__()
self.pretrained = pretrained
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.spatial_size = input_resolution // patch_size
self.ln_pre = LayerNorm(width)
self.get_embeddings = get_embeddings
self.num_layers = layers
self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)
self.out_indices = out_indices
if get_embeddings:
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
self.text_proj = nn.Linear(512, width)
nn.init.kaiming_normal_(self.text_proj.weight, a=0, mode='fan_out')
self.text_dropout = nn.Dropout(0.1)
## Setting of visual prompt tuning
self.num_tokens = num_tokens
self.prompt_dim = prompt_dim
self.total_d_layer = total_d_layer
## Add the prompt parameters # exclude_key=prompt:
self._init_prompt(patch_size, self.num_tokens, self.prompt_dim, self.total_d_layer)
self.embed_dim = width
self.num_heads = heads
self.patch_size = patch_size
def _init_prompt(self, patch, num_tokens, prompt_dim, total_d_layer):
patch_size = []
patch_size.append(patch)
patch_size.append(patch)
val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim)) # noqa
if total_d_layer >= 0:
self.prompt_embeddings = nn.Parameter(torch.zeros(1, num_tokens, prompt_dim))
# xavier_uniform initialization
nn.init.uniform_(self.prompt_embeddings.data, -val, val)
if total_d_layer > 0: # noqa
self.deep_prompt_embeddings = nn.Parameter(torch.zeros(total_d_layer, num_tokens, prompt_dim))
# xavier_uniform initialization
nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)
self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)
nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')
self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)
self.prompt_dropout = Dropout(0.1)
else: # total_d_layer < 0
self.deep_prompt_embeddings = nn.Parameter(torch.zeros(abs(total_d_layer), num_tokens, prompt_dim))
nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)
self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)
nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')
self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)
self.prompt_dropout = Dropout(0.1)
def init_weights(self, pretrained=None):
pretrained = pretrained or self.pretrained
if isinstance(pretrained, str):
checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()
# checkpoint = torch.load(pretrained)['model']
state_dict = {}
for k in checkpoint.keys():
if k.startswith('visual.'):
new_k = k.replace('visual.', '')
# if k.startswith('module.visual.'):
# new_k = k.replace('module.visual.', '')
state_dict[new_k] = checkpoint[k].float()
if 'positional_embedding' in state_dict.keys():
if self.positional_embedding.shape != state_dict['positional_embedding'].shape:
# (1025, 768) (197, 768)
print(f'Resize the pos_embed shape from {state_dict["positional_embedding"].shape} to {self.positional_embedding.shape}')
cls_pos = state_dict["positional_embedding"][0:1, :]
if self.patch_size == 16:
spatial_pos = F.interpolate(state_dict["positional_embedding"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')
elif self.patch_size == 32:
spatial_pos = F.interpolate(state_dict["positional_embedding"][1:,].reshape(1, 7, 7, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')
else:
raise AttributeError('Patch Size should be 16 or 32')
spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)
positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)
state_dict['positional_embedding'] = positional_embedding
assert self.positional_embedding.shape == state_dict['positional_embedding'].shape
# del state_dict['conv1.weight']
u, w = self.load_state_dict(state_dict, False)
print(u, w, 'are misaligned params in vision transformer')
self.attn = None
if self.attn == None:
for i in range(1,2): # surgery 7, maskclip 2
self.attn = Attention(self.embed_dim, self.embed_dim, self.num_heads, True)
self.attn.qkv.weight.data = self.transformer.resblocks[-i].attn.in_proj_weight.clone()
self.attn.qkv.bias.data = self.transformer.resblocks[-i].attn.in_proj_bias.clone()
self.attn.proj.weight.data = self.transformer.resblocks[-i].attn.out_proj.weight.clone()
self.attn.proj.bias.data = self.transformer.resblocks[-i].attn.out_proj.bias.clone()
self.transformer.resblocks[-i].attn = self.attn
def forward(self, x: torch.Tensor, _t: torch.Tensor):
x = self.conv1(x)
B, C, H, W = x.shape
x = x.reshape(x.shape[0], x.shape[1], -1)
x = x.permute(0, 2, 1)
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)
pos = self.positional_embedding.to(x.dtype)
cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)
spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')
spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)
pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)
x = x + pos
x = self.ln_pre(x)
if self.total_d_layer >=0:
# concat prompt
x = torch.cat((
x[:, :1, :],
self.prompt_dropout(self.prompt_proj(self.prompt_embeddings).expand(B, -1, -1)) + self.text_dropout(self.text_proj(_t).expand(-1, self.num_tokens, -1)),
x[:, 1:, :]
), dim=1)
x = x.permute(1, 0, 2)
features = []
outs = []
x, features = self.forward_deep_prompt(x, features, H, W, _t)
if self.get_embeddings:
x = x.permute(1, 0, 2)
x = self.ln_post(x)
x = x @ self.proj
global_embedding = x[:, 0]
visual_embedding = x[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2)
visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)
features.append(visual_embedding)
outs.append(tuple(features))
global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)
outs.append(global_embedding)
return outs[0]
def forward_deep_prompt(self, embedding_output, features, H, W, _t, out_last=False):
B = embedding_output.shape[1]
for i in range(self.num_layers):
if i == 0:
hidden_states = self.transformer.resblocks[i](embedding_output)
elif i <= self.deep_prompt_embeddings.shape[0]:
deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i-1]).expand(B, -1, -1)).permute(1, 0, 2)
deep_text = self.text_dropout(self.text_proj(_t).expand(-1,self.num_tokens,-1)).permute(1,0,2)
hidden_states = torch.cat((
hidden_states[:1, :, :],
deep_prompt_emb + deep_text,
hidden_states[(1+self.num_tokens):, :, :]
), dim=0)
hidden_states = self.transformer.resblocks[i](hidden_states)
else:
hidden_states = self.transformer.resblocks[i](hidden_states)
if len(self.out_indices) > 1:
if i in self.out_indices[:-1]:
xp = hidden_states.permute(1, 0, 2)[:, -(H*W):, :].permute(0, 2, 1).reshape(B, -1, H, W)
features.append(xp.contiguous() / xp.norm(dim=1,keepdim=True))
encoded = self.prompt_norm(hidden_states)
return encoded, features
# Path: tools/models/Text_Encoder.py
class CLIPTextEncoder(nn.Module):
def __init__(self, context_length=77,
vocab_size=49408,
# vocab_size=49408+1,
transformer_width=512,
transformer_heads=8,
transformer_layers=12,
embed_dim=512,
out_dim=256,
pretrained=None, **kwargs):
super().__init__()
self.pretrained = pretrained
self.context_length = context_length
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
# self.text_projection = nn.Linear(transformer_width, embed_dim)
def init_weights(self, pretrained=None):
pretrained = pretrained or self.pretrained
if isinstance(pretrained, str):
checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()
# checkpoint = torch.load(pretrained)['model']
state_dict = {}
for k in checkpoint.keys():
if k.startswith('transformer.'):
# if k.startswith('module.encode_text.transformer.'):
# new_k = k.replace('module.encode_text.', '')
# state_dict[new_k] = checkpoint[k].float()
state_dict[k] = checkpoint[k].float()
if k == 'positional_embedding' or k == 'text_projection' or k.startswith('token_embedding') or k.startswith('ln_final'):
# if k == 'module.encode_text.positional_embedding' or k.startswith('module.encode_text.text_projection') or k.startswith('module.encode_text.token_embedding') or k.startswith('module.encode_text.ln_final'):
# new_k = k.replace('module.encode_text.', '')
# if new_k == 'positional_embedding' and checkpoint[k].size(0) > self.context_length:
if k == 'positional_embedding' and checkpoint[k].size(0) > self.context_length:
checkpoint[k] = checkpoint[k][:self.context_length]
print('positional_embedding is truncated from 77 to', self.context_length)
# state_dict[new_k] = checkpoint[k]
state_dict[k] = checkpoint[k]
u, w = self.load_state_dict(state_dict, False)
if u != [] or w != []:
print(u, w, 'are misaligned params in text encoder')
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def forward(self, text):
x = self.token_embedding(text)
x = x + self.positional_embedding
x = x.permute(1, 0, 2)
x = self.transformer(x)
x = x.permute(1, 0, 2)
x = self.ln_final(x)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
# x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
return x
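# A quick illustration of what build_attention_mask() produces, using a toy context
# length of 4 (the real model uses self.context_length):
import torch
toy_mask = torch.empty(4, 4)
toy_mask.fill_(float("-inf"))
toy_mask.triu_(1)
# toy_mask[i, j] is 0 for j <= i and -inf for j > i, so as an additive attention mask
# each token may attend only to itself and earlier tokens (causal attention).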
# Path: tools/models/VLCounter.py
import math
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .ViT_Encoder import VPTCLIPVisionTransformer as vpt
from .ViT_Encoder_add import SPTCLIPVisionTransformer as spt
from .Text_Encoder import CLIPTextEncoder
from timm.models.layers import trunc_normal_
def trunc_normal_init(module: nn.Module,
mean: float = 0,
std: float = 1,
| a: float = -2, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Chris10M/Ev2Hands
# Path: src/Ev2Hands/model/pointnet2_utils.py
class PointNetSetAbstractionMsg(nn.Module):
def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):
super(PointNetSetAbstractionMsg, self).__init__()
self.npoint = npoint
self.radius_list = radius_list
self.nsample_list = nsample_list
self.conv_blocks = nn.ModuleList()
self.bn_blocks = nn.ModuleList()
for i in range(len(mlp_list)):
convs = nn.ModuleList()
bns = nn.ModuleList()
last_channel = in_channel + 3
for out_channel in mlp_list[i]:
convs.append(nn.Conv2d(last_channel, out_channel, 1))
bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
self.conv_blocks.append(convs)
self.bn_blocks.append(bns)
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]
"""
xyz = xyz.permute(0, 2, 1).contiguous()
if points is not None:
points = points.permute(0, 2, 1).contiguous()
B, N, C = xyz.shape
S = self.npoint
new_xyz = index_points(xyz, farthest_point_sample(xyz, S))
new_points_list = []
for i, radius in enumerate(self.radius_list):
K = self.nsample_list[i]
group_idx = query_ball_point(radius, K, xyz, new_xyz)
grouped_xyz = index_points(xyz, group_idx)
grouped_xyz -= new_xyz.view(B, S, 1, C)
if points is not None:
grouped_points = index_points(points, group_idx)
grouped_points = torch.cat([grouped_points, grouped_xyz], dim=-1)
else:
grouped_points = grouped_xyz
grouped_points = grouped_points.permute(0, 3, 2, 1).contiguous() # [B, D, K, S]
for j in range(len(self.conv_blocks[i])):
conv = self.conv_blocks[i][j]
bn = self.bn_blocks[i][j]
grouped_points = F.relu(bn(conv(grouped_points)))
new_points = torch.max(grouped_points, 2)[0] # [B, D', S]
new_points_list.append(new_points)
new_xyz = new_xyz.permute(0, 2, 1).contiguous()
new_points_concat = torch.cat(new_points_list, dim=1)
return new_xyz, new_points_concat
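# A shape-check sketch for the MSG layer above, using the hyper-parameters TEHNet passes
# to its first abstraction layer (assumes the helpers farthest_point_sample,
# query_ball_point and index_points from this module are available):
import torch
sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 4,
                                [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
xyz = torch.rand(2, 3, 2048)     # [B, C, N] point positions
points = torch.rand(2, 4, 2048)  # [B, D, N] per-point features, D == in_channel
new_xyz, new_points = sa1(xyz, points)
# new_xyz: [2, 3, 512]; new_points: [2, 64 + 128 + 128, 512]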
# Path: src/Ev2Hands/model/pointnet2_utils.py
class PointNetSetAbstraction(nn.Module):
def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):
super(PointNetSetAbstraction, self).__init__()
self.npoint = npoint
self.radius = radius
self.nsample = nsample
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm2d(out_channel))
last_channel = out_channel
self.group_all = group_all
def forward(self, xyz, points):
"""
Input:
xyz: input points position data, [B, C, N]
points: input points data, [B, D, N]
Return:
new_xyz: sampled points position data, [B, C, S]
new_points_concat: sample points feature data, [B, D', S]
"""
xyz = xyz.permute(0, 2, 1).contiguous()
if points is not None:
points = points.permute(0, 2, 1).contiguous()
if self.group_all:
new_xyz, new_points = sample_and_group_all(xyz, points)
else:
new_xyz, new_points = sample_and_group(self.npoint, self.radius, self.nsample, xyz, points)
# new_xyz: sampled points position data, [B, npoint, C]
# new_points: sampled points data, [B, npoint, nsample, C+D]
new_points = new_points.permute(0, 3, 2, 1).contiguous() # [B, C+D, nsample,npoint]
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
new_points = torch.max(new_points, 2)[0]
new_xyz = new_xyz.permute(0, 2, 1).contiguous()
return new_xyz, new_points
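# The single-scale variant with group_all=True collapses everything into one global
# feature (mirrors TEHNet's sa3; relies on sample_and_group_all from this module):
import torch
sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None,
                             in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
l2_xyz, l2_points = torch.rand(2, 3, 128), torch.rand(2, 512, 128)
l3_xyz, l3_points = sa3(l2_xyz, l2_points)
# l3_xyz: [2, 3, 1]; l3_points: [2, 1024, 1]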
# Path: src/Ev2Hands/model/pointnet2_utils.py
class PointNetFeaturePropagation(nn.Module):
def __init__(self, in_channel, mlp):
super(PointNetFeaturePropagation, self).__init__()
self.mlp_convs = nn.ModuleList()
self.mlp_bns = nn.ModuleList()
last_channel = in_channel
for out_channel in mlp:
self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))
self.mlp_bns.append(nn.BatchNorm1d(out_channel))
last_channel = out_channel
def forward(self, xyz1, xyz2, points1, points2):
"""
Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N]
"""
xyz1 = xyz1.permute(0, 2, 1).contiguous()
xyz2 = xyz2.permute(0, 2, 1).contiguous()
points2 = points2.permute(0, 2, 1).contiguous()
B, N, C = xyz1.shape
_, S, _ = xyz2.shape
if S == 1:
interpolated_points = points2.repeat(1, N, 1)
else:
dists = square_distance(xyz1, xyz2)
dists, idx = dists.sort(dim=-1)
dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
dist_recip = 1.0 / (dists + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
if points1 is not None:
points1 = points1.permute(0, 2, 1).contiguous()
new_points = torch.cat([points1, interpolated_points], dim=-1)
else:
new_points = interpolated_points
new_points = new_points.permute(0, 2, 1).contiguous()
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)))
return new_points
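# And a sketch of propagating that single global feature back to 128 points (mirrors
# TEHNet's fp3; note in_channel must equal D1 + D2 when points1 is provided):
import torch
fp3 = PointNetFeaturePropagation(in_channel=512 + 1024, mlp=[256, 256])
l2_xyz, l3_xyz = torch.rand(2, 3, 128), torch.rand(2, 3, 1)
l2_points, l3_points = torch.rand(2, 512, 128), torch.rand(2, 1024, 1)
out = fp3(l2_xyz, l3_xyz, l2_points, l3_points)
# out: [2, 256, 128]; with S == 1 the global feature is simply broadcast before the MLP.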
# Path: src/Ev2Hands/model/TEHNet.py
import numpy as np
import torch.nn as nn
import torch
import os
import torch.nn.functional as F
from .pointnet2_utils import PointNetSetAbstractionMsg, PointNetSetAbstraction, PointNetFeaturePropagation
previous_rot_trans_params = torch.zeros(6).unsqueeze(0).expand(batch_size, -1).to(device)
mano_params = self.mano_regressor(l2_points)
global_orient = mano_params[:, :3]
hand_pose = mano_params[:, 3:3+self.n_pose_params]
betas = mano_params[:, 3+self.n_pose_params:-3]
transl = mano_params[:, -3:]
device = mano_hand.shapedirs.device
mano_args = {
'global_orient': global_orient.to(device),
'hand_pose' : hand_pose.to(device),
'betas' : betas.to(device),
'transl' : transl.to(device),
}
mano_outs = dict()
output = mano_hand(**mano_args)
mano_outs['vertices'] = output.vertices
mano_outs['j3d'] = output.joints
mano_outs.update(mano_args)
if not self.training:
mano_outs['faces'] = np.tile(mano_hand.faces, (batch_size, 1, 1))
return mano_outs
class TEHNet(nn.Module):
def __init__(self, n_pose_params, num_classes=4):
super(TEHNet, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = 1 + int(os.getenv('ERPC', 0))
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256])
self.fp2 = PointNetFeaturePropagation(in_channel=576, mlp=[256, 128])
self.fp1 = PointNetFeaturePropagation(128, [128, 128, 256])
self.classifier = nn.Sequential(
nn.Conv1d(256, 256, 1),
nn.ReLU(),
nn.BatchNorm1d(256),
nn.Dropout(0.3),
nn.Conv1d(256, num_classes, 1)
)
self.attention_block = AttentionBlock()
self.left_mano_regressor = MANORegressor(n_pose_params=n_pose_params)
self.right_mano_regressor = MANORegressor(n_pose_params=n_pose_params)
self.mhlnes = int(os.getenv('MHLNES', 0))
self.left_query_conv = nn.Sequential(
nn.Conv1d(256, 256, 3, 1, 3//2),
nn.ReLU(),
nn.BatchNorm1d(256),
nn.Dropout(0.1),
nn.Conv1d(256, 256, 3, 1, 3//2),
nn.BatchNorm1d(256),
)
self.right_query_conv = nn.Sequential(
nn.Conv1d(256, 256, 3, 1, 3//2),
nn.ReLU(),
nn.BatchNorm1d(256),
nn.Dropout(0.1),
nn.Conv1d(256, 256, 3, 1, 3//2),
nn.BatchNorm1d(256),
)
def forward(self, xyz, mano_hands):
device = xyz.device
# Set Abstraction layers
l0_points = xyz
l0_xyz = xyz[:, :3, :]
if self.mhlnes:
l0_xyz[:, -1, :] = xyz[:, 3:, :].mean(1)
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l3_xyz, l3_points = self.sa3(l2_xyz, l2_points)
# Feature Propagation layers
l2_points = self.fp3(l2_xyz, l3_xyz, l2_points, l3_points)
l1_points = self.fp2(l1_xyz, l2_xyz, l1_points, l2_points)
l0_points = self.fp1(l0_xyz, l1_xyz, None, l1_points)
seg_out = self.classifier(l0_points)
feat_fuse = l0_points
left_hand_features = self.attention_block(seg_out, feat_fuse, self.left_query_conv(feat_fuse))
right_hand_features = self.attention_block(seg_out, feat_fuse, self.right_query_conv(feat_fuse))
left = self.left_mano_regressor(l0_xyz, left_hand_features, mano_hands['left'])
right = self.right_mano_regressor(l0_xyz, right_hand_features, mano_hands['right'])
return {'class_logits': seg_out, 'left': left, 'right': right}
def main():
net = TEHNet(n_pose_params=6)
| points = torch.rand(4, 4, 128) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: abing7k/redroid-script
# Path: stuffs/gapps.py
class Gapps(General):
dl_links = {
"x86_64": ["https://cfhcable.dl.sourceforge.net/project/opengapps/x86_64/20220503/open_gapps-x86_64-10.0-pico-20220503.zip", "5fb186bfb7bed8925290f79247bec4cf"],
"x86": ["https://cfhcable.dl.sourceforge.net/project/opengapps/x86/20220503/open_gapps-x86-10.0-pico-20220503.zip", "7fc75ec9bdca8def07bad306345ce877"],
"arm64-v8a": ["https://cfhcable.dl.sourceforge.net/project/opengapps/arm64/20220503/open_gapps-arm64-10.0-pico-20220503.zip", "2feaf25d03530892c6146687ffa08bc2"],
"armeabi-v7a": ["https://cfhcable.dl.sourceforge.net/project/opengapps/arm/20220215/open_gapps-arm-10.0-pico-20220215.zip", "1d00ffa4594734d477b10f2e0ee19c0b"]
}
arch = host()
print("arch: "+str(arch))
download_loc = get_download_dir()
dl_link = dl_links[arch[0]][0]
dl_file_name = os.path.join(download_loc, "open_gapps.zip")
act_md5 = dl_links[arch[0]][1]
copy_dir = "./gapps"
extract_to = "/tmp/ogapps/extract"
non_apks = [
"vending-common.tar.lz",
"defaultetc-common.tar.lz",
"defaultframework-common.tar.lz",
"googlepixelconfig-common.tar.lz"
]
if arch == ('arm64-v8a', 64):
skip_1 = 'setupwizarddefault-x86_64.tar.lz'
skip_2 = "setupwizardtablet-x86_64.tar.lz"
if arch == ('x86_64', 64):
skip_1 = 'setupwizarddefault-arm64.tar.lz'
skip_2 = "setupwizardtablet-arm64.tar.lz"
skip = [
skip_1,
skip_2
]
def download(self):
print_color("Downloading OpenGapps now .....", bcolors.GREEN)
super().download()
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
if not os.path.exists(self.extract_to):
os.makedirs(self.extract_to)
if not os.path.exists(os.path.join(self.extract_to, "appunpack")):
os.makedirs(os.path.join(self.extract_to, "appunpack"))
for lz_file in os.listdir(os.path.join(self.extract_to, "Core")):
for d in os.listdir(os.path.join(self.extract_to, "appunpack")):
shutil.rmtree(os.path.join(self.extract_to, "appunpack", d))
if lz_file not in self.skip:
if lz_file not in self.non_apks:
print(" Processing app package : "+os.path.join(self.extract_to, "Core", lz_file))
run(["tar", "--lzip", "-xvf", os.path.join(self.extract_to, "Core", lz_file), "-C", os.path.join(self.extract_to, "appunpack")])
app_name = os.listdir(os.path.join(self.extract_to, "appunpack"))[0]
xx_dpi = os.listdir(os.path.join(self.extract_to, "appunpack", app_name))[0]
app_priv = os.listdir(os.path.join(self.extract_to, "appunpack", app_name, "nodpi"))[0]
app_src_dir = os.path.join(self.extract_to, "appunpack", app_name, xx_dpi, app_priv)
for app in os.listdir(app_src_dir):
shutil.copytree(os.path.join(app_src_dir, app), os.path.join(self.copy_dir, "system", "priv-app", app), dirs_exist_ok=True)
else:
print(" Processing extra package : "+os.path.join(self.extract_to, "Core", lz_file))
run(["tar", "--lzip", "-xvf", os.path.join(self.extract_to, "Core", lz_file), "-C", os.path.join(self.extract_to, "appunpack")])
app_name = os.listdir(os.path.join(self.extract_to, "appunpack"))[0]
common_content_dirs = os.listdir(os.path.join(self.extract_to, "appunpack", app_name, "common"))
for ccdir in common_content_dirs:
shutil.copytree(os.path.join(self.extract_to, "appunpack", app_name, "common", ccdir), os.path.join(self.copy_dir, "system", ccdir), dirs_exist_ok=True)
# Path: stuffs/magisk.py
class Magisk(General):
download_loc = get_download_dir()
dl_link = "https://mgb1.androidfilehost.com/dl/_E1ugpo3KLudP2K-WauRfQ/1702724403/10620683726822077179/Magisk+Delta+25206+canary+%284dbd8358%29.apk"
dl_file_name = os.path.join(download_loc, "magisk.apk")
extract_to = "/tmp/magisk_unpack"
copy_dir = "./magisk"
magisk_dir = os.path.join(copy_dir, "system", "etc", "init", "magisk")
machine = host()
oringinal_bootanim = """
service bootanim /system/bin/bootanimation
class core animation
user graphics
group graphics audio
disabled
oneshot
ioprio rt 0
task_profiles MaxPerformance
"""
bootanim_component = """
on post-fs-data
start logd
exec u:r:su:s0 root root -- /system/etc/init/magisk/magisk{arch} --auto-selinux --setup-sbin /system/etc/init/magisk
exec u:r:su:s0 root root -- /system/etc/init/magisk/magiskpolicy --live --magisk "allow * magisk_file lnk_file *"
mkdir /sbin/.magisk 700
mkdir /sbin/.magisk/mirror 700
mkdir /sbin/.magisk/block 700
copy /system/etc/init/magisk/config /sbin/.magisk/config
rm /dev/.magisk_unblock
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --post-fs-data
wait /dev/.magisk_unblock 40
rm /dev/.magisk_unblock
on zygote-start
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --service
on property:sys.boot_completed=1
mkdir /data/adb/magisk 755
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --boot-complete
exec -- /system/bin/sh -c "if [ ! -e /data/data/io.github.huskydg.magisk ] ; then pm install /system/etc/init/magisk/magisk.apk ; fi"
on property:init.svc.zygote=restarting
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
on property:init.svc.zygote=stopped
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
""".format(arch=machine[1])
def download(self):
if os.path.isfile(self.dl_file_name):
os.remove(self.dl_file_name)
print_color("Downloading latest Magisk-Delta now .....", bcolors.GREEN)
download_file(self.dl_link, self.dl_file_name)
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
if not os.path.exists(self.magisk_dir):
os.makedirs(self.magisk_dir, exist_ok=True)
if not os.path.exists(os.path.join(self.copy_dir, "sbin")):
os.makedirs(os.path.join(self.copy_dir, "sbin"), exist_ok=True)
print_color("Copying magisk libs now ...", bcolors.GREEN)
lib_dir = os.path.join(self.extract_to, "lib", self.machine[0])
for parent, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
o_path = os.path.join(lib_dir, filename)
filename = re.search(r'lib(.*)\.so', filename)
n_path = os.path.join(self.magisk_dir, filename.group(1))
shutil.copyfile(o_path, n_path)
run(["chmod", "+x", n_path])
shutil.copyfile(self.dl_file_name, os.path.join(self.magisk_dir,"magisk.apk") )
# Updating Magisk from the Magisk manager will modify bootanim.rc,
# so it is necessary to back up the original bootanim.rc.
bootanim_path = os.path.join(self.copy_dir, "system", "etc", "init", "bootanim.rc")
gz_filename = bootanim_path + ".gz"
with gzip.open(gz_filename,'wb') as f_gz:
f_gz.write(self.oringinal_bootanim.encode('utf-8'))
with open(bootanim_path, "w") as initfile:
initfile.write(self.oringinal_bootanim+self.bootanim_component)
os.chmod(bootanim_path, 0o644)
# Path: stuffs/ndk.py
class Ndk(General):
download_loc = get_download_dir()
copy_dir = "./ndk"
dl_link = "https://github.com/supremegamers/vendor_google_proprietary_ndk_translation-prebuilt/archive/181d9290a69309511185c4417ba3d890b3caaaa8.zip"
dl_file_name = os.path.join(download_loc, "libndktranslation.zip")
extract_to = "/tmp/libndkunpack"
act_md5 = "0beff55f312492f24d539569d84f5bfb"
# init_rc_component = """
# # Enable native bridge for target executables
# on early-init
# mount binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc
# on property:ro.enable.native.bridge.exec=1
# copy /system/etc/binfmt_misc/arm_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm_dyn /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_exe /proc/sys/fs/binfmt_misc/register
# copy /system/etc/binfmt_misc/arm64_dyn /proc/sys/fs/binfmt_misc/register
# """
def download(self):
print_color("Downloading libndk now .....", bcolors.GREEN)
super().download()
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
run(["chmod", "+x", self.extract_to, "-R"])
print_color("Copying libndk library files ...", bcolors.GREEN)
shutil.copytree(os.path.join(self.extract_to, "vendor_google_proprietary_ndk_translation-prebuilt-181d9290a69309511185c4417ba3d890b3caaaa8", "prebuilts"), os.path.join(self.copy_dir, "system"), dirs_exist_ok=True)
init_path = os.path.join(self.copy_dir, "system", "etc", "init", "ndk_translation.rc")
os.chmod(init_path, 0o644)
# if not os.path.isfile(init_path):
# os.makedirs(os.path.dirname(init_path), exist_ok=True)
# with open(init_path, "w") as initfile:
# initfile.write(self.init_rc_component)
# Path: stuffs/widevine.py
class Widevine(General):
def __init__(self, android_version) -> None:
super().__init__()
self.android_version = android_version
self.dl_link = self.dl_links[self.machine[0]][android_version][0]
self.act_md5 = self.dl_links[self.machine[0]][android_version][1]
download_loc = get_download_dir()
machine = host()
copy_dir = "./widevine"
dl_links = {
# "x86": {
# "11.0.0": ["https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/48d1076a570837be6cdce8252d5d143363e37cc1.zip",
# "f587b8859f9071da4bca6cea1b9bed6a"]
# },
"x86_64": {
"11.0.0": ["https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/48d1076a570837be6cdce8252d5d143363e37cc1.zip",
"f587b8859f9071da4bca6cea1b9bed6a"],
"12.0.0": ["https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/3bba8b6e9dd5ffad5b861310433f7e397e9366e8.zip",
"3e147bdeeb7691db4513d93cfa6beb23"],
"13.0.0": ["https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/a8524d608431573ef1c9313822d271f78728f9a6.zip",
"5c55df61da5c012b4e43746547ab730f"]
},
# "armeabi-v7a":
# {
# "11.0.0": ["https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/7b6e37ef0b63408f7d0232e67192020ba0aa402b.zip",
# "3c3a136dc926ae5fc07826359720dbab"]
# },
"arm64-v8a": {
"11.0.0": ["https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/a1a19361d36311bee042da8cf4ced798d2c76d98.zip",
"fed6898b5cfd2a908cb134df97802554"]
}
}
dl_file_name = os.path.join(download_loc, "widevine.zip")
extract_to = "/tmp/widevineunpack"
def download(self):
print_color("Downloading widevine now .....", bcolors.GREEN)
super().download()
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
run(["chmod", "+x", self.extract_to, "-R"])
print_color("Copying widevine library files ...", bcolors.GREEN)
name = re.findall(r"([a-zA-Z0-9]+)\.zip", self.dl_link)[0]
shutil.copytree(os.path.join(self.extract_to, "vendor_google_proprietary_widevine-prebuilt-"+name,
"prebuilts"), os.path.join(self.copy_dir, "vendor"), dirs_exist_ok=True)
if "x86" in self.machine[0] and self.android_version == "11.0.0":
os.symlink("./libprotobuf-cpp-lite-3.9.1.so",
os.path.join(self.copy_dir, "vendor", "lib", "libprotobuf-cpp-lite.so"))
os.symlink("./libprotobuf-cpp-lite-3.9.1.so", os.path.join(self.copy_dir,
"vendor", "lib64", "libprotobuf-cpp-lite.so"))
for file in os.listdir(os.path.join(self.copy_dir, "vendor", "etc", "init")):
if file.endswith('.rc'):
os.chmod(os.path.join(self.copy_dir, "vendor", "etc", "init", file), 0o644)
# Path: redroid.py
import argparse
import tools.helper as helper
import subprocess
from stuffs.gapps import Gapps
from stuffs.magisk import Magisk
from stuffs.ndk import Ndk
from stuffs.widevine import Widevine
#!/usr/bin/env python3
def main():
dockerfile = ""
tags = []
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--android-version',
dest='android',
help='Specify the Android version to build',
default='11.0.0',
choices=['13.0.0', '12.0.0', '12.0.0_64only', '11.0.0', '10.0.0', '9.0.0', '8.1.0'])
| parser.add_argument('-g', '--install-gapps', |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zvict/papr
# Path: models/utils.py
def normalize_vector(x, eps=0.):
# assert(x.shape[-1] == 3)
return x / (torch.norm(x, dim=-1, keepdim=True) + eps)
# Path: models/utils.py
def create_learning_rate_fn(optimizer, max_steps, args, debug=False):
"""Create learning rate schedule."""
if args.type == "none":
return None
if args.warmup > 0:
warmup_start_factor = 1e-16
else:
warmup_start_factor = 1.0
warmup_fn = lr_scheduler.LinearLR(optimizer,
start_factor=warmup_start_factor,
end_factor=1.0,
total_iters=args.warmup,
verbose=debug)
if args.type == "linear":
decay_fn = lr_scheduler.LinearLR(optimizer,
start_factor=1.0,
end_factor=0.,
total_iters=max_steps - args.warmup,
verbose=debug)
schedulers = [warmup_fn, decay_fn]
milestones = [args.warmup]
elif args.type == "cosine":
cosine_steps = max(max_steps - args.warmup, 1)
decay_fn = lr_scheduler.CosineAnnealingLR(optimizer,
T_max=cosine_steps,
verbose=debug)
schedulers = [warmup_fn, decay_fn]
milestones = [args.warmup]
elif args.type == "cosine-hlfperiod":
cosine_steps = max(max_steps - args.warmup, 1) * 2
decay_fn = lr_scheduler.CosineAnnealingLR(optimizer,
T_max=cosine_steps,
verbose=debug)
schedulers = [warmup_fn, decay_fn]
milestones = [args.warmup]
elif args.type == "exp":
decay_fn = lr_scheduler.ExponentialLR(optimizer,
gamma=args.gamma,
verbose=debug)
schedulers = [warmup_fn, decay_fn]
milestones = [args.warmup]
elif args.type == "stop":
decay_fn = lr_scheduler.StepLR(
optimizer, step_size=1, gamma=0.0, verbose=debug)
schedulers = [warmup_fn, decay_fn]
milestones = [args.warmup]
else:
raise NotImplementedError
schedule_fn = lr_scheduler.SequentialLR(optimizer,
schedulers=schedulers,
milestones=milestones,
verbose=debug)
return schedule_fn
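# A minimal usage sketch; the args object only needs the fields read above ('type',
# 'warmup', and 'gamma' for the "exp" schedule), so a SimpleNamespace stands in for the
# real config here:
import torch
from types import SimpleNamespace
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
sched_args = SimpleNamespace(type="cosine", warmup=100, gamma=None)
scheduler = create_learning_rate_fn(optimizer, max_steps=1000, args=sched_args)
for step in range(1000):
    optimizer.step()
    scheduler.step()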
# Path: models/utils.py
def add_points_knn(coords, influ_scores, add_num, k, comb_type="mean", sample_type="random", sample_k=10, point_features=None):
"""
Add points to the point cloud by kNN
"""
pc = KDTree(coords)
N = coords.shape[0]
# Step 1: Determine where to add points
if N <= add_num and "random" in comb_type:
inds = np.random.choice(N, add_num, replace=True)
query_coords = coords[inds, :]
elif N <= add_num:
query_coords = coords
inds = list(range(N))
else:
if sample_type == "random":
inds = np.random.choice(N, add_num, replace=False)
query_coords = coords[inds, :]
elif sample_type == "top-knn-std":
assert k >= 2
nns_dists, nns_inds = pc.query(coords, k=sample_k)
inds = np.argsort(nns_dists.std(axis=-1))[-add_num:]
query_coords = coords[inds, :]
elif sample_type == "top-knn-mean":
assert k >= 2
nns_dists, nns_inds = pc.query(coords, k=sample_k)
inds = np.argsort(nns_dists.mean(axis=-1))[-add_num:]
query_coords = coords[inds, :]
elif sample_type == "top-knn-max":
assert k >= 2
nns_dists, nns_inds = pc.query(coords, k=sample_k)
inds = np.argsort(nns_dists.max(axis=-1))[-add_num:]
query_coords = coords[inds, :]
elif sample_type == "top-knn-min":
assert k >= 2
nns_dists, nns_inds = pc.query(coords, k=sample_k)
inds = np.argsort(nns_dists.min(axis=-1))[-add_num:]
query_coords = coords[inds, :]
elif sample_type == "influ-scores-max":
inds = np.argsort(influ_scores.squeeze())[-add_num:]
query_coords = coords[inds, :]
elif sample_type == "influ-scores-min":
inds = np.argsort(influ_scores.squeeze())[:add_num]
query_coords = coords[inds, :]
else:
raise NotImplementedError
# Step 2: Add points by kNN
new_features = None
if comb_type == "duplicate":
noise = np.random.randn(3).astype(np.float32)
noise = noise / np.linalg.norm(noise)
noise *= k
new_coords = (query_coords + noise)
new_influ_scores = influ_scores[inds, :]
if point_features is not None:
new_features = point_features[inds, :]
else:
nns_dists, nns_inds = pc.query(query_coords, k=k+1)
nns_dists = nns_dists.astype(np.float32)
nns_dists = nns_dists[:, 1:]
nns_inds = nns_inds[:, 1:]
if comb_type == "mean":
new_coords = coords[nns_inds, :].mean(
axis=-2) # (Nq, k, 3) -> (Nq, 3)
new_influ_scores = influ_scores[nns_inds, :].mean(axis=-2)
if point_features is not None:
new_features = point_features[nns_inds, :].mean(axis=-2)
elif comb_type == "random":
rnd_w = np.random.uniform(
0, 1, (query_coords.shape[0], k)).astype(np.float32)
rnd_w /= rnd_w.sum(axis=-1, keepdims=True)
new_coords = (coords[nns_inds, :] *
rnd_w.reshape(-1, k, 1)).sum(axis=-2)
new_influ_scores = (
influ_scores[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)
if point_features is not None:
new_features = (
point_features[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)
elif comb_type == "random-softmax":
rnd_w = np.random.randn(
query_coords.shape[0], k).astype(np.float32)
rnd_w = scipy.special.softmax(rnd_w, axis=-1)
new_coords = (coords[nns_inds, :] *
rnd_w.reshape(-1, k, 1)).sum(axis=-2)
new_influ_scores = (
influ_scores[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)
if point_features is not None:
new_features = (
point_features[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)
elif comb_type == "weighted":
new_coords = (coords[nns_inds, :] * (1 / (nns_dists + 1e-6)).reshape(-1, k, 1)).sum(
axis=-2) / (1 / (nns_dists + 1e-6)).sum(axis=-1, keepdims=True)
new_influ_scores = (influ_scores[nns_inds, :] * (1 / (nns_dists + 1e-6)).reshape(-1, k, 1)).sum(
axis=-2) / (1 / (nns_dists + 1e-6)).sum(axis=-1, keepdims=True)
if point_features is not None:
new_features = (point_features[nns_inds, :] * (1 / (nns_dists + 1e-6)).reshape(-1, k, 1)).sum(
axis=-2) / (1 / (nns_dists + 1e-6)).sum(axis=-1, keepdims=True)
else:
raise NotImplementedError
return new_coords, len(new_coords), new_influ_scores, new_features
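# A small end-to-end sketch of the densification helper above (random sampling plus
# kNN-mean combination; relies on scipy's KDTree as imported by this module):
import numpy as np
coords = np.random.rand(100, 3).astype(np.float32)
influ_scores = np.ones((100, 1), dtype=np.float32)
new_coords, n_added, new_scores, new_feats = add_points_knn(
    coords, influ_scores, add_num=10, k=4, comb_type="mean", sample_type="random")
# new_coords: (10, 3), each row the mean of a query point's 4 nearest neighbours;
# new_feats is None because no point_features were passed.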
# Path: models/utils.py
def activation_func(act_type='leakyrelu', neg_slope=0.2, inplace=True, num_channels=128, a=1., b=1., trainable=False):
act_type = act_type.lower()
if act_type == 'none':
layer = nn.Identity()
elif act_type == 'leakyrelu':
layer = nn.LeakyReLU(neg_slope, inplace)
elif act_type == 'prelu':
layer = nn.PReLU(num_channels)
elif act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == '+1':
layer = PlusOneActivation()
elif act_type == 'relu+1':
layer = nn.Sequential(nn.ReLU(inplace), PlusOneActivation())
elif act_type == 'tanh':
layer = nn.Tanh()
elif act_type == 'shifted_tanh':
layer = ShiftedTanh()
elif act_type == 'sigmoid':
layer = nn.Sigmoid()
elif act_type == 'gelu':
layer = nn.GELU()
elif act_type == 'gaussian':
layer = GaussianActivation(a, trainable)
elif act_type == 'quadratic':
layer = QuadraticActivation(a, trainable)
elif act_type == 'multi-quadratic':
layer = MultiQuadraticActivation(a, trainable)
elif act_type == 'laplacian':
layer = LaplacianActivation(a, trainable)
elif act_type == 'super-gaussian':
layer = SuperGaussianActivation(a, b, trainable)
elif act_type == 'expsin':
layer = ExpSinActivation(a, trainable)
elif act_type == 'clamp':
layer = Clamp(0, 1)
elif 'sine' in act_type:
layer = Sine(factor=a)
elif 'softplus' in act_type:
a, b, c = [float(i) for i in act_type.split('_')[1:]]
print(
'Softplus activation: a={:.2f}, b={:.2f}, c={:.2f}'.format(a, b, c))
layer = SoftplusActivation(a, b, c)
else:
raise NotImplementedError(
'activation layer [{:s}] is not found'.format(act_type))
return layer
# Path: models/mlp.py
def get_mapping_mlp(args, use_amp=False, amp_dtype=torch.float16):
return MappingMLP(args.mapping_mlp, inp_dim=args.shading_code_dim, out_dim=args.mapping_mlp.out_dim, use_amp=use_amp, amp_dtype=amp_dtype)
# Path: models/tx.py
def get_transformer(args, seq_len, v_extra_dim=0, k_extra_dim=0, q_extra_dim=0, eps=1e-6, use_amp=False, amp_dtype=torch.float16):
k_dim_map = {
1: [3, 3, 3],
}
k_dim = k_dim_map[args.k_type]
q_dim_map = {
1: [3],
}
q_dim = q_dim_map[args.q_type]
v_dim_map = {
1: [3, 3],
}
v_dim = v_dim_map[args.v_type]
return Transformer(d_k=k_dim, d_q=q_dim, d_v=v_dim, d_model=args.d_model, d_out=args.d_out, seq_len=seq_len,
embed_args=args.embed, block_args=args.block, d_ko=k_extra_dim, d_qo=q_extra_dim,
d_vo=v_extra_dim, eps=eps, use_amp=use_amp, amp_dtype=amp_dtype)
# Path: models/renderer.py
def get_generator(args, in_c, out_c, use_amp=False, amp_dtype=torch.float16):
if args.type == "small-unet":
opt = args.small_unet
return SmallUNet(in_c, out_c, bilinear=opt.bilinear, single=opt.single, norm=opt.norm, last_act=opt.last_act,
use_amp=use_amp, amp_dtype=amp_dtype, affine_layer=opt.affine_layer)
elif args.type == "mlp":
opt = args.mlp
return MLPGenerator(inp_dim=in_c, num_layers=opt.num_layers, num_channels=opt.num_channels, out_dim=out_c,
act_type=opt.act_type, last_act_type=opt.last_act_type, use_wn=opt.use_wn, a=opt.act_a, b=opt.act_b,
trainable=opt.act_trainable, skip_layers=opt.skip_layers, bias=opt.bias, half_layers=opt.half_layers,
residual_layers=opt.residual_layers, residual_dims=opt.residual_dims)
else:
raise NotImplementedError(
'generator type [{:s}] is not supported'.format(args.type))
# Path: models/model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import os
import numpy as np
from .utils import normalize_vector, create_learning_rate_fn, add_points_knn, activation_func
from .mlp import get_mapping_mlp
from .tx import get_transformer
from .renderer import get_generator
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
class PAPR(nn.Module):
def __init__(self, args, device='cuda'):
super(PAPR, self).__init__()
self.args = args
self.eps = args.eps
self.device = device
self.use_amp = args.use_amp
self.amp_dtype = torch.float16 if args.amp_dtype == 'float16' else torch.bfloat16
self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)
point_opt = args.geoms.points
pc_feat_opt = args.geoms.point_feats
bkg_feat_opt = args.geoms.background
self.register_buffer('select_k', torch.tensor(
point_opt.select_k, device=device, dtype=torch.int32))
self.coord_scale = args.dataset.coord_scale
if point_opt.load_path:
if point_opt.load_path.endswith('.pth') or point_opt.load_path.endswith('.pt'):
points = torch.load(point_opt.load_path, map_location='cpu')
points = np.asarray(points).astype(np.float32)
np.random.shuffle(points)
points = points[:args.max_num_pts, :]
points = torch.from_numpy(points).float()
print("Loaded points from {}, shape: {}, dtype {}".format(point_opt.load_path, points.shape, points.dtype))
print("Loaded points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max())
else:
# Initialize point positions
pt_init_center = [i * self.coord_scale for i in point_opt.init_center]
pt_init_scale = [i * self.coord_scale for i in point_opt.init_scale]
if point_opt.init_type == 'sphere': # initial points on a sphere
points = self._sphere_pc(pt_init_center, point_opt.num, pt_init_scale)
elif point_opt.init_type == 'cube': # initial points in a cube
points = self._cube_normal_pc(pt_init_center, point_opt.num, pt_init_scale)
else:
raise NotImplementedError("Point init type [{:s}] is not found".format(point_opt.init_type))
print("Scratch points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max())
self.points = torch.nn.Parameter(points, requires_grad=True)
# Initialize point influence scores
self.points_influ_scores = torch.nn.Parameter(torch.ones(
points.shape[0], 1, device=device) * point_opt.influ_init_val, requires_grad=True)
# Initialize mapping MLP, only if fine-tuning with IMLE for the exposure control
self.mapping_mlp = None
if args.models.mapping_mlp.use:
self.mapping_mlp = get_mapping_mlp(
args.models, use_amp=self.use_amp, amp_dtype=self.amp_dtype)
# Initialize UNet
if args.models.use_renderer:
tx_opt = args.models.transformer
feat_dim = tx_opt.embed.d_ff_out if tx_opt.embed.share_embed else tx_opt.embed.value.d_ff_out
self.renderer = get_generator(args.models.renderer.generator, in_c=feat_dim,
out_c=3, use_amp=self.use_amp, amp_dtype=self.amp_dtype)
print("Renderer: ", count_parameters(self.renderer))
else:
assert (args.models.transformer.embed.share_embed and args.models.transformer.embed.d_ff_out == 3) or \
(not args.models.transformer.embed.share_embed and args.models.transformer.embed.value.d_ff_out == 3), \
"Value embedding MLP should have output dim 3 if not using renderer"
# Initialize background score and features
if bkg_feat_opt.init_type == 'random':
| bkg_feat_init_func = torch.rand |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AdaCheng/EgoThink
# Path: models/minigpt4_legacy/common/dist_utils.py
def setup_for_distributed(is_master):
def print(*args, **kwargs):
def is_dist_avail_and_initialized():
def get_world_size():
def get_rank():
def is_main_process():
def init_distributed_mode(args):
def get_dist_info():
def main_process(func):
def wrapper(*args, **kwargs):
def download_cached_file(url, check_hash=True, progress=False):
def get_cached_file_path():
# Path: models/minigpt4_legacy/common/dist_utils.py
def download_cached_file(url, check_hash=True, progress=False):
"""
Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.
If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.
"""
def get_cached_file_path():
# a hack to sync the file path across processes
parts = torch.hub.urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(timm_hub.get_cache_dir(), filename)
return cached_file
if is_main_process():
timm_hub.download_cached_file(url, check_hash, progress)
if is_dist_avail_and_initialized():
dist.barrier()
return get_cached_file_path()
# Path: models/minigpt4_legacy/common/utils.py
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
# Path: models/minigpt4_legacy/common/logger.py
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, attr)
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def global_avg(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {:.4f}".format(name, meter.global_avg))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
log_msg = [
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
if torch.cuda.is_available():
log_msg.append("max mem: {memory:.0f}")
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
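# A minimal usage sketch: wrap any iterable with log_every() and push scalar metrics
# through update(); SmoothedValue from the same module is assumed to provide the usual
# global_avg property:
logger = MetricLogger(delimiter="  ")
for batch_idx in logger.log_every(range(100), print_freq=20, header="Train:"):
    logger.update(loss=1.0 / (batch_idx + 1))
print("averaged stats:", logger.global_avg())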
# Path: models/minigpt4_legacy/models/base_model.py
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[0].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])
def load_checkpoint_from_config(self, cfg, **kwargs):
"""
Load checkpoint as specified in the config file.
If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.
When loading the pretrained model, each task-specific architecture may define their
own load_from_pretrained() method.
"""
load_finetuned = cfg.get("load_finetuned", True)
if load_finetuned:
finetune_path = cfg.get("finetuned", None)
assert (
finetune_path is not None
), "Found load_finetuned is True, but finetune_path is None."
self.load_checkpoint(url_or_filename=finetune_path)
else:
# load pre-trained weights
pretrain_path = cfg.get("pretrained", None)
assert "Found load_finetuned is False, but pretrain_path is None."
self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)
def before_evaluation(self, **kwargs):
pass
def show_n_params(self, return_str=True):
tot = 0
for p in self.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return "{:.1f}M".format(tot / 1e6)
else:
return "{:.1f}K".format(tot / 1e3)
else:
return tot
# Path: models/minigpt4_legacy/models/Qformer.py
class BertEmbeddings(nn.Module):
class BertSelfAttention(nn.Module):
class BertSelfOutput(nn.Module):
class BertAttention(nn.Module):
class BertIntermediate(nn.Module):
class BertOutput(nn.Module):
class BertLayer(nn.Module):
class BertEncoder(nn.Module):
class BertPooler(nn.Module):
class BertPredictionHeadTransform(nn.Module):
class BertLMPredictionHead(nn.Module):
class BertOnlyMLMHead(nn.Module):
class BertPreTrainedModel(PreTrainedModel):
class BertModel(BertPreTrainedModel):
class BertLMHeadModel(BertPreTrainedModel):
class BertForMaskedLM(BertPreTrainedModel):
def __init__(self, config):
def forward(
self,
input_ids=None,
position_ids=None,
query_embeds=None,
past_key_values_length=0,
):
def __init__(self, config, is_cross_attention):
def save_attn_gradients(self, attn_gradients):
def get_attn_gradients(self):
def save_attention_map(self, attention_map):
def get_attention_map(self):
def transpose_for_scores(self, x):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
def __init__(self, config):
def forward(self, hidden_states, input_tensor):
def __init__(self, config, is_cross_attention=False):
def prune_heads(self, heads):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, hidden_states, input_tensor):
def __init__(self, config, layer_num):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
query_length=0,
):
def feed_forward_chunk(self, attention_output):
def feed_forward_chunk_query(self, attention_output):
def __init__(self, config):
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
query_length=0,
):
def create_custom_forward(module):
def custom_forward(*inputs):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, hidden_states):
def __init__(self, config):
def forward(self, sequence_output):
def _init_weights(self, module):
def __init__(self, config, add_pooling_layer=False):
def get_input_embeddings(self):
def set_input_embeddings(self, value):
def _prune_heads(self, heads_to_prune):
def get_extended_attention_mask(
self,
attention_mask: Tensor,
input_shape: Tuple[int],
device: device,
is_decoder: bool,
has_query: bool = False,
) -> Tensor:
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
is_decoder=False,
):
def __init__(self, config):
def get_output_embeddings(self):
def set_output_embeddings(self, new_embeddings):
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=True,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=True,
reduction="mean",
):
def prepare_inputs_for_generation(
self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs
):
def _reorder_cache(self, past, beam_idx):
def __init__(self, config):
def get_output_embeddings(self):
def set_output_embeddings(self, new_embeddings):
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
query_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
return_logits=False,
is_decoder=False,
):
# Path: models/minigpt4_legacy/models/eva_vit.py
def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp32"):
model = VisionTransformer(
img_size=img_size,
patch_size=14,
use_mean_pooling=False,
embed_dim=1408,
depth=39,
num_heads=1408//88,
mlp_ratio=4.3637,
qkv_bias=True,
drop_path_rate=drop_path_rate,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
use_checkpoint=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth"
cached_file = download_cached_file(
url, check_hash=False, progress=True
)
state_dict = torch.load(cached_file, map_location="cpu")
interpolate_pos_embed(model,state_dict)
incompatible_keys = model.load_state_dict(state_dict, strict=False)
# print(incompatible_keys)
if precision == "fp16":
# model.to("cuda")
convert_weights_to_fp16(model)
return model
# Path: models/minigpt4_legacy/models/blip2.py
import contextlib
import logging
import os
import time
import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
from ..common import dist_utils as dist_utils
from ..common.dist_utils import download_cached_file
from ..common.utils import is_url
from ..common.logger import MetricLogger
from ..models.base_model import BaseModel
from ..models.Qformer import BertConfig, BertLMHeadModel
from ..models.eva_vit import create_eva_vit_g
from transformers import BertTokenizer
"""
Copyright (c) 2023, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class Blip2Base(BaseModel):
@classmethod
def init_tokenizer(cls):
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"bos_token": "[DEC]"})
return tokenizer
def maybe_autocast(self, dtype=torch.float16):
# if on cpu, don't use autocast
# if on gpu, use autocast with dtype if provided, otherwise use torch.float16
enable_autocast = self.device != torch.device("cpu")
if enable_autocast:
return torch.cuda.amp.autocast(dtype=dtype)
else:
return contextlib.nullcontext()
@classmethod
def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):
encoder_config = BertConfig.from_pretrained("bert-base-uncased")
encoder_config.encoder_width = vision_width
# insert cross-attention layer every other block
encoder_config.add_cross_attention = True
encoder_config.cross_attention_freq = cross_attention_freq
encoder_config.query_length = num_query_token
Qformer = BertLMHeadModel(config=encoder_config)
query_tokens = nn.Parameter(
torch.zeros(1, num_query_token, encoder_config.hidden_size)
)
query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
return Qformer, query_tokens
@classmethod
def init_vision_encoder(
cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision
):
assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4"
visual_encoder = create_eva_vit_g(
img_size, drop_path_rate, use_grad_checkpoint, precision
)
ln_vision = LayerNorm(visual_encoder.num_features)
return visual_encoder, ln_vision
def load_from_pretrained(self, url_or_filename):
# import pdb; pdb.set_trace()
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
msg = self.load_state_dict(state_dict, strict=False)
# logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
| return self |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 3dlg-hcvc/cage
# Path: utils/savermixins.py
class SaverMixin():
def set_save_dir(self, stage):
self.hparams.save_dir = os.path.join(self.logger.log_dir, 'images', stage)
os.makedirs(self.hparams.save_dir, exist_ok=True)
@property
def save_dir(self):
return self.hparams.save_dir
def convert_format(self, data):
if isinstance(data, np.ndarray):
return data
elif isinstance(data, torch.Tensor):
return data.cpu().numpy()
elif isinstance(data, list):
return [self.convert_format(d) for d in data]
elif isinstance(data, dict):
return {k: self.convert_format(v) for k, v in data.items()}
else:
raise TypeError('Data must be of type numpy.ndarray, torch.Tensor, list or dict, got', type(data))
def get_save_path(self, filename):
save_path = os.path.join(self.save_dir, filename)
os.makedirs(os.path.dirname(save_path), exist_ok=True)
return save_path
def save_rgb_image(self, filename, img):
imageio.imwrite(self.get_save_path(filename), img)
def save_rgb_video(self, filename, stage='fit', filter=None):
img_dir = os.path.join(self.logger.log_dir, 'images', stage)
writer_graph = imageio.get_writer(os.path.join(img_dir, filename), fps=1)
for file in sorted(os.listdir(img_dir)):
if file.endswith('.png') and 'gt' not in file:
if filter is not None:
if filter in file:
writer_graph.append_data(imageio.imread(os.path.join(img_dir, file)))
else:
writer_graph.append_data(imageio.imread(os.path.join(img_dir, file)))
writer_graph.close()
def save_json(self, filename, data):
save_path = self.get_save_path(filename)
with open(save_path, 'w') as f:
json.dump(data, f)
# Path: utils/refs.py
# Path: utils/plot.py
def viz_graph(info_dict, res=256):
'''
Function to plot the directed graph
Args:
- info_dict (dict): output json containing the graph information
- res (int): resolution of the image
Returns:
- img_arr (np.array): image array
'''
# build tree
tree = info_dict['diffuse_tree']
edges = []
for node in tree:
edges += [(node['id'], child) for child in node['children']]
G = nx.DiGraph()
G.add_edges_from(edges)
# plot tree
plt.figure(figsize=(res/100, res/100))
colors = get_color(graph_color_ref, len(tree))
pos = nx.nx_agraph.graphviz_layout(G, prog="twopi", args="")
node_order = sorted(G.nodes())
nx.draw(G, pos, node_color=colors, nodelist=node_order, edge_color='k', with_labels=False)
buf = BytesIO()
plt.savefig(buf, format="png", dpi=100)
buf.seek(0)
img = Image.open(buf)
img_arr = np.asarray(img)
buf.close()
plt.clf()
plt.close()
return img_arr[:, :, :3]
# Path: utils/plot.py
def make_grid(images, cols=5):
"""
Arrange list of images into a N x cols grid.
Args:
- images (list): List of Numpy arrays representing the images.
- cols (int): Number of columns for the grid.
Returns:
- grid (numpy array): Numpy array representing the image grid.
"""
# Determine the dimensions of each image
img_h, img_w, _ = images[0].shape
rows = len(images) // cols
# Initialize a blank canvas
grid = np.zeros((rows * img_h, cols * img_w, 3), dtype=images[0].dtype)
# Place each image onto the grid
for idx, img in enumerate(images):
y = (idx // cols) * img_h
x = (idx % cols) * img_w
grid[y: y + img_h, x: x + img_w] = img
return grid
# Path: utils/plot.py
def add_text(text, imgarr):
'''
Function to add text to image
Args:
- text (str): text to add
- imgarr (np.array): image array
Returns:
- img (np.array): image array with text
'''
img = Image.fromarray(imgarr)
I = ImageDraw.Draw(img)
I.text((10, 10), text, fill='black')
return np.asarray(img)
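# A small sketch combining the two helpers above: tile ten dummy frames into a 2x5 grid
# and stamp a label on it (pure NumPy/PIL, no renderer required):
import numpy as np
frames = [np.full((64, 64, 3), 200, dtype=np.uint8) for _ in range(10)]
grid = make_grid(frames, cols=5)     # (128, 320, 3)
labeled = add_text("epoch 0", grid)  # same shape, text drawn at (10, 10)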
# Path: utils/render.py
def rescale_axis(jtype, axis_d, axis_o, box_center):
'''
Function to rescale the axis for rendering
Args:
- jtype (int): joint type
- axis_d (np.array): axis direction
- axis_o (np.array): axis origin
- box_center (np.array): bounding box center
Returns:
- center (np.array): rescaled axis origin
- axis_d (np.array): rescaled axis direction
'''
if jtype == 0 or jtype == 1:
return [0., 0., 0.], [0., 0., 0.]
if jtype == 3 or jtype == 4:
center = box_center
else:
center = axis_o + np.dot(axis_d, box_center-axis_o) * axis_d
return center.tolist(), axis_d.tolist()
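# Example of the projection branch above (jtype values other than 0/1/3/4 project the box
# centre onto the joint axis; the exact jtype encoding is defined elsewhere in the repo):
import numpy as np
center, direction = rescale_axis(jtype=2,
                                 axis_d=np.array([0., 0., 1.]),
                                 axis_o=np.zeros(3),
                                 box_center=np.array([0.5, 0.5, 0.5]))
# center == [0.0, 0.0, 0.5], the closest point on the axis line to the box centre.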
# Path: utils/render.py
def draw_boxes_axiss_anim(aabbs_0, aabbs_1, axiss, mode='graph', resolution=256, types=None):
'''
Function to draw the 3D bounding boxes and axes of the two frames
Args:
aabbs_0: list of trimesh objects for the bounding box of each part in the resting state
aabbs_1: list of trimesh objects for the bounding box of each part in the open state
axiss: list of trimesh objects for the axis of each part
mode:
'graph' using palette corresponding to graph node,
'jtype' using palette corresponding to joint type,
'semantic' using palette corresponding to semantic label
resolution: resolution of the rendered image
types: ids corresponding to each joint type or semantic label, if mode is 'jtype' or 'semantic'
'''
n_parts = len(aabbs_0)
ren_aabbs_0 = []
ren_aabbs_1 = []
ren_axiss = []
if mode == 'graph':
palette = graph_color_ref
# Add meshes to the scene
for i in range(n_parts):
color = get_color_from_palette(palette, i)
aabb_0 = pyrender.Mesh.from_trimesh(aabbs_0[i], smooth=False)
aabb_0.primitives[0].color_0 = color.repeat(aabb_0.primitives[0].positions.shape[0], axis=0)
ren_aabbs_0.append(aabb_0)
aabb_1 = pyrender.Mesh.from_trimesh(aabbs_1[i], smooth=False)
aabb_1.primitives[0].color_0 = color.repeat(aabb_1.primitives[0].positions.shape[0], axis=0)
ren_aabbs_1.append(aabb_1)
if axiss[i] is not None:
axis = pyrender.Mesh.from_trimesh(axiss[i], smooth=False)
axis.primitives[0].color_0 = color.repeat(axis.primitives[0].positions.shape[0], axis=0)
ren_axiss.append(axis)
else:
ren_axiss.append(None)
elif mode == 'jtype' or mode == 'semantic':
assert types is not None
palette = joint_color_ref if mode == 'jtype' else semantic_color_ref
# Add meshes to the scene
for i in range(n_parts):
color = get_color_from_palette(palette, types[i])
aabb_0 = pyrender.Mesh.from_trimesh(aabbs_0[i], smooth=False)
aabb_0.primitives[0].color_0 = color.repeat(aabb_0.primitives[0].positions.shape[0], axis=0)
ren_aabbs_0.append(aabb_0)
aabb_1 = pyrender.Mesh.from_trimesh(aabbs_1[i], smooth=False)
aabb_1.primitives[0].color_0 = color.repeat(aabb_1.primitives[0].positions.shape[0], axis=0)
ren_aabbs_1.append(aabb_1)
if axiss[i] is not None:
axis = pyrender.Mesh.from_trimesh(axiss[i], smooth=False)
ren_axiss.append(axis)
else:
ren_axiss.append(None)
else:
        raise ValueError("mode must be 'graph', 'jtype', or 'semantic'")
img0 = render_anim_parts(ren_aabbs_0, ren_axiss, resolution=resolution)
img1 = render_anim_parts(ren_aabbs_1, ren_axiss, resolution=resolution)
return np.concatenate([img0, img1], axis=1)
# Path: utils/render.py
def get_bbox_mesh_pair(center, size, radius=0.01, jtype=None, jrange=None, axis_d=None, axis_o=None):
'''
Function to get the bounding box mesh pair
Args:
- center (np.array): bounding box center
- size (np.array): bounding box size
- radius (float): radius of the cylinder
- jtype (int): joint type
- jrange (list): joint range
- axis_d (np.array): axis direction
- axis_o (np.array): axis origin
Returns:
- trimesh_box (trimesh object): trimesh object for the bbox at resting state
- trimesh_box_anim (trimesh object): trimesh object for the bbox at opening state
'''
size = np.clip(size, a_max=3, a_min=0.005)
center = np.clip(center, a_max=3, a_min=-3)
line_box = o3d.geometry.TriangleMesh()
z_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=size[2])
y_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=size[1])
R_y = get_rotation_axis_angle(np.array([1., 0., 0.]), np.pi / 2)
y_cylinder.rotate(R_y, center=(0, 0, 0))
x_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=size[0])
R_x = get_rotation_axis_angle(np.array([0., 1., 0.]), np.pi / 2)
x_cylinder.rotate(R_x, center=(0, 0, 0))
z1 = deepcopy(z_cylinder)
z1.translate(np.array([-size[0] / 2, size[1] / 2, 0.]))
line_box += z1.translate(center[:3])
z2 = deepcopy(z_cylinder)
z2.translate(np.array([size[0] / 2, size[1] / 2, 0.]))
line_box += z2.translate(center[:3])
z3 = deepcopy(z_cylinder)
z3.translate(np.array([-size[0] / 2, -size[1] / 2, 0.]))
line_box += z3.translate(center[:3])
z4 = deepcopy(z_cylinder)
z4.translate(np.array([size[0] / 2, -size[1] / 2, 0.]))
line_box += z4.translate(center[:3])
y1 = deepcopy(y_cylinder)
y1.translate(np.array([-size[0] / 2, 0., size[2] / 2]))
line_box += y1.translate(center[:3])
y2 = deepcopy(y_cylinder)
y2.translate(np.array([size[0] / 2, 0., size[2] / 2]))
line_box += y2.translate(center[:3])
y3 = deepcopy(y_cylinder)
y3.translate(np.array([-size[0] / 2, 0., -size[2] / 2]))
line_box += y3.translate(center[:3])
y4 = deepcopy(y_cylinder)
y4.translate(np.array([size[0] / 2, 0., -size[2] / 2]))
line_box += y4.translate(center[:3])
x1 = deepcopy(x_cylinder)
x1.translate(np.array([0., -size[1] / 2, size[2] / 2]))
line_box += x1.translate(center[:3])
x2 = deepcopy(x_cylinder)
x2.translate(np.array([0., size[1] / 2, size[2] / 2]))
line_box += x2.translate(center[:3])
x3 = deepcopy(x_cylinder)
x3.translate(np.array([0., -size[1] / 2, -size[2] / 2]))
line_box += x3.translate(center[:3])
x4 = deepcopy(x_cylinder)
x4.translate(np.array([0., size[1] / 2, -size[2] / 2]))
line_box += x4.translate(center[:3])
# transform
line_box_anim = deepcopy(line_box)
if jtype == 2: # revolute
theta = np.deg2rad(jrange[0])
line_box_anim.translate(-axis_o)
R = get_rotation_axis_angle(axis_d, theta)
line_box_anim.rotate(R, center=(0, 0, 0))
line_box_anim.translate(axis_o)
elif jtype == 3: # prismatic
dist = np.array(jrange[1])
line_box_anim.translate(axis_d * dist)
elif jtype == 4: # screw
dist = np.array(jrange[1])
theta = 0.25 * np.pi
R = get_rotation_axis_angle(axis_d, theta)
line_box_anim.translate(-axis_o)
line_box_anim.rotate(R, center=(0, 0, 0))
line_box_anim.translate(axis_o)
line_box_anim.translate(axis_d * dist)
elif jtype == 5: # continuous
theta = 0.25 * np.pi
R = get_rotation_axis_angle(axis_d, theta)
line_box_anim.translate(-axis_o)
line_box_anim.rotate(R, center=(0, 0, 0))
line_box_anim.translate(axis_o)
vertices = np.asarray(line_box.vertices)
faces = np.asarray(line_box.triangles)
trimesh_box = trimesh.Trimesh(vertices=vertices, faces=faces)
trimesh_box.visual.vertex_colors = np.array([0.0, 1.0, 1.0, 1.0])
vertices_anim = np.asarray(line_box_anim.vertices)
faces_anim = np.asarray(line_box_anim.triangles)
trimesh_box_anim = trimesh.Trimesh(vertices=vertices_anim, faces=faces_anim)
trimesh_box_anim.visual.vertex_colors = np.array([0.0, 1.0, 1.0, 1.0])
return trimesh_box, trimesh_box_anim
# Path: utils/render.py
def get_axis_mesh(k, axis_o, bbox_center, joint_type):
'''
Function to get the axis mesh
Args:
- k (np.array): axis direction
- center (np.array): axis origin
- bbox_center (np.array): bounding box center
- joint_type (int): joint type
'''
    if joint_type == 0 or joint_type == 1 or np.linalg.norm(k) == 0.:
return None
k = k / np.linalg.norm(k)
if joint_type == 3 or joint_type == 4: # prismatic or screw
axis_o = bbox_center
else: # revolute or continuous
axis_o = axis_o + np.dot(k, bbox_center-axis_o) * k
axis = o3d.geometry.TriangleMesh.create_arrow(cylinder_radius=0.015, cone_radius=0.03, cylinder_height=1.0, cone_height=0.08)
arrow = np.array([0., 0., 1.], dtype=np.float32)
n = np.cross(arrow, k)
rad = np.arccos(np.dot(arrow, k))
R_arrow = get_rotation_axis_angle(n, rad)
axis.rotate(R_arrow, center=(0, 0, 0))
axis.translate(axis_o[:3])
axis.compute_vertex_normals()
vertices = np.asarray(axis.vertices)
faces = np.asarray(axis.triangles)
trimesh_axis = trimesh.Trimesh(vertices=vertices, faces=faces)
trimesh_axis.visual.vertex_colors = np.array([0.5, 0.5, 0.5, 1.0])
return trimesh_axis
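# Assumption-flagged sketch (not shown in this excerpt): `get_rotation_axis_angle` is called
# throughout this file but its definition is not included here. A minimal implementation
# consistent with its usage (unit axis, angle in radians -> 3x3 rotation matrix) would be the
# Rodrigues formula:
def _rodrigues_rotation(axis, angle):
    axis = axis / (np.linalg.norm(axis) + 1e-12)  # guard against a zero-length axis
    K = np.array([
        [0., -axis[2], axis[1]],
        [axis[2], 0., -axis[0]],
        [-axis[1], axis[0], 0.],
    ])  # skew-symmetric cross-product matrix
    return np.eye(3) + np.sin(angle) * K + (1. - np.cos(angle)) * (K @ K)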
# Path: systems/base.py
import torch
import models
import numpy as np
import lightning.pytorch as pl
from diffusers import DDPMScheduler
from utils.savermixins import SaverMixin
from utils.refs import label_ref, joint_ref
from utils.plot import viz_graph, make_grid, add_text
from utils.render import rescale_axis, draw_boxes_axiss_anim, get_bbox_mesh_pair, get_axis_mesh
class BaseSystem(pl.LightningModule, SaverMixin):
def __init__(self, hparams):
super().__init__()
self.hparams.update(hparams)
self.model = models.make(hparams.model.name, hparams.model)
self.scheduler = DDPMScheduler(**self.hparams.scheduler.config)
self.save_hyperparameters()
def setup(self, stage: str):
self.set_save_dir(stage) # config the logger dir for images
def configure_optimizers(self):
raise NotImplementedError
def training_step(self, batch, batch_idx):
raise NotImplementedError
def validation_step(self, batch, batch_idx):
raise NotImplementedError
def predict_step(self, batch, batch_idx, dataloader_idx=None):
raise NotImplementedError
# ------------------------------- data converters ------------------------------- #
def convert_data_range(self, x):
x = x.reshape(-1, 30) # (K, 30)
aabb_max = self.convert_format(x[:, 0:3])
aabb_min = self.convert_format(x[:, 3:6])
center = (aabb_max + aabb_min) / 2.
size = (aabb_max - aabb_min).clip(min=1e-3)
j_type = torch.mean(x[:, 6:12], dim=1)
j_type = self.convert_format((j_type+0.5) * 5).clip(min=1., max=5.).round()
axis_d = self.convert_format(x[:, 12:15])
axis_d = axis_d / (np.linalg.norm(axis_d, axis=1, keepdims=True) + np.finfo(float).eps)
axis_o = self.convert_format(x[:, 15:18])
j_range = (x[:, 18:20] + x[:, 20:22] + x[:, 22:24]) / 3
j_range = self.convert_format(j_range).clip(min=-1., max=1.)
j_range[:, 0] = j_range[:, 0] * 360
| j_range[:, 1] = j_range[:, 1] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: duxiaodan/intrinsic-lora
# Path: diode/diode.py
def plot_depth_map(dm, validity_mask):
validity_mask = validity_mask > 0
MIN_DEPTH = 0.5
MAX_DEPTH = min(300, np.percentile(dm, 99))
dm = np.clip(dm, MIN_DEPTH, MAX_DEPTH)
dm = (dm - np.min(dm)) / np.ptp(dm)
dm = 1-dm
dm = np.stack([dm]*3,axis=-1)
dm[np.where(validity_mask == False)] = 0
dm = Image.fromarray(np.uint8(dm[:,:,:3]*255)).convert('RGB')
mask = Image.fromarray(np.uint8(validity_mask*255))
return dm, mask
# Path: diode/diode.py
def check_and_tuplize_tokens(tokens, valid_tokens):
if not isinstance(tokens, (tuple, list)):
tokens = (tokens, )
for split in tokens:
assert split in valid_tokens
return tokens
# Path: diode/diode.py
def enumerate_paths(src):
'''flatten out a nested dictionary into an iterable
DIODE metadata is a nested dictionary;
One could easily query a particular scene and scan, but sequentially
enumerating files in a nested dictionary is troublesome. This function
recursively traces out and aggregates the leaves of a tree.
'''
if isinstance(src, list):
return src
elif isinstance(src, dict):
acc = []
for k, v in src.items():
_sub_paths = enumerate_paths(v)
_sub_paths = list(map(lambda x: osp.join(k, x), _sub_paths))
acc.append(_sub_paths)
return list(chain.from_iterable(acc))
else:
raise ValueError('do not accept data type {}'.format(type(src)))
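# Illustrative example (not from the repository): enumerate_paths flattens DIODE's nested
# metadata into relative file paths.
def _enumerate_paths_demo():
    meta = {'scene_00001': {'scan_00001': ['00001_00001_indoors_000_010']}}
    return enumerate_paths(meta)
    # -> ['scene_00001/scan_00001/00001_00001_indoors_000_010']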
# Path: diode/diode.py
_VALID_SPLITS = ('train', 'val', 'test')
# Path: diode/diode.py
_VALID_SCENE_TYPES = ('indoors', 'outdoor')
# Path: rescale_cfg_pipeline_forward.py
@torch.no_grad()
def new_call(
self,
prompt: Union[str, List[str]] = None,
image: Union[
torch.FloatTensor,
PIL.Image.Image,
np.ndarray,
List[torch.FloatTensor],
List[PIL.Image.Image],
List[np.ndarray],
] = None,
num_inference_steps: int = 100,
guidance_scale: float = 7.5,
image_guidance_scale: float = 1.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
guidance_rescale: float = 0.0,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
`Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept
image latents as `image`, but if passing latents directly it is not encoded again.
num_inference_steps (`int`, *optional*, defaults to 100):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
image_guidance_scale (`float`, *optional*, defaults to 1.5):
            Push the generated image towards the initial `image`. Image guidance scale is enabled by setting
`image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely
linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a
value of at least `1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that calls every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
Examples:
```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO
>>> from diffusers import StableDiffusionInstructPix2PixPipeline
>>> def download_image(url):
... response = requests.get(url)
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
>>> img_url = "https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
>>> image = download_image(img_url).resize((512, 512))
>>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
... "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "make the mountains snowy"
>>> image = pipe(prompt=prompt, image=image).images[0]
```
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
otherwise a `tuple` is returned where the first element is a list with the generated images and the
second element is a list of `bool`s indicating whether the corresponding generated image contains
"not-safe-for-work" (nsfw) content.
"""
# 0. Check inputs
self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
if image is None:
raise ValueError("`image` input cannot be undefined.")
# 1. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0
# check if scheduler is in sigmas space
scheduler_is_in_sigma_space = hasattr(self.scheduler, "sigmas")
# 2. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 3. Preprocess image
image = self.image_processor.preprocess(image)
# 4. set timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare Image latents
image_latents = self.prepare_image_latents(
image,
batch_size,
num_images_per_prompt,
prompt_embeds.dtype,
device,
do_classifier_free_guidance,
generator,
)
height, width = image_latents.shape[-2:]
height = height * self.vae_scale_factor
width = width * self.vae_scale_factor
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 7. Check that shapes of latents and image match the UNet channels
num_channels_image = image_latents.shape[1]
if num_channels_latents + num_channels_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_image`: {num_channels_image} "
f" = {num_channels_latents+num_channels_image}. Please verify the config of"
" `pipeline.unet` or your `image` input."
)
# 8. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 9. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# Expand the latents if we are doing classifier free guidance.
# The latents are expanded 3 times because for pix2pix the guidance\
# is applied for both the text and the input image.
latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents
# concat latents, image_latents in the channel dimension
scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1)
# predict the noise residual
noise_pred = self.unet(
scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, return_dict=False
)[0]
# Hack:
            # For karras style schedulers the model does classifier-free guidance using the
# predicted_original_sample instead of the noise_pred. So we need to compute the
# predicted_original_sample here if we are using a karras style scheduler.
if scheduler_is_in_sigma_space:
step_index = (self.scheduler.timesteps == t).nonzero()[0].item()
sigma = self.scheduler.sigmas[step_index]
noise_pred = latent_model_input - sigma * noise_pred
# perform guidance
if do_classifier_free_guidance:
noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)
noise_pred = (
noise_pred_uncond
+ guidance_scale * (noise_pred_text - noise_pred_image)
+ image_guidance_scale * (noise_pred_image - noise_pred_uncond)
)
if do_classifier_free_guidance and guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
# print('Doing guidance rescale!')
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)
# Hack:
            # For karras style schedulers the model does classifier-free guidance using the
# predicted_original_sample instead of the noise_pred. But the scheduler.step function
# expects the noise_pred and computes the predicted_original_sample internally. So we
# need to overwrite the noise_pred here such that the value of the computed
# predicted_original_sample is correct.
if scheduler_is_in_sigma_space:
noise_pred = (noise_pred - latents) / (-sigma)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
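# Assumption-flagged sketch: `rescale_noise_cfg` is called above but not shown in this excerpt.
# One common formulation, following Sec. 3.4 of https://arxiv.org/pdf/2305.08891.pdf, rescales
# the guided prediction toward the standard deviation of the text branch and blends the result
# with the original guided prediction:
def _rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    # per-sample std over all non-batch dimensions
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    return guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg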
# Path: augunet_diode_pseudo_depth.py
import argparse
import logging
import math
import os
import os.path as osp
import random
import shutil
import wandb
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import torch.utils.checkpoint
import transformers
import diffusers
import copy
import json
import datetime
import matplotlib
import wandb
import xformers
import bitsandbytes as bnb
from pathlib import Path
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from torch.utils.data import Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
from PIL.ImageOps import exif_transpose
from diode.diode import (
plot_depth_map,
check_and_tuplize_tokens,
enumerate_paths,
_VALID_SPLITS,
_VALID_SCENE_TYPES
)
from torchvision.transforms.functional import pil_to_tensor
from torchvision.transforms.functional import to_pil_image
from rescale_cfg_pipeline_forward import new_call
# coding=utf-8
# Intrinsic-LoRA
"""Intrinsic-LoRA AugUNet model for depth training"""
#Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf
logger = get_logger(__name__, log_level="INFO")
def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
img_str = ""
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
img_str += f"![img_{i}](./image_{i}.png)\n"
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
| tags: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AsuradaYuci/TF-CLIP
# Path: utils/meter.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
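# Illustrative usage (not from the repository): AverageMeter keeps a running, count-weighted
# mean, e.g. averaging a per-batch loss where `n` is the batch size.
def _average_meter_demo():
    meter = AverageMeter()
    meter.update(0.5, n=4)   # sum = 2.0, count = 4, avg = 0.5
    meter.update(1.0, n=4)   # sum = 6.0, count = 8, avg = 0.75
    return meter.avg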
# Path: utils/metrics.py
class R1_mAP_eval():
def __init__(self, num_query, max_rank=50, feat_norm=True, reranking=False):
super(R1_mAP_eval, self).__init__()
self.num_query = num_query
self.max_rank = max_rank
self.feat_norm = feat_norm
self.reranking = reranking
def reset(self):
self.feats = []
self.pids = []
self.camids = []
def update(self, output): # called once for each batch
feat, pid, camid = output
self.feats.append(feat.cpu())
self.pids.extend(np.asarray(pid))
self.camids.extend(np.asarray(camid))
def compute(self): # called after each epoch
feats = torch.cat(self.feats, dim=0)
if self.feat_norm:
print("The test feature is normalized")
feats = torch.nn.functional.normalize(feats, dim=1, p=2) # along channel
# query
qf = feats[:self.num_query]
q_pids = np.asarray(self.pids[:self.num_query])
q_camids = np.asarray(self.camids[:self.num_query])
# gallery
gf = feats[0:]
g_pids = np.asarray(self.pids[0:])
g_camids = np.asarray(self.camids[0:])
if self.reranking:
print('=> Enter reranking')
# distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)
else:
print('=> Computing DistMat with euclidean_distance')
distmat = euclidean_distance(qf, gf)
cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
return cmc, mAP, distmat, self.pids, self.camids, qf, gf
# Path: utils/iotools.py
def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):
mkdir_if_missing(osp.dirname(fpath))
torch.save(state, fpath)
if is_best:
shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))
# Path: loss/supcontrast.py
class SupConLoss(nn.Module):
def __init__(self, device):
super(SupConLoss, self).__init__()
self.device = device
self.temperature = 1.0
def forward(self, text_features, image_features, t_label, i_targets):
batch_size = text_features.shape[0]
batch_size_N = image_features.shape[0]
mask = torch.eq(t_label.unsqueeze(1).expand(batch_size, batch_size_N), \
i_targets.unsqueeze(0).expand(batch_size,batch_size_N)).float().to(self.device)
logits = torch.div(torch.matmul(text_features, image_features.T),self.temperature)
# print(logits.size())
# for numerical stability
logits_max, _ = torch.max(logits, dim=1, keepdim=True)
logits = logits - logits_max.detach()
exp_logits = torch.exp(logits)
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
loss = - mean_log_prob_pos.mean()
return loss
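# Illustrative usage sketch (not from the repository): the loss above averages -log softmax over
# the image features whose identity label matches each text feature. Expected shapes:
# text_features (B, D), image_features (N, D), t_label (B,), i_targets (N,).
def _supcon_demo(device='cpu'):
    criterion = SupConLoss(device)
    text_feat = torch.nn.functional.normalize(torch.randn(4, 512), dim=1)
    image_feat = torch.nn.functional.normalize(torch.randn(8, 512), dim=1)
    t_label = torch.tensor([0, 1, 2, 3])                 # identity of each text feature
    i_targets = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])   # identity of each image feature
    return criterion(text_feat, image_feat, t_label, i_targets)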
# Path: loss/softmax_loss.py
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (batch_size)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss
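# Worked example (illustrative, not from the repository): with num_classes=4 and epsilon=0.1, a
# hard label y=2 becomes the smoothed target (1 - 0.1) * one_hot + 0.1 / 4
#   = [0.025, 0.025, 0.925, 0.025],
# and the loss is the batch mean of -sum(target * log_softmax(inputs)).
def _label_smooth_demo():
    criterion = CrossEntropyLabelSmooth(num_classes=4, epsilon=0.1, use_gpu=False)
    logits = torch.randn(2, 4)
    targets = torch.tensor([2, 0])
    return criterion(logits, targets)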
# Path: processor/processor_clipreid_stage2.py
import logging
import os
import time
import torch
import torch.nn as nn
import torch.distributed as dist
import collections
import time
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from utils.iotools import save_checkpoint
from torch.cuda import amp
from torch.nn import functional as F
from loss.supcontrast import SupConLoss
from loss.softmax_loss import CrossEntropyLabelSmooth
from datetime import timedelta
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
else:
model.eval()
for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
with torch.no_grad():
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
evaluator.update((feat, vid, camid))
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results - Epoch: {}".format(epoch))
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()
prec1 = cmc[0] + mAP
is_best = prec1 > best_performance
best_performance = max(prec1, best_performance)
if is_best:
best_epoch = epoch
save_checkpoint(model.state_dict(), is_best, os.path.join(cfg.OUTPUT_DIR, 'checkpoint_ep.pth.tar'))
logger.info("==> Best Perform {:.1%}, achieved at epoch {}".format(best_performance, best_epoch))
all_end_time = time.monotonic()
total_time = timedelta(seconds=all_end_time - all_start_time)
logger.info("Total running time: {}".format(total_time))
print(cfg.OUTPUT_DIR)
def do_inference_dense(cfg,
model,
val_loader,
num_query):
device = "cuda"
logger = logging.getLogger("TFCLIP.test")
logger.info("Enter inferencing")
evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
evaluator.reset()
if device:
if torch.cuda.device_count() > 1:
print('Using {} GPUs for inference'.format(torch.cuda.device_count()))
model = nn.DataParallel(model)
model.to(device)
model.eval()
img_path_list = []
for n_iter, (img, pid, camid, camids, target_view, imgpath) in enumerate(val_loader):
img = img.to(device) # torch.Size([64, 4, 3, 256, 128])
if len(img.size()) == 6:
# method = 'dense'
b, n, s, c, h, w = img.size()
assert (b == 1)
img = img.view(b * n, s, c, h, w) # torch.Size([5, 8, 3, 256, 128])
with torch.no_grad():
img = img.to(device)
if cfg.MODEL.SIE_CAMERA:
camids = camids.to(device)
else:
camids = None
if cfg.MODEL.SIE_VIEW:
target_view = target_view.to(device)
else:
target_view = None
feat = model(img, cam_label=camids, view_label=target_view)
feat = feat.view(-1, feat.size(1))
feat = torch.mean(feat, 0, keepdim=True) # 1,512
evaluator.update((feat, pid, camid))
img_path_list.extend(imgpath)
cmc, mAP, _, _, _, _, _ = evaluator.compute()
logger.info("Validation Results ")
logger.info("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10, 20]:
logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
return cmc[0], cmc[4]
def do_inference_rrs(cfg,
model,
val_loader,
num_query):
device = "cuda"
logger = logging.getLogger("transreid.test")
logger.info("Enter inferencing")
evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM)
evaluator.reset()
if device:
if torch.cuda.device_count() > 1:
| print('Using {} GPUs for inference'.format(torch.cuda.device_count())) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nexB/dejacode
# Path: dje/api.py
class CreateRetrieveUpdateListViewSet(
mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet,
):
"""Provide default `create`, `retrieve`, `update`, `partial_update`, and `list` actions."""
email_notification_on = []
allow_reference_access = False
def get_queryset(self):
"""
        Allow access to the reference data if `self.allow_reference_access` is True.
        The `REFERENCE_VAR` needs to be provided as a GET parameter `?reference=1` in the URL.
        The special value `combine` can be provided as the value of the `reference` parameter,
        `?reference=combine`, to return records from both the user dataspace and the reference
        one.
        The special `merge` value will include the reference records, excluding those whose
        `uuid` is already present in the user dataspace.
The reference data access is only available on `SAFE_METHODS` ('GET', 'HEAD', 'OPTIONS').
"""
user_dataspace = self.request.user.dataspace
base_qs = super().get_queryset()
user_qs = base_qs.scope(user_dataspace)
reference_params_value = self.request.GET.get(REFERENCE_VAR)
reference_access = all(
[
self.allow_reference_access,
reference_params_value,
self.request.method in SAFE_METHODS,
]
)
if not reference_access:
return user_qs
reference_dataspace = Dataspace.objects.get_reference()
if not reference_dataspace:
return user_qs
if reference_params_value not in ["combine", "merge"]:
reference_qs = base_qs.scope(reference_dataspace)
return reference_qs
combined_qs = base_qs.scope(user_dataspace, include_reference=True)
if reference_params_value == "merge":
return combined_qs.exclude(
uuid__in=models.Subquery(user_qs.values("uuid")),
dataspace=reference_dataspace,
)
return combined_qs
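    # Illustrative note (not from the repository): clients opt into the behaviour above purely
    # via the query string; the endpoint path below is a hypothetical example.
    #   GET /api/v2/owners/?reference=1        -> reference dataspace records only
    #   GET /api/v2/owners/?reference=combine  -> user dataspace records plus reference records
    #   GET /api/v2/owners/?reference=merge    -> user records plus reference records whose
    #                                             uuid is not already in the user dataspace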
@action(detail=True, methods=["post"])
def copy_to_my_dataspace(self, request, uuid):
reference_dataspace = Dataspace.objects.get_reference()
permission_error = {"error": "You do not have rights to execute this action."}
reference_access = all(
[
self.allow_reference_access,
reference_dataspace,
]
)
if not reference_access:
return Response(permission_error, status=status.HTTP_400_BAD_REQUEST)
queryset = self.queryset.scope(reference_dataspace)
reference_object = get_object_or_404(queryset, uuid=uuid)
user = request.user
target_dataspace = user.dataspace
model_class = reference_object.__class__
if not has_permission(reference_object, user, "add"):
return Response(permission_error, status=status.HTTP_400_BAD_REQUEST)
if target_dataspace.is_reference:
data = {"error": "Target dataspace cannot be the reference one."}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
object_exists_in_target_dataspace = (
model_class._default_manager.scope(target_dataspace)
.filter(uuid=reference_object.uuid)
.exists()
)
if object_exists_in_target_dataspace:
data = {"error": "The object already exists in your local Dataspace."}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
copied_object = copy_object(reference_object, target_dataspace, user)
if not copied_object:
data = {"error": "The object could not be copied."}
return Response(data, status=status.HTTP_400_BAD_REQUEST)
serializer = self.get_serializer(copied_object)
return Response(serializer.data)
def perform_create(self, serializer):
"""Add the Addition History."""
user = self.request.user
fields_name = [field.name for field in serializer.Meta.model._meta.get_fields()]
kwargs = {}
if "created_by" in fields_name:
kwargs["created_by"] = user
if "last_modified_by" in fields_name:
kwargs["last_modified_by"] = user
serializer.save(**kwargs)
History.log_addition(user, serializer.instance)
if History.ADDITION in self.email_notification_on:
send_notification_email(user, serializer.instance, History.ADDITION)
def perform_update(self, serializer):
"""Add the CHANGE History."""
changed_data = []
changes_details = []
user = self.request.user
for field_name, new_value in serializer.validated_data.items():
original_value = getattr(serializer.instance, field_name, None)
if new_value != original_value:
changed_data.append(field_name)
changes_details.append((field_name, original_value, new_value))
fields_name = [field.name for field in serializer.Meta.model._meta.get_fields()]
kwargs = {}
if "last_modified_by" in fields_name:
kwargs["last_modified_by"] = user
serialized_data = None
with suppress(AttributeError):
serialized_data = serializer.instance.as_json()
serializer.save(**kwargs)
if changed_data:
change_message = [_("Changed {}.").format(get_text_list(changed_data, _("and")))]
change_message = " ".join(change_message)
else:
change_message = _("No fields changed.")
History.log_change(user, serializer.instance, change_message, serialized_data)
if History.CHANGE in self.email_notification_on:
change_message += construct_changes_details_message(
{serializer.instance: changes_details}
)
send_notification_email(user, serializer.instance, History.CHANGE, change_message)
# Path: dje/api.py
class DataspacedAPIFilterSet(FilterSet):
"""
Override default filters.
This duplicates the purpose of `Meta.filter_overrides`
but works better for inheritance.
"""
@classmethod
def filter_for_lookup(cls, f, lookup_type):
if isinstance(f, models.BooleanField):
params = {
"help_text": 'Supported values: "yes", "no"',
"widget": ExtendedNullBooleanSelect,
}
return django_filters.BooleanFilter, params
return super().filter_for_lookup(f, lookup_type)
# Path: dje/api.py
class DataspacedSerializer(serializers.HyperlinkedModelSerializer):
def __init__(self, *args, **kwargs):
"""
Add the `dataspace` attribute from the request User Dataspace.
Required at save time and for validation.
"""
super().__init__(*args, **kwargs)
request = self.context.get("request", None)
self.dataspace = request.user.dataspace if request else None
def save(self, **kwargs):
"""
        Add the current user dataspace to the object data and
        wrap the IntegrityError with a proper DRFValidationError.
        Starts by popping the m2m data before the actual save(),
        then sets the m2m relations post save().
"""
# Pops the m2m data from the validated_data dict before save()
m2m_data = {
f: self._validated_data.pop(f.name)
for f in self.Meta.model._meta.get_fields()
if f.many_to_many and not f.auto_created and f.name in self._validated_data
}
if "uuid" in self.validated_data and not self.validated_data.get("uuid"):
kwargs.update({"uuid": uuid.uuid4()})
# Update the uuid in the view kwargs to allow a proper `get_object()` post update
updated_uuid = self.validated_data.get("uuid")
if updated_uuid:
self.context["view"].kwargs["uuid"] = updated_uuid
kwargs.update({"dataspace": self.dataspace})
try:
instance = super().save(**kwargs)
except (IntegrityError, DjangoValidationError) as e:
raise DRFValidationError(str(e))
for field, data in m2m_data.items():
set_intermediate_explicit_m2m(instance, field, data)
return instance
def validate(self, attrs):
"""Add the uniqueness validation calling the logic from Model.clean()."""
# Make a copy of the attrs and Remove the m2m values,
# since those cannot be part of the clean()
attrs_copy = attrs.copy()
for f in self.Meta.model._meta.get_fields():
if f.many_to_many and not f.auto_created:
attrs_copy.pop(f.name, None)
if isinstance(f, models.ManyToOneRel):
attrs_copy.pop(f.get_accessor_name(), None)
for field_name in getattr(self.Meta, "exclude_from_validate", []):
attrs_copy.pop(field_name, None)
instance = self.Meta.model(**attrs_copy)
instance.dataspace = self.dataspace
# Set the id from the `instance` to handle create vs. edit in Model.`clean()`
with suppress(AttributeError):
instance.id = self.instance.id
instance.clean(from_api=True)
return attrs
def get_fields(self):
"""Enable to override the UUID field. Also enabled the field level permissions."""
fields = super().get_fields()
if "uuid" in fields:
fields["uuid"].read_only = False
fields["uuid"].allow_null = True
request = self.context.get("request", None)
if request:
fields = self.apply_tabs_permission(fields, request.user)
protected_fields = get_protected_fields(self.Meta.model, request.user)
for field_name in protected_fields:
if field_name in fields:
fields[field_name].read_only = True
# Add the object dataspace name as a read-only field.
fields["dataspace"] = serializers.StringRelatedField()
return fields
def get_absolute_url(self, obj):
"""
Return a fully qualified URL (includes the schema and domains) of the object.
Combining the settings site URL and the get_absolute_url() method of the object.
Usage:
absolute_url = serializers.SerializerMethodField()
"""
site = settings.SITE_URL.rstrip("/")
return f"{site}{obj.get_absolute_url()}"
def apply_tabs_permission(self, fields, user):
model_tabset = get_tabset_for_model(self.Meta.model)
if not model_tabset:
return fields
authorized_fields = {"api_url", "absolute_url", "uuid"}
authorized_tabs = get_authorized_tabs(self.Meta.model, user)
if authorized_tabs:
for tab in authorized_tabs:
authorized_fields.update(model_tabset.get(tab, {}).get("fields", []))
fields = {
field_name: field
for field_name, field in fields.items()
if field_name in authorized_fields
}
return fields
# Path: dje/api.py
class ExternalReferenceSerializer(DataspacedSerializer):
external_source = DataspacedSlugRelatedField(slug_field="label")
content_type = serializers.StringRelatedField(source="content_type.model")
content_object = GenericForeignKeyHyperlinkedField(lookup_field="uuid")
content_object_display_name = serializers.StringRelatedField(source="content_object")
class Meta:
model = ExternalReference
fields = (
"api_url",
"uuid",
"content_type",
"content_object",
"content_object_display_name",
"external_source",
"external_id",
"external_url",
"created_date",
"last_modified_date",
)
extra_kwargs = {
"api_url": {
"view_name": "api_v2:externalreference-detail",
"lookup_field": "uuid",
},
}
# Path: dje/filters.py
class LastModifiedDateFilter(django_filters.DateTimeFilter):
help_text = (
"Limits to records created or updated since that date. "
'Supports both "YYYY-MM-DD" date and "YYYY-MM-DD HH:MM" datetime.'
)
def __init__(self, *args, **kwargs):
kwargs.setdefault("help_text", self.help_text)
kwargs["lookup_expr"] = "gte"
super().__init__(*args, **kwargs)
# Path: dje/filters.py
class MultipleCharFilter(django_filters.MultipleChoiceFilter):
"""Filter on multiple values for a CharField type using `?field=a&field=b` URL syntax."""
field_class = MultipleCharField
# Path: dje/filters.py
class MultipleUUIDFilter(django_filters.MultipleChoiceFilter):
"""Filter on multiple values for an `UUIDField` type using `?field=a&field=b` URL syntax."""
help_text = "Exact UUID. Multi-value supported."
field_class = MultipleUUIDField
def __init__(self, *args, **kwargs):
kwargs.setdefault("help_text", self.help_text)
super().__init__(*args, **kwargs)
# Path: dje/models.py
DATASPACE_FIELD_HELP_TEXT = _(
"A Dataspace is an independent, exclusive set of DejaCode data, which can be"
" either nexB master reference data or installation-specific data."
)
LIMITED_TO_MODELS = [
"Owner",
"License",
"LicenseCategory",
"LicenseProfile",
"LicenseStatus",
"LicenseStyle",
"LicenseTag",
"Component",
"ComponentKeyword",
"ComponentStatus",
"ComponentType",
"Package",
]
ADDITION = ADDITION
CHANGE = CHANGE
DELETION = DELETION
ACTION_FLAG_CHOICES = (
(ADDITION, _("Addition")),
(CHANGE, _("Change")),
(DELETION, _("Deletion")),
)
CT_LIMIT = (
models.Q(app_label="organization", model="owner")
| models.Q(app_label="license_library", model="license")
| models.Q(app_label="component_catalog", model="component")
| models.Q(app_label="component_catalog", model="package")
)
def is_dataspace_related(model_class):
def is_content_type_related(model_class):
def get_by_natural_key(self, name):
def get_reference(self):
def __str__(self):
def get_admin_url(self):
def natural_key(self):
def is_reference(self):
def get_configuration(self, field_name=None):
def has_configuration(self):
def tab_permissions_enabled(self):
def __str__(self):
def get_by_natural_key(self, dataspace_name, uuid):
def scope(self, dataspace, include_reference=False):
def scope_by_name(self, dataspace_name):
def scope_by_id(self, dataspace_id):
def scope_for_user(self, user):
def scope_for_user_in_admin(self, user):
def get_or_none(self, *args, **kwargs):
def group_by(self, field_name):
def get_queryset(self):
def is_secured(manager):
def get_unsecured_manager(model_class):
def secure_queryset_relational_fields(queryset, user):
def get_dataspace(self):
def natural_key(self):
def check(cls, **kwargs):
def save(self, *args, **kwargs):
def model_fields(cls):
def create_from_data(cls, user, data, validate=False):
def update_from_data(self, user, data, override=False):
def as_json(self):
def get_verbose_name(self):
def get_url(self, name, params):
def get_admin_url(self):
def get_change_url(self):
def get_admin_action_url(self, name):
def get_copy_url(self):
def get_api_copy_to_my_dataspace_url(self):
def get_compare_url(self):
def get_html_link(self, href, **attrs):
def get_admin_link(self, **attrs):
def get_absolute_link(self, **attrs):
def urn_link(self):
def _get_local_foreign_fields(self):
def get_identifier_fields(cls):
def get_exclude_candidates_fields(self):
def get_exclude_choices(cls):
def unique_filters_for(self, target):
def get_extra_relational_fields():
def clean(self, from_api=False):
def validate_case_insensitive_unique_on(self):
def validate_against_reference_data(self, from_api=False):
def clean_extra_spaces_in_identifier_fields(self):
def mark_all_notifications_as_read(self, user):
def get_parents(self):
def get_children(self):
def is_parent_of(self, obj):
def is_child_of(self, obj):
def get_ancestors(self):
def get_descendants(self, set_direct_parent=False):
def get_ancestor_ids(self):
def get_descendant_ids(self):
def get_related_ancestors(self):
def get_related_descendants(self):
def is_ancestor_of(self, obj):
def is_descendant_of(self, obj):
def has_parent_or_child(self):
def __str__(self):
def clean(self, from_api=False):
def colored_icon_mixin_factory(verbose_name, icon_blank):
def get_color_code(self):
def get_icon_as_html(self):
def actives(self):
def standards(self):
def admins(self):
def create_user(self, username, email, password, dataspace, **extra_fields):
def create_superuser(self, username, email, password, dataspace=None, **extra_fields):
def create_inactive_user(self, username, email, password, dataspace, **extra_fields):
def get_data_update_recipients(self, dataspace):
def save(self, *args, **kwargs):
def last_active(self):
def get_group_names(self):
def get_homepage_layout(self):
def email_user(self, subject, message, from_email=None, **kwargs):
def regenerate_api_key(self):
def serialize_user_data(self):
def serialize_hook(self, hook):
def create_auth_token(sender, instance=None, created=False, **kwargs):
def get_queryset(self):
def get_for_object(self, obj, **kwargs):
def log_action(self, user, obj, action_flag, message="", serialized_data=None):
def log_addition(cls, user, obj, message=None):
def log_change(cls, user, obj, message, serialized_data=None):
def log_deletion(cls, user, obj):
def __str__(self):
def get_queryset(self):
def get_content_object(self, external_source, external_id):
def get_for_content_object(self, content_object):
def create_for_content_object(self, content_object, external_source, external_id):
def __str__(self):
def save(self, *args, **kwargs):
class DataspaceManager(models.Manager):
class Dataspace(models.Model):
class Meta:
class DataspaceConfiguration(models.Model):
class DataspacedQuerySet(models.QuerySet):
class DataspacedManager(models.Manager.from_queryset(DataspacedQuerySet)):
class DataspacedModel(models.Model):
class Meta:
class HistoryDateFieldsMixin(models.Model):
class Meta:
class HistoryUserFieldsMixin(models.Model):
class Meta:
class HistoryFieldsMixin(HistoryUserFieldsMixin, HistoryDateFieldsMixin):
class Meta:
class ParentChildModelMixin:
class ParentChildRelationshipModel(DataspacedModel):
class Meta:
class ColoredIconMixin(models.Model):
class Meta:
class DejacodeUserQuerySet(DataspacedQuerySet):
class DejacodeUserManager(BaseUserManager, DataspacedManager.from_queryset(DejacodeUserQuerySet)):
class DejacodeUser(AbstractUser):
class Meta:
class HistoryManager(DataspacedManager):
class History(models.Model):
class Meta:
class ExternalSource(DataspacedModel):
class Meta:
class ExternalReferenceManager(DataspacedManager):
class ExternalReference(HistoryFieldsMixin, DataspacedModel):
class Meta:
class ExternalReferenceMixin(models.Model):
class Meta:
class ReferenceNotesMixin(models.Model):
class Meta:
# Path: organization/admin.py
class OwnerAdmin(ChangelistPopupPermissionMixin, DataspacedAdmin):
list_display = (
get_hierarchy_link,
"changelist_view_on_site",
AsNaturalTime("last_modified_date", short_description="Last modified"),
"name",
"alias",
AsURL("homepage_url", short_description="Homepage URL", html_class="word-break"),
AsURL("contact_info", short_description="Contact information", html_class="word-break"),
"get_license_links",
"get_components_links",
"type_label",
"get_dataspace",
)
list_display_links = ("name",)
search_fields = ("name", "alias")
ordering = ("-last_modified_date",)
fieldsets = (
(
"",
{
"fields": (
"name",
"alias",
"type",
"homepage_url",
"contact_info",
"notes",
)
},
),
(
"Related objects",
{
"fields": (
"get_license_links",
"get_components_links",
)
},
),
("", {"classes": ("placeholder related_children-group",), "fields": ()}),
(
"",
{
"classes": ("placeholder dje-externalreference-content_type-object_id-group",),
"fields": (),
},
),
get_additional_information_fieldset(pre_fields=("urn_link",)),
)
readonly_fields = DataspacedAdmin.readonly_fields + (
"urn_link",
"get_license_links",
"get_components_links",
)
list_filter = DataspacedAdmin.list_filter + (
ReportingQueryListFilter,
"type",
("license", IsNullFieldListFilter),
("component", IsNullFieldListFilter),
MissingInFilter,
)
inlines = [
SubownerChildInline,
ExternalReferenceInline,
]
importer_class = OwnerImporter
view_on_site = DataspacedAdmin.changeform_view_on_site
navigation_buttons = True
actions = [
"copy_to",
"compare_with",
"check_updates_in_reference",
]
short_description = """An Owner is an entity that is the author, custodian,
or provider of one or more software objects (licenses, components,
products)."""
long_description = """An Owner can be an organization, person, project
team, or a foundation. An Owner may create and publish software components,
or it may simply be a standards organization. Any Owner can belong to (be
the child of) any other Owners."""
def get_queryset(self, request):
return (
super()
.get_queryset(request)
.prefetch_related(
"license_set",
"related_parents",
"related_children",
"component_set",
)
)
@admin.display(
ordering="type",
description="Type",
)
def type_label(self, obj):
return obj.type
@admin.display(description=_("Licenses"))
def get_license_links(self, obj):
return AsLinkList("license_set", "owner", qs_limit=5)(obj)
@admin.display(description=_("Components"))
def get_components_links(self, obj):
return AsLinkList("component_set", "owner", qs_limit=5)(obj)
# Path: organization/models.py
class Owner(
ExternalReferenceMixin,
HistoryFieldsMixin,
ParentChildModelMixin,
DataspacedModel,
):
name = models.CharField(
db_index=True,
max_length=70,
help_text=_(
"The unique user-maintained name of the author, custodian, or provider of "
"one or more software objects (licenses, components, products)."
),
)
homepage_url = models.URLField(
_("Homepage URL"),
max_length=1024,
blank=True,
help_text=_("The homepage URL of the owner."),
)
contact_info = models.CharField(
_("contact information"),
max_length=500,
blank=True,
help_text=_(
"Information, frequently a dedicated email address, about "
"contacting an owner for license clarifications and permissions."
),
)
notes = models.TextField(blank=True, help_text=_("Extended notes about an owner."))
alias = models.CharField(
db_index=True,
max_length=500,
blank=True,
help_text=_("Alternative spellings of the name of the owner as a comma-separated list."),
)
OWNER_TYPE_CHOICES = (
(
"Organization",
_("Organization: an ongoing entity that provides software or promotes standards."),
),
("Person", _("Person: an individual that provides software or promotes standards.")),
("Project", _("Project: a dynamic collection of contributors to a software project.")),
)
type = models.CharField(
max_length=20,
default="Organization",
choices=OWNER_TYPE_CHOICES,
db_index=True,
help_text=_(
"An owner type differentiates individuals, ongoing businesses, and "
"dynamic organizations (such as software projects). "
"An owner of any type can be associated with a license, component, or "
"product. An owner can also be the parent of any other owner."
),
)
# Use choices database values instead of the `get_FIELD_display`, in reporting.
type.report_with_db_value = True
# This reference all the Owners associated with self through a
# Subowner relation where self is the parents.
# Only the children are copied on ParentChild relation type.
children = models.ManyToManyField(
to="self",
through="Subowner",
symmetrical=False,
)
class Meta:
unique_together = (
("dataspace", "name"),
("dataspace", "uuid"),
)
ordering = ["name"]
def __str__(self):
return self.name
@property
def urn(self):
return urn.build("owner", name=self.name)
def get_url(self, name, params=None):
if not params:
params = [self.dataspace.name, quote_plus(self.name)]
return super().get_url(name, params)
def get_absolute_url(self):
return self.get_url("details")
@property
def details_url(self):
return self.get_absolute_url()
def get_change_url(self):
return self.get_url("change")
def get_delete_url(self):
return self.get_url("delete")
@staticmethod
def get_extra_relational_fields():
return ["external_references"]
@property
def case_insensitive_unique_on(self):
return ["name"]
def get_alias_list(self):
return self.alias.replace(", ", ",").split(",")
def as_spdx(self):
spdx_type = "Person" if self.type == "Person" else "Organization"
return f"{spdx_type}: {self.name}"
# Path: organization/api.py
import django_filters
from rest_framework import serializers
from dje.api import CreateRetrieveUpdateListViewSet
from dje.api import DataspacedAPIFilterSet
from dje.api import DataspacedSerializer
from dje.api import ExternalReferenceSerializer
from dje.filters import LastModifiedDateFilter
from dje.filters import MultipleCharFilter
from dje.filters import MultipleUUIDFilter
from dje.models import external_references_prefetch
from organization.admin import OwnerAdmin
from organization.models import Owner
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
class OwnerSerializer(DataspacedSerializer):
absolute_url = serializers.SerializerMethodField()
licenses = serializers.HyperlinkedRelatedField(
source="license_set",
many=True,
read_only=True,
view_name="api_v2:license-detail",
lookup_field="uuid",
)
components = serializers.HyperlinkedRelatedField(
source="component_set",
many=True,
read_only=True,
view_name="api_v2:component-detail",
lookup_field="uuid",
)
external_references = ExternalReferenceSerializer(
many=True,
read_only=True,
)
class Meta:
model = Owner
fields = (
"api_url",
"absolute_url",
"uuid",
"name",
"homepage_url",
"contact_info",
"notes",
"alias",
"type",
"licenses",
"components",
"external_references",
"urn",
"created_date",
"last_modified_date",
)
extra_kwargs = {
"api_url": {
"view_name": "api_v2:owner-detail",
"lookup_field": "uuid",
},
}
class OwnerEmbeddedSerializer(OwnerSerializer):
class Meta(OwnerSerializer.Meta):
fields = (
"api_url",
"absolute_url",
"uuid",
"name",
"homepage_url",
"contact_info",
"notes",
"alias",
"type",
"urn",
"created_date",
"last_modified_date",
)
class OwnerFilterSet(DataspacedAPIFilterSet):
uuid = MultipleUUIDFilter()
name = MultipleCharFilter(
help_text="Exact name. Multi-value supported.",
)
type = django_filters.ChoiceFilter(
choices=Owner.OWNER_TYPE_CHOICES,
help_text=f"Exact owner type. Supported values: "
f'{", ".join(type[0] for type in Owner.OWNER_TYPE_CHOICES)}',
)
last_modified_date = LastModifiedDateFilter()
class Meta:
model = Owner
fields = (
"uuid",
"name",
"type",
"last_modified_date",
)
class OwnerViewSet(CreateRetrieveUpdateListViewSet):
queryset = Owner.objects.all()
serializer_class = OwnerSerializer
lookup_field = "uuid"
filterset_class = OwnerFilterSet
search_fields = (
"name",
"alias",
"notes",
)
search_fields_autocomplete = ("name",)
ordering_fields = (
"name",
"alias",
"created_date",
"last_modified_date",
)
email_notification_on = OwnerAdmin.email_notification_on
allow_reference_access = True
| def get_queryset(self): |
====REPOSITORY====
# Repo Name: kylemcdonald/i2i-realtime
# Path: settings.py
class Settings(BaseSettings):
# config, cannot be changed
mode: str = Field(default="video")
worker_id: int = Field(default=0)
output_fast: bool = Field(default=True)
zmq_video_port: int = Field(default=5554)
job_start_port: int = Field(default=5555)
settings_port: int = Field(default=5556)
job_finish_port: int = Field(default=5557)
output_port: int = Field(default=5558)
osc_port: int = Field(default=8000)
primary_hostname: str = Field(default='localhost')
translation: bool = Field(default=False)
safety: bool = Field(default=False)
local_files_only: bool = Field(default=False)
warmup: str = Field(default=None)
threaded: bool = Field(default=False)
# parameters for inference
prompt: str = Field(default='A psychedelic landscape.')
num_inference_steps: int = Field(default=2)
fixed_seed: bool = Field(default=True)
seed: int = Field(default=0)
batch_size: int = Field(default=4)
strength: float = Field(default=0.7)
passthrough: bool = Field(default=False)
compel: bool = Field(default=True)
# can be changed dynamically
opacity: float = Field(default=1.0)
mirror: bool = Field(default=False)
debug: bool = Field(default=False)
pad: bool = Field(default=False)
fps: int = Field(default=30)
directory: str = Field(default='data/frames')
class Config:
env_file = ".env"
env_file_encoding = 'utf-8'
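A brief illustrative sketch of how these fields are typically overridden, assuming standard pydantic BaseSettings behaviour (constructor kwargs take precedence over environment variables and the .env file, which take precedence over the Field defaults, with case-insensitive name matching); all values below are examples only:
import os
from settings import Settings  # module path as in the listing above

os.environ["PROMPT"] = "A quiet forest at dawn."  # maps to the `prompt` field
os.environ["STRENGTH"] = "0.6"                    # parsed to float by pydantic

settings = Settings(batch_size=8)                 # explicit kwarg wins over env/.env/defaults
print(settings.prompt, settings.strength, settings.batch_size)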
# Path: settings_api.py
class SettingsAPI:
def __init__(self, settings):
self.shutdown = False
self.settings = settings
port = settings.settings_port
self.thread = threading.Thread(target=self.run, args=(port,))
def start(self):
if not self.thread.is_alive():
self.thread.start()
def run(self, port):
if self.settings.translation:
translate = Translate()
if self.settings.safety:
safety_checker = SafetyChecker()
app = FastAPI()
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/prompt/{msg}")
async def prompt(msg: str):
if self.settings.translation:
prompt = translate.translate_to_en(msg)
if prompt != msg:
print("Translating from:", msg)
else:
prompt = msg
override = "-f" in prompt
if override:
prompt = prompt.replace("-f", "").strip()
if self.settings.safety and not override:
safety = safety_checker(prompt)
if safety != "safe":
print(f"Ignoring prompt ({safety}):", prompt)
return {"safety": "unsafe"}
self.settings.prompt = prompt
print("Updated prompt:", prompt)
return {"safety": "safe"}
@app.get("/directory/{status}")
async def directory(status: str):
self.settings.directory = "data/" + status
print("Updated directory status:", self.settings.directory)
return {"status": "updated"}
@app.get("/debug/{status}")
async def debug(status: bool):
self.settings.debug = status
print("Updated debug status:", status)
return {"status": "updated"}
@app.get("/compel/{status}")
async def compel(status: bool):
self.settings.compel = status
print("Updated compel status:", status)
return {"status": "updated"}
@app.get("/passthrough/{status}")
async def passthrough(status: bool):
self.settings.passthrough = status
print("Updated passthrough status:", self.settings.passthrough)
return {"status": "updated"}
@app.get("/fixed_seed/{status}")
async def fixed_seed(status: bool):
self.settings.fixed_seed = status
print("Updated fixed_seed status:", self.settings.fixed_seed)
return {"status": "updated"}
@app.get("/mirror/{status}")
async def mirror(status: bool):
self.settings.mirror = status
print("Updated mirror status:", status)
return {"status": "updated"}
@app.get("/batch_size/{value}")
async def batch_size(value: int):
self.settings.batch_size = value
print("Updated batch_size:", self.settings.batch_size)
return {"status": "updated"}
@app.get("/seed/{value}")
async def seed(value: int):
self.settings.seed = value
print("Updated seed:", self.settings.seed)
return {"status": "updated"}
@app.get("/steps/{value}")
async def steps(value: int):
self.settings.num_inference_steps = value
print("Updated num_inference_steps:", self.settings.num_inference_steps)
return {"status": "updated"}
@app.get("/strength/{value}")
async def strength(value: float):
self.settings.strength = value
print("Updated strength:", self.settings.strength)
return {"status": "updated"}
@app.get("/opacity/{value}")
async def opacity(value: float):
value = min(max(value, 0), 1)
self.settings.opacity = value
print("Updated opacity:", self.settings.opacity)
return {"status": "updated"}
config = uvicorn.Config(app, host="0.0.0.0", port=port, log_level="info")
self.server = uvicorn.Server(config=config)
try:
self.server.run()
except KeyboardInterrupt:
pass
def close(self):
print("SettingsAPI closing")
if hasattr(self, "server"):
self.server.should_exit = True
self.thread.join()
# Path: threaded_sequence.py
class ThreadedSequence(ThreadedWorker):
def __init__(self, settings):
super().__init__(has_input=False)
self.settings = settings
self.fns = natsorted(os.listdir(settings.directory))
self.playing = threading.Event()
self.scrub_queue = queue.Queue()
def setup(self):
self.start_time = time.time()
self.frame_number = 0
def read_scrub(self):
while not self.scrub_queue.empty():
self.frame_number = self.scrub_queue.get()
timestamp = self.frame_number / self.settings.fps
self.start_time = time.time() - timestamp
def work(self):
self.playing.wait()
if self.should_exit:
return
self.read_scrub()
timestamp = time.time()
index = self.frame_number
next_frame_time = self.start_time + (index + 1) / self.settings.fps
sleep_time = next_frame_time - time.time()
if sleep_time > 0:
time.sleep(sleep_time)
fn = os.path.join(self.settings.directory, self.fns[index])
with open(fn, "rb") as f:
encoded = f.read()
self.frame_number += 1
if self.frame_number == len(self.fns):
self.frame_number = 0
self.start_time = time.time()
return timestamp, index, encoded
def close(self):
self.should_exit = True
self.playing.set()
super().close()
def play(self):
print("playing")
if self.playing.is_set():
return
self.read_scrub()
self.scrub(self.frame_number / len(self.fns))
self.playing.set()
def pause(self):
print("pausing")
self.playing.clear()
def scrub(self, pct):
frame = int(pct * len(self.fns))
self.scrub_queue.put(frame)
# Path: threaded_camera.py
class ThreadedCamera(ThreadedWorker):
def __init__(self):
super().__init__(has_input=False)
self.jpeg = TurboJPEG()
self.cap = cv2.VideoCapture(-1, cv2.CAP_V4L2)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
self.cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
self.cap.set(cv2.CAP_PROP_FPS, 30)
def setup(self):
self.start_time = time.time()
self.frame_number = 0
def work(self):
timestamp = time.time()
self.frame_number += 1
# if self.frame_number % 30 == 0:
# duration = time.time() - self.start_time
# fps = self.frame_number / duration
# print(f"fps {fps:.2f}")
ret, frame = self.cap.read()
# crop to the center 1024x1024
frame = frame[28:1052, 448:1472]
# print(frame.shape)
encoded = self.jpeg.encode(frame)
return timestamp, self.frame_number, encoded
def cleanup(self):
self.cap.release()
# Path: threaded_zmq_video.py
class ThreadedZmqVideo(ThreadedWorker):
def __init__(self, settings):
super().__init__(has_input=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.SUB)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
address = f"tcp://10.0.0.24:{settings.zmq_video_port}"
print(self.name, "binding to", address)
self.sock.connect(address)
self.sock.setsockopt(zmq.SUBSCRIBE, b"")
def work(self):
while not self.should_exit:
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
except zmq.Again:
continue
timestamp, index, encoded = msgpack.unpackb(msg)
# print(self.name, "zmq received", index)
return timestamp, index, encoded
def cleanup(self):
self.sock.close()
self.context.term()
# Path: batching_worker.py
class BatchingWorker(ThreadedWorker):
def __init__(self, settings):
super().__init__()
self.settings = settings
def setup(self):
self.batch = []
def work(self, input):
self.batch.append(input)
n = self.settings.batch_size
if len(self.batch) >= n:
batch = self.batch[:n]
self.batch = self.batch[n:]
return batch
# Path: zmq_sender.py
class ZmqSender(ThreadedWorker):
def __init__(self, settings):
super().__init__(has_output=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PUSH)
self.sock.setsockopt(zmq.SNDHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
self.sock.bind(f"tcp://0.0.0.0:{settings.job_start_port}")
self.settings = settings
def work(self, batch):
frame_timestamps, indices, frames = zip(*batch)
settings = self.settings
job_timestamp = time.time()
packed = msgpack.packb(
{
"job_timestamp": job_timestamp,
"frame_timestamps": frame_timestamps,
"indices": indices,
"frames": frames,
"debug": settings.debug,
"parameters": {
"prompt": settings.prompt,
"num_inference_steps": settings.num_inference_steps,
"strength": settings.strength,
"seed": settings.seed,
"passthrough": settings.passthrough,
"fixed_seed": settings.fixed_seed,
"use_compel": settings.compel
},
}
)
# frame = zmq.Frame(packed)
self.sock.send(packed)
# print(int(time.time()*1000)%1000, "sending")
# print("outgoing length", len(packed))
# print("sending", indices)
def cleanup(self):
self.sock.close()
self.context.term()
# Path: osc_video_controller.py
class OscVideoController(ThreadedWorker):
def __init__(self, video, settings):
super().__init__(has_input=False, has_output=False)
self.osc = OscSocket("0.0.0.0", settings.osc_port)
self.video = video
def work(self):
msg = self.osc.recv()
if msg is None:
return
if msg.address == "/scene":
if msg.params[0] == 1:
self.video.scrub(0)
self.video.play()
else:
self.video.pause()
# elif msg.address == "/progress":
# pct = msg.params[0]
# self.video.scrub(pct)
def cleanup(self):
self.osc.close()
# Path: osc_settings_controller.py
class OscSettingsController(ThreadedWorker):
def __init__(self, settings):
super().__init__(has_input=False, has_output=False)
address = f"0.0.0.0:{settings.osc_port}"
print(self.name, f"connecting to OSC on {address}")
self.osc = OscSocket("0.0.0.0", settings.osc_port)
self.settings = settings
self.prompt_0 = ""
self.prompt_1 = ""
self.blend = 0.5
def update_blend(self):
if self.blend == 0:
self.settings.prompt = self.prompt_0
elif self.blend == 1:
self.settings.prompt = self.prompt_1
else:
a = self.prompt_0
b = self.prompt_1
t = self.blend
self.settings.prompt = f'("{a}", "{b}").blend({1-t:.2f}, {t:.2f})'
def work(self):
try:
msg = self.osc.recv()
if msg is None:
return
if msg.address == "/prompt":
prompt = ' '.join(msg.params)
# print("OSC prompt:", prompt)
self.settings.prompt = prompt
elif msg.address == "/blend":
a, b, t = msg.params
self.prompt_0 = a
self.prompt_1 = b
self.blend = t
self.update_blend()
elif msg.address == "/prompt/0":
self.prompt_0 = ' '.join(msg.params)
self.update_blend()
elif msg.address == "/prompt/1":
self.prompt_1 = ' '.join(msg.params)
self.update_blend()
elif msg.address == "/blend_t":
self.blend = float(msg.params[0])
self.update_blend()
elif msg.address == "/seed":
seed = msg.params[0]
# print("OSC seed:", seed)
self.settings.seed = seed
elif msg.address == "/opacity":
opacity = float(msg.params[0])
opacity = min(max(opacity, 0), 1)
self.settings.opacity = opacity
elif msg.address == "/mode":
mode = msg.params[0]
if mode == "soft":
self.settings.num_inference_steps = 3
self.settings.strength = 0.5
elif mode == "hard":
self.settings.num_inference_steps = 2
self.settings.strength = 0.7
# else:
# print("unknown osc", msg.address, msg.params)
except TypeError:
print("osc TypeError")
except osc_packet.ParseError:
print("osc ParseError")
except Exception as e:
print("osc error", e)
def cleanup(self):
self.osc.close()
# Path: output_smooth.py
class OutputSmooth(ThreadedWorker):
def __init__(self, port, min_size=1, max_size=5, max_delay=200):
super().__init__(has_output=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PUB)
self.sock.bind(f"tcp://0.0.0.0:{port}")
self.sock.setsockopt(zmq.SNDHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
self.max_delay = max_delay
self.min_size = min_size
self.max_size = max_size
self.delay = 33
self.jump = 0.1
def work(self, unpacked):
start_time = time.time()
job_timestamp = unpacked["job_timestamp"]
index = unpacked["index"]
jpg = unpacked["jpg"]
# worker_id = unpacked["worker_id"]
# latency = time.time() - timestamp
# print("\033[K", end="", flush=True) # clear entire line
# print(
# f"outgoing: {index} #{worker_id} {int(1000*latency)}ms, {self.queue.qsize()}q {self.delay:.01f}ms"
# )
packed = msgpack.packb([job_timestamp, index, jpg])
self.sock.send(packed)
# doing this with smaller amounts for smaller offsets
# would help stabilize the framerate
if self.input_queue.qsize() > self.max_size:
# need to speed up
self.delay -= self.jump
if self.input_queue.qsize() < self.min_size:
# need to slow down
self.delay += self.jump
self.delay = max(0, self.delay)
self.delay = min(self.max_delay, self.delay)
next_time = start_time + (self.delay / 1000)
wait_time = next_time - time.time()
if wait_time > 0:
time.sleep(wait_time)
def cleanup(self):
self.sock.close()
self.context.term()
# Path: output_fast.py
class OutputFast(ThreadedWorker):
def __init__(self, port):
super().__init__(has_output=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PUB)
self.sock.bind(f"tcp://0.0.0.0:{port}")
self.sock.setsockopt(zmq.SNDHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
def work(self, unpacked):
timestamp = unpacked["frame_timestamp"]
index = unpacked["index"]
jpg = unpacked["jpg"]
packed = msgpack.packb([timestamp, index, jpg])
self.sock.send(packed)
# duration = time.time() - unpacked["frame_timestamp"]
# if index % 31 == 0:
# print(f"full loop {int(duration*1000)}ms", flush=True)
def cleanup(self):
self.sock.close()
self.context.term()
# Path: reordering_receiver.py
class ReorderingReceiver(ThreadedWorker):
def __init__(self, port):
super().__init__(has_input=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PULL)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
self.sock.bind(f"tcp://0.0.0.0:{port}")
self.reset_buffer()
def reset_buffer(self):
self.msg_buffer = FixedSizeDict(100)
self.next_index = None
def work(self):
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
# print(int(time.time()*1000)%1000, "receiving")
except zmq.Again:
return
receive_time = time.time()
unpacked = msgpack.unpackb(msg)
buffer_size = 30
index = unpacked["index"]
# print(self.name, "received index", index)
if index == 0:
print(self.name, "resetting buffer due to index == 0")
self.reset_buffer()
elif self.next_index and index < self.next_index - buffer_size:
print(self.name, f"resetting buffer due to {index} < {self.next_index} - {buffer_size}")
self.reset_buffer()
self.msg_buffer[index] = unpacked # start by adding to buffer
if unpacked["index"] % 31 == 0: # close to 30, but prime (for logs)
round_trip = receive_time - unpacked["job_timestamp"]
worker_id = unpacked["worker_id"]
print(self.name, f"worker {worker_id} round trip: {int(1000*round_trip)}ms")
index = unpacked["index"]
worker_id = unpacked["worker_id"]
jpg = unpacked["jpg"]
if self.next_index is None:
# if next_index is None, let's start with this one
self.next_index = index
diff = abs(index - self.next_index)
if diff > 10:
# if we got a big jump, let's just jump to it
# this also works for resetting to 0
self.next_index = index
# packed = msgpack.packb([timestamp, index, jpg])
# publisher.send(packed) # echo mode
# ordered mode
while self.next_index in self.msg_buffer:
unpacked = self.msg_buffer[self.next_index]
self.output_queue.put(unpacked)
del self.msg_buffer[self.next_index]
self.next_index += 1
def cleanup(self):
self.sock.close()
self.context.term()
# Path: show_stream.py
class ShowStream(ThreadedWorker):
def __init__(self, port, settings):
super().__init__(has_input=False, has_output=False)
self.port = port
self.fullscreen = True
self.settings = settings
def setup(self):
self.jpeg = TurboJPEG()
self.context = zmq.Context()
self.sock = self.context.socket(zmq.SUB)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
address = f"tcp://localhost:{self.port}"
print(f"Connecting to {address}")
self.sock.connect(address)
self.sock.setsockopt(zmq.SUBSCRIBE, b"")
self.window_name = f"Port {self.port}"
cv2.namedWindow(self.window_name, cv2.WINDOW_GUI_NORMAL)
if self.fullscreen:
cv2.setWindowProperty(self.window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
def show_msg(self, msg):
timestamp, index, jpg = msgpack.unpackb(msg)
img = self.jpeg.decode(jpg, pixel_format=TJPF_RGB)
input_h, input_w = img.shape[:2]
if self.settings.mirror:
img = img[:,::-1,:]
if self.settings.pad:
canvas = np.zeros((1024, 1280, 3), dtype=np.uint8)
canvas[:, :1024] = img
img = canvas
if self.settings.debug:
latency = time.time() - timestamp
text = f"{input_w}x{input_h} @ {int(1000*latency)} ms"
cv2.putText(
img,
text,
(10, 50),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255, 255, 0),
2,
cv2.LINE_AA,
)
cv2.imshow(self.window_name, img[:, :, ::-1])
def work(self):
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
self.show_msg(msg)
except zmq.Again:
pass
key = cv2.waitKey(1)
# toggle fullscreen when user presses 'f' key
if key == ord("f") or key == ord("F"):
self.fullscreen = not self.fullscreen
if self.fullscreen:
cv2.setWindowProperty(
self.window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN
)
else:
cv2.setWindowProperty(
self.window_name, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_KEEPRATIO
)
def cleanup(self):
self.sock.close()
self.context.term()
cv2.destroyAllWindows()
# Path: server_app.py
import os
import psutil
from settings import Settings
from settings_api import SettingsAPI
from threaded_sequence import ThreadedSequence
from threaded_camera import ThreadedCamera
from threaded_zmq_video import ThreadedZmqVideo
from batching_worker import BatchingWorker
from zmq_sender import ZmqSender
from osc_video_controller import OscVideoController
from osc_settings_controller import OscSettingsController
from output_smooth import OutputSmooth
from output_fast import OutputFast
from reordering_receiver import ReorderingReceiver
from show_stream import ShowStream
# load up settings
settings = Settings()
# create endpoint
settings_api = SettingsAPI(settings)
# create sending end
if settings.mode == "video":
video = ThreadedSequence(settings)
controller = OscVideoController(video, settings)
elif settings.mode == "camera":
video = ThreadedCamera()
controller = OscSettingsController(settings)
elif settings.mode == "zmq":
video = ThreadedZmqVideo(settings)
controller = OscSettingsController(settings)
batcher = BatchingWorker(settings).feed(video)
sender = ZmqSender(settings).feed(batcher)
# create receiving end
reordering_receiver = ReorderingReceiver(settings.job_finish_port)
if settings.output_fast:
output = OutputFast(settings.output_port).feed(reordering_receiver)
else:
output = OutputSmooth(settings.output_port).feed(reordering_receiver)
# create display end
show_stream = ShowStream(settings.output_port, settings)
# start from the end of the chain to the beginning
# start display
show_stream.start()
# start receiving end
settings_api.start()
reordering_receiver.start()
output.start()
# start sending end
controller.start()
sender.start()
batcher.start()
video.start()
if settings.mode == "video":
video.play()
try:
process = psutil.Process(os.getpid())
while True:
memory_usage_bytes = process.memory_info().rss
memory_usage_gb = memory_usage_bytes / (1024**3)
| if memory_usage_gb > 10: |
====REPOSITORY====
# Repo Name: wusize/CLIM
# Path: src/open_clip/eva_clip/constants.py
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
# Path: src/open_clip/eva_clip/constants.py
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
# Path: src/open_clip/eva_clip/model.py
class CLIP(nn.Module):
def __init__(
self,
embed_dim: int,
vision_cfg: CLIPVisionCfg,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
):
super().__init__()
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
self.transformer = text.transformer
self.embed_dim = embed_dim
self.vocab_size = text.vocab_size
self.token_embedding = text.token_embedding
self.positional_embedding = text.positional_embedding
self.ln_final = text.ln_final
self.text_projection = text.text_projection
self.register_buffer('attn_mask', text.attn_mask, persistent=False)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.visual.set_grad_checkpointing(enable)
self.transformer.grad_checkpointing = enable
@torch.jit.ignore
def no_weight_decay(self):
return {'logit_scale'}
def encode_image(self, image, normalize: bool = False):
features = self.visual(image)
return F.normalize(features, dim=-1) if normalize else features
def encode_text(self, text, normalize: bool = False):
cast_dtype = self.transformer.get_cast_dtype()
x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.to(cast_dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return F.normalize(x, dim=-1) if normalize else x
def forward(self, image, text):
image_features = self.encode_image(image, normalize=True)
text_features = self.encode_text(text, normalize=True)
return image_features, text_features, self.logit_scale.exp()
# Path: src/open_clip/eva_clip/model.py
class CustomCLIP(nn.Module):
def __init__(
self,
embed_dim: int,
vision_cfg: CLIPVisionCfg,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
itm_task: bool = False,
):
super().__init__()
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
self.embed_dim = embed_dim
print(f'Freeze text encoder parameters', flush=True)
for param in self.text.parameters():
param.requires_grad = False
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def train(self, mode=True):
super().train(mode)
self.text.train(mode=False)
return self
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False, **kwargs):
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
self.text.lock(unlocked_layers, freeze_layer_norm)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.visual.set_grad_checkpointing(enable)
self.text.set_grad_checkpointing(enable)
@torch.jit.ignore
def no_weight_decay(self):
return {'logit_scale'}
def encode_image(self, image, normalize: bool = False):
features = self.visual(image)
return F.normalize(features, dim=-1) if normalize else features
def encode_text(self, text, normalize: bool = False):
features = self.text(text)
return F.normalize(features, dim=-1) if normalize else features
def forward(self, image, text):
image_features = self.encode_image(image, normalize=True)
text_features = self.encode_text(text, normalize=True)
return image_features, text_features, self.logit_scale.exp()
def encode_dense(self, image, normalize: bool = False, keep_shape=False):
features = self.visual.encode_dense(image, keep_shape=keep_shape)
if normalize:
if keep_shape:
features = F.normalize(features, dim=1)
else:
features = F.normalize(features, dim=-1)
return features
def encode_pseudo_boxes(self, image, normed_boxes, normalize: bool = False,
extract_type='v1'):
features = self.visual.extract_roi_features(image, normed_boxes, extract_type=extract_type)
if normalize:
features = F.normalize(features, dim=-1)
return features
def encode_masks(self, image, masks, normalize=True, mask_attn=False):
mask_pooled = self.visual.mask_pool(image, masks)
if normalize:
mask_pooled = F.normalize(mask_pooled, dim=-1)
return mask_pooled
# Path: src/open_clip/eva_clip/model.py
def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
"""Convert applicable model parameters to low-precision (bf16 or fp16)"""
def _convert_weights(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.to(dtype)
if l.bias is not None:
l.bias.data = l.bias.data.to(dtype)
if isinstance(l, (nn.MultiheadAttention, Attention)):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr, None)
if tensor is not None:
tensor.data = tensor.data.to(dtype)
if isinstance(l, nn.Parameter):
l.data = l.data.to(dtype)
for name in ["text_projection", "proj"]:
if hasattr(l, name) and isinstance(l, nn.Parameter):
attr = getattr(l, name, None)
if attr is not None:
attr.data = attr.data.to(dtype)
model.apply(_convert_weights)
# Path: src/open_clip/eva_clip/model.py
def convert_to_custom_text_state_dict(state_dict: dict):
if 'text_projection' in state_dict:
# old format state_dict, move text tower -> .text
new_state_dict = {}
for k, v in state_dict.items():
if any(k.startswith(p) for p in (
'text_projection',
'positional_embedding',
'token_embedding',
'transformer',
'ln_final',
'logit_scale'
)):
k = 'text.' + k
new_state_dict[k] = v
return new_state_dict
return state_dict
# Path: src/open_clip/eva_clip/model.py
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == 'bf16':
cast_dtype = torch.bfloat16
elif precision == 'fp16':
cast_dtype = torch.float16
return cast_dtype
# Path: src/open_clip/eva_clip/openai.py
def load_openai_model(
name: str,
precision: Optional[str] = None,
device: Optional[Union[str, torch.device]] = None,
jit: bool = True,
cache_dir: Optional[str] = None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
precision: str
Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
cache_dir : Optional[str]
The directory to cache the downloaded model weights
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if precision is None:
precision = 'fp32' if device == 'cpu' else 'fp16'
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
# Build a non-jit model from the OpenAI jitted model state dict
cast_dtype = get_cast_dtype(precision)
try:
model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
# model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
model = model.to(device)
if precision.startswith('amp') or precision == 'fp32':
model.float()
elif precision == 'bf16':
convert_weights_to_lp(model, dtype=torch.bfloat16)
return model
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 (typically for CPU)
if precision == 'fp32':
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
# ensure image_size attr available at consistent location for both jit and non-jit
model.visual.image_size = model.input_resolution.item()
return model
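A minimal illustrative sketch of how the loader documented above might be called; "ViT-B-32" is a placeholder for any tag returned by list_openai_models() (a local checkpoint path also works), and the import assumes the eva_clip package is importable under that name:
import torch
from eva_clip.openai import load_openai_model

model = load_openai_model("ViT-B-32", device="cpu", jit=False)  # fp32 on CPU per the docstring

with torch.no_grad():
    dummy = torch.zeros(1, 3, 224, 224)  # 224x224 assumed; the actual size is model.visual.image_size
    image_features = model.encode_image(dummy)
print(image_features.shape)  # (1, embed_dim)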
# Path: src/open_clip/eva_clip/pretrained.py
def is_pretrained_cfg(model: str, tag: str):
if model not in _PRETRAINED:
return False
return _clean_tag(tag) in _PRETRAINED[model]
# Path: src/open_clip/eva_clip/pretrained.py
def get_pretrained_cfg(model: str, tag: str):
if model not in _PRETRAINED:
return {}
model_pretrained = _PRETRAINED[model]
return model_pretrained.get(_clean_tag(tag), {})
# Path: src/open_clip/eva_clip/pretrained.py
def download_pretrained(
cfg: Dict,
force_hf_hub: bool = False,
cache_dir: Union[str, None] = None,
):
target = ''
if not cfg:
return target
download_url = cfg.get('url', '')
download_hf_hub = cfg.get('hf_hub', '')
if download_hf_hub and force_hf_hub:
# use HF hub even if url exists
download_url = ''
if download_url:
target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
elif download_hf_hub:
has_hf_hub(True)
# we assume the hf_hub entries in pretrained config combine model_id + filename in
# 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
# use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
model_id, filename = os.path.split(download_hf_hub)
if filename:
target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
else:
target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
return target
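The hf_hub convention described in the comment above can be sketched with a few hypothetical config entries (the repo ids, filename, and URL are placeholders, not real weights):
cfg_explicit_file = {"hf_hub": "some-org/some-clip-model/model.pt"}   # model id + filename
cfg_default_file = {"hf_hub": "some-org/some-clip-model/"}            # trailing slash -> open_clip_pytorch_model.bin
cfg_url = {"url": "https://example.com/weights/some-clip-model.pt"}   # a URL wins unless force_hf_hub=True

# target_path = download_pretrained(cfg_explicit_file, cache_dir="~/.cache/eva_clip")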
# Path: src/open_clip/eva_clip/pretrained.py
def list_pretrained_tags_by_model(model: str):
""" return all pretrain tags for the specified model architecture """
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags
# Path: src/open_clip/eva_clip/transform.py
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
):
mean = mean or OPENAI_DATASET_MEAN
if not isinstance(mean, (list, tuple)):
mean = (mean,) * 3
std = std or OPENAI_DATASET_STD
if not isinstance(std, (list, tuple)):
std = (std,) * 3
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
if resize_longest_max:
transforms = [
ResizeMaxSize(image_size, fill=fill_color)
]
else:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
transforms.extend([
_convert_to_rgb,
ToTensor(),
normalize,
])
return Compose(transforms)
# Path: src/open_clip/eva_clip/tokenizer.py
class HFTokenizer:
"HuggingFace tokenizer wrapper"
def __init__(self, tokenizer_name:str):
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:
# same cleaning as for default tokenizer, except lowercasing
# adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
if isinstance(texts, str):
texts = [texts]
texts = [whitespace_clean(basic_clean(text)) for text in texts]
input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids
return input_ids
# Path: src/open_clip/eva_clip/tokenizer.py
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<start_of_text>"]
eot_token = _tokenizer.encoder["<end_of_text>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
tokens = tokens[:context_length] # Truncate
tokens[-1] = eot_token
result[i, :len(tokens)] = torch.tensor(tokens)
return result
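A short usage sketch for the tokenizer above (illustrative; the import assumes this module is importable as eva_clip.tokenizer):
from eva_clip.tokenizer import tokenize

tokens = tokenize(["a photo of a cat", "a photo of a dog"])
print(tokens.shape)   # torch.Size([2, 77]): one row per string, context_length columns
print(tokens[0, :6])  # <start_of_text>, word tokens, <end_of_text>, then zero padding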
# Path: src/open_clip/eva_clip/utils.py
def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
# Rescale the grid of position embeddings when loading from state_dict
old_pos_embed = state_dict.get('visual.positional_embedding', None)
if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
return
grid_size = to_2tuple(model.visual.grid_size)
extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
if new_seq_len == old_pos_embed.shape[0]:
return
if extra_tokens:
pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
else:
pos_emb_tok, pos_emb_img = None, old_pos_embed
old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
pos_emb_img = F.interpolate(
pos_emb_img,
size=grid_size,
mode=interpolation,
align_corners=True,
)
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
if pos_emb_tok is not None:
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
else:
new_pos_embed = pos_emb_img
state_dict['visual.positional_embedding'] = new_pos_embed
# Path: src/open_clip/eva_clip/utils.py
def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
all_keys = list(state_dict.keys())
# interpolate position embedding
if 'visual.pos_embed' in state_dict:
pos_embed_checkpoint = state_dict['visual.pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.visual.patch_embed.num_patches
num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
state_dict['visual.pos_embed'] = new_pos_embed
patch_embed_proj = state_dict['visual.patch_embed.proj.weight']
patch_size = model.visual.patch_embed.patch_size
state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate(
patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
# Path: src/open_clip/eva_clip/utils.py
def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
# Rescale the grid of position embeddings when loading from state_dict
old_pos_embed = state_dict.get('positional_embedding', None)
if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
return
grid_size = to_2tuple(model.visual.grid_size)
extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
if new_seq_len == old_pos_embed.shape[0]:
return
if extra_tokens:
pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
else:
pos_emb_tok, pos_emb_img = None, old_pos_embed
old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
pos_emb_img = F.interpolate(
pos_emb_img,
size=grid_size,
mode=interpolation,
align_corners=True,
)
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
if pos_emb_tok is not None:
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
else:
new_pos_embed = pos_emb_img
state_dict['positional_embedding'] = new_pos_embed
# Path: src/open_clip/eva_clip/utils.py
def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
all_keys = list(state_dict.keys())
# interpolate position embedding
if 'pos_embed' in state_dict:
pos_embed_checkpoint = state_dict['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.visual.patch_embed.num_patches
num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
state_dict['pos_embed'] = new_pos_embed
patch_embed_proj = state_dict['patch_embed.proj.weight']
patch_size = model.visual.patch_embed.patch_size
state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
# Path: src/open_clip/eva_clip/factory.py
import json
import logging
import os
import pathlib
import re
import torch
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple, Union, Dict, Any
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
get_cast_dtype
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
from .transform import image_transform
from .tokenizer import HFTokenizer, tokenize
from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, "r", encoding="utf8") as f:
| model_cfg = json.load(f) |
====REPOSITORY====
# Repo Name: LkPrtctrd/BSL-V53
# Path: Heart/Record/ByteStreamHelper.py
class ByteStreamHelper:
def readDataReference(self):
result = []
result.append(self.readVInt())
if not result[0]:
return None
result.append(self.readVInt())
return result
def writeDataReference(self, high=0, low=-1):
self.writeVInt(high)
if high != 0:
self.writeVInt(low)
def compress(self, data):
compressedText = zlib.compress(data)
self.writeInt(len(compressedText) + 4)
self.writeIntLittleEndian(len(data))
self.buffer += compressedText
def decompress(self):
data_length = self.readInt()
self.readIntLittleEndian()
return zlib.decompress(self.readBytes(data_length - 4))
def decodeIntList(self):
length = self.readVInt()
intList = []
for i in range(length):
intList.append(self.readVInt())
return intList
def decodeLogicLong(self, logicLong=None):
if logicLong is None:
logicLong = LogicLong(0, 0)
high = self.readVInt()
logicLong.high = high
low = self.readVInt()
logicLong.low = low
def decodeLogicLongList(self):
length = self.readVInt()
logicLongList = []
for i in range(length):
logicLongList.append(LogicLong(self.readVInt(), self.readVInt()))
return logicLongList
def encodeIntList(self, intList):
length = len(intList)
self.writeVInt(length)
for i in intList:
self.writeVInt(i)
def encodeLogicLong(self, logicLong):
if logicLong is None:
logicLong = LogicLong(0, 0)
self.writeVInt(logicLong.getHigherInt(self))
self.writeVInt(logicLong.getLowerInt(self))
def encodeLogicLongList(self, logicLongList):
length = len(logicLongList)
self.writeVInt(self, length)
for logicLong in logicLongList:
self.writeVInt(logicLong.getHigherInt(self))
self.writeVInt(logicLong.getLowerInt(self))
# Path: Heart/Record/ChecksumEncoder.py
class ChecksumEncoder:
def __init__(self):
self.checksum = 0
self.checksum2 = 0
self.checksumEnabled = True
def destruct(self):
self.checksum = 0
self.checksum2 = 0
self.checksumEnabled = True
def enableCheckSum(self, state):
if not self.checksumEnabled or state:
if not self.checksumEnabled and state:
self.checksum = self.checksum2
self.checksumEnabled = state
else:
self.checksum2 = self.checksum
self.checksumEnabled = False
def equals(self, checksum_instance):
if not checksum_instance:
return False
if not checksum_instance.checksumEnabled:
checksum = checksum_instance.checksum
else:
checksum2 = checksum_instance.checksum2
if not self.checksumEnabled:
checksum = self.checksum
else:
checksum2 = self.checksum2
return checksum == checksum2
def getCheckSum(self):
if not self.checksumEnabled:
checksum = self.checksum2
else:
checksum = self.checksum
return checksum
@staticmethod
def hashCode():
Debugger.error("ChecksumEncoder hashCode not designed")
return 42
@staticmethod
def isByteStream():
return False
def isCheckSumEnabled(self):
return self.checksumEnabled
@staticmethod
def isCheckSumOnlyMode():
return True
def resetCheckSum(self):
self.checksum = 0
def writeBoolean(self, value):
if value: integer = 13
else: integer = 7
self.checksum = integer + CPPDefs.__ROR4__(self.checksum, 31)
def writeByte(self, value):
self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 11
def writeBytes(self, value, length):
if value: integer = length + 38
else: integer = 37
self.checksum = CPPDefs.__ROR4__(self.checksum, 31)
def writeInt8(self, value):
if value + 0x80 >= 0x100:
Debugger.error("")
self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 11
def writeInt16(self, value):
if value + 0x8000 >= 0x10000:
Debugger.error("")
self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 19
def writeInt24(self, value):
if value + 0x800000 >= 0x1000000:
Debugger.error("")
self.checksum = (value & 0xFFFFFF) + CPPDefs.__ROR4__(self.checksum, 31) + value + 21
def writeInt(self, value):
self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 9
@staticmethod
def writeLong(bytestream, logicLong):
logicLong.encode(bytestream)
def writeLongLong(self, logicLong):
self.checksum = logicLong.getLowerInt() + CPPDefs.__ROR4__(logicLong.getHigherInt() + CPPDefs.__ROR4__(self.checksum, 31) + 67, 31) + 91
def writeShort(self, value):
self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 19
def writeString(self, value):
checksum = CPPDefs.__ROR4__(self.checksum, 31)
if value:
self.checksum = checksum + len(value) + 28
else:
self.checksum = checksum + 27
def writeStringReference(self, value):
self.checksum = len(value) + CPPDefs.__ROR4__(self.checksum, 31) + 38
def writeVInt(self, value):
self.checksum = value + CPPDefs.__ROR4__(self.checksum, 31) + 33
def writeVLong(self, high, low):
self.checksum = low + CPPDefs.__ROR4__(high + CPPDefs.__ROR4__(self.checksum, 31) + 65, 31) + 88
# Path: Heart/Logic/LogicStringUtil.py
class LogicStringUtil:
@staticmethod
def getBytes(string):
return string.encode()
@staticmethod
def getByteLength(string):
return len(string)
# Path: Heart/Record/Debugger.py
class Debugger:
@staticmethod
def error(message):
print("[ERROR]", message)
@staticmethod
def warning(message):
print("[WARNING]", message)
# Path: Heart/Logic/LogicLong.py
class LogicLong:
def __init__(self):
self.high = 0
self.low = 0
def __init__(self, high, low):
self.high = high
self.low = low
@staticmethod
def clone(logicLong):
return LogicLong(logicLong.high, logicLong.low)
def decode(self, bytestream):
self.high = bytestream.readInt()
self.low = bytestream.readInt()
def encode(self, bytestream):
bytestream.writeInt(self.high)
bytestream.writeInt(self.low)
def equals(self, logicLong):
if logicLong:
if self.low == logicLong.low:
return self.high == logicLong.high
return False
@staticmethod
def getHigherInt(longlong):
return longlong >> 32
@overload
def getHigherInt(self):
return self.high
@staticmethod
def getLowerInt(longlong):
result = longlong & 0x7FFFFFFF
if longlong < 0:
return longlong | 0x80000000
return result
@overload
def getLowerInt(self):
return self.low
def getLong(self):
result = self.low
if result >> 31 == -1:
return result | 0x80000000
return result
def greaterThan(self, logicLong):
result = False
if logicLong:
result = True
if self.high <= logicLong.high:
result = False
if self.high == logicLong.high:
return self.low > logicLong.low
return result
def hashCode(self):
return 31 * self.high + self.low
def isZero(self):
if not self.low:
return self.high == 0
else:
return False
def set(self, low, high):
lowerInt = low & 0x7FFFFFFF
if low < 0:
lowerInt = low | 0x80000000
self.high = high >> 32
self.low = lowerInt
def toLong(high, low):
lowerInt = low & 0x7FFFFFFF
if low < 0:
lowerInt = low | 0x80000000
return lowerInt | high << 32
def toString(text, logiclong):
print(text, f"LogicLong({logiclong.high},{logiclong.low})")
# Path: Heart/Record/ByteStream.py
import zlib
from Heart.Record.ByteStreamHelper import ByteStreamHelper
from Heart.Record.ChecksumEncoder import ChecksumEncoder
from Heart.Logic.LogicStringUtil import LogicStringUtil
from Heart.Record.Debugger import Debugger
from Heart.Logic.LogicLong import LogicLong
tempBuf.append(value & 0xFF)
self.messagePayload = bytes(tempBuf)
self.offset += 1
def writeInt16(self, value):
ChecksumEncoder.writeInt(self, value)
self.bitoffset = 0
tempBuf = list(self.messagePayload)
tempBuf.append(value >> 8 & 0xFF)
tempBuf.append(value & 0xFF)
self.messagePayload = bytes(tempBuf)
self.offset += 2
def writeInt24(self, value):
ChecksumEncoder.writeInt(self, value)
self.bitoffset = 0
tempBuf = list(self.messagePayload)
tempBuf.append(value >> 16 & 0xFF)
tempBuf.append(value >> 8 & 0xFF)
tempBuf.append(value & 0xFF)
self.messagePayload = bytes(tempBuf)
self.offset += 3
def writeInt(self, value):
ChecksumEncoder.writeInt(self, value)
ByteStream.writeIntToByteArray(self, value)
def writeIntLittleEndian(self, value):
self.bitoffset = 0
tempBuf = list(self.messagePayload)
tempBuf.append(value & 0xFF)
tempBuf.append(value >> 8 & 0xFF)
tempBuf.append(value >> 16 & 0xFF)
tempBuf.append(value >> 24 & 0xFF)
self.messagePayload = bytes(tempBuf)
self.offset += 4
def writeIntToByteArray(self, value):
self.bitoffset = 0
tempBuf = list(self.messagePayload)
tempBuf.append(value >> 24 & 0xFF)
tempBuf.append(value >> 16 & 0xFF)
tempBuf.append(value >> 8 & 0xFF)
tempBuf.append(value & 0xFF)
self.messagePayload = bytes(tempBuf)
self.offset += 4
def writeLongLong(self, longlong):
ChecksumEncoder.writeLongLong(self, longlong)
self.bitoffset = 0
high = LogicLong.getHigherInt(longlong)
ByteStream.writeIntToByteArray(self, high)
low = LogicLong.getLowerInt(longlong)
ByteStream.writeIntToByteArray(self, low)
def writeLong(self, high, low):
self.writeIntToByteArray(high)
self.writeIntToByteArray(low)
def writeShort(self, value):
ChecksumEncoder.writeShort(self, value)
self.bitoffset = 0
tempBuf = list(self.messagePayload)
tempBuf.append(value >> 8 & 0xFF)
tempBuf.append(value & 0xFF)
self.messagePayload = bytes(tempBuf)
self.offset += 2
def writeString(self, value=None):
ChecksumEncoder.writeString(self, value)
self.bitoffset = 0
if value != None:
str_bytes = LogicStringUtil.getBytes(value)
str_length = LogicStringUtil.getByteLength(str_bytes)
if str_length < 900001:
ByteStream.writeIntToByteArray(self, str_length)
self.messagePayload += str_bytes
self.offset += str_length
else:
Debugger.warning(f"ByteStream::writeString invalid string length {str_length}")
ByteStream.writeIntToByteArray(self, -1)
else:
ByteStream.writeIntToByteArray(self, -1)
def writeStringReference(self, value):
ChecksumEncoder.writeStringReference(self, value)
self.bitoffset = 0
str_bytes = LogicStringUtil.getBytes(value)
str_length = LogicStringUtil.getByteLength(str_bytes)
if str_length < 900001:
ByteStream.writeIntToByteArray(self, str_length)
self.messagePayload += str_bytes
self.offset += str_length
else:
Debugger.warning(f"ByteStream::writeString invalid string length {str_length}")
ByteStream.writeIntToByteArray(self, -1)
def writeVInt(self, data):
self.bitoffset = 0
if type(data) == str:
data = int(data)
final = b''
if (data & 2147483648) != 0:
if data >= -63:
final += (data & 0x3F | 0x40).to_bytes(1, 'big', signed=False)
self.offset += 1
elif data >= -8191:
final += (data & 0x3F | 0xC0).to_bytes(1, 'big', signed=False)
final += ((data >> 6) & 0x7F).to_bytes(1, 'big', signed=False)
self.offset += 2
elif data >= -1048575:
final += (data & 0x3F | 0xC0).to_bytes(1, 'big', signed=False)
final += ((data >> 6) & 0x7F | 0x80).to_bytes(1, 'big', signed=False)
final += ((data >> 13) & 0x7F).to_bytes(1, 'big', signed=False)
self.offset += 3
elif data >= -134217727:
final += (data & 0x3F | 0xC0).to_bytes(1, 'big', signed=False)
final += ((data >> 6) & 0x7F | 0x80).to_bytes(1, 'big', signed=False)
final += ((data >> 13) & 0x7F | 0x80).to_bytes(1, 'big', signed=False)
final += ((data >> 20) & 0x7F).to_bytes(1, 'big', signed=False)
| self.offset += 4 |
====REPOSITORY====
# Repo Name: DavidBellamy/labrador
# Path: lab_transformers/data/tokenize_tabular_data.py
class mimic4_eCDFer:
def __init__(self, ecdf_data: np.lib.npyio.NpzFile) -> None:
"""
Maps an iterable of lab codes and an iterable of corresponding lab values to their probabilities on the corresponding eCDF.
Parameters:
ecdf_data: a NumPy .npz data archive containing named arrays: {itemid}_x and {itemid}_y for all itemid's in MIMIC-IV.
{itemid}_x contains the *unique* values of the random variable (e.g. lab values).
{itemid}_y contains the probabilities corresponding to P(X <= x) for that itemid.
Note: {itemid}_x, {itemid}_y are index-aligned such that:
ecdf_data[f"{itemid}_y"][i] = P(X <= ecdf_data[f"{itemid}_x"][i]) for all i.
"""
self.ecdf_data = ecdf_data
self.itemids = list(set([int(itemid[:-2]) for itemid in ecdf_data.files]))
def __call__(
self,
itemids: Union[Iterable[int], NDArray[np.int_]],
lab_values: Union[Iterable[float], NDArray[np.float_]],
null_token: Union[int, np.nan] = np.nan,
) -> NDArray[np.float_]:
"""
Returns Pr(X <= x) for all x in lab_values.
i.e. maps all values in lab_values to their probabilities on the eCDF of the corresponding itemid
itemids: an iterable of integer lab codes (called itemid's in MIMIC-IV).
Missing values are not allowed because they are used to index into the eCDF database.
lab_values: an iterable of float lab values.
Missing values are allowed and will be mapped to null_token.
null_token: the token to use for missing values. Default is np.nan.
Returns an array of probabilities corresponding to the input lab_values.
"""
assert len(itemids) == len(
lab_values
), "itemids and lab_values must be the same length"
# Find the indices of the nearest values in the compressed eCDF cut-off points
ixs = [
self.find_nearest_ecdf_cutoff(itemid, labval)
for itemid, labval in zip(itemids, lab_values)
]
# Return the corresponding eCDF probabilities
return np.array(
[
self.ecdf_data[f"{itemid}_y"][ix].item()
if ix is not None
else null_token
for itemid, ix in zip(itemids, ixs)
]
)
def find_nearest_ecdf_cutoff(
self, itemid: int, lab_value: float
) -> Union[int, None]:
"""
Finds the nearest value to `lab_value` in the eCDF for `itemid`.
Returns the index of this nearest value or None if the lab_value is missing.
"""
if np.isnan(lab_value):
idx = None
else:
lab_value = np.array(lab_value)
idx = (
np.abs(self.ecdf_data[f"{itemid}_x"] - lab_value.reshape(-1, 1))
).argmin(axis=1)
return idx
def __len__(self):
return len(self.itemids)
# Path: lab_transformers/utils.py
class NpEncoder(json.JSONEncoder):
"""A JSONEncoder subclass to handle Numpy integers, floats and arrays when writing JSON lines to disk.
Usage: json.dumps(data, cls=NpEncoder)
This function overwrites the default() method of JSONEncoder to handle additional types; specifically Numpy
integers, floats and arrays. For all other types, the standard default() method is used for encoding.
"""
def default(
self, obj: Union[np.integer, np.floating, np.ndarray, Any]
) -> Union[int, float, List[Any], Any]:
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
# Path: scripts/preprocessing/pretraining_raw_data_to_bert_jsonl.py
import json
import os
import os.path as op
import sqlite3
import sys
import numpy as np
import pandas as pd
from itertools import groupby
from typing import Dict, Tuple, Union
from numpy.typing import NDArray
from statsmodels.distributions import ECDF
from tqdm import tqdm
from lab_transformers.data.tokenize_tabular_data import mimic4_eCDFer
from lab_transformers.utils import NpEncoder
codebook.columns = ["itemid", "frequency_rank"]
d_labitems = os.path.join(self.data_path, "d_labitems.csv")
labitem_descriptions = pd.read_csv(
d_labitems
) # load descriptions of each lab code
codebook = codebook.merge(
labitem_descriptions, on="itemid"
) # merge the descriptions with the codebook
filename = os.path.join(self.output_path, "labcode_codebook_labrador.csv")
codebook.to_csv(filename, index=False) # save the codebook
return frequency_ranks
def compute_time_delta(self, df: pd.DataFrame) -> pd.DataFrame:
# Convert charttime Pandas datetime (for computing time deltas later)
df["charttime"] = pd.to_datetime(df["charttime"])
# Sort by subject_id and charttime (ascending)
df = df.sort_values(["subject_id", "charttime"], inplace=False)
# calculate time deltas (next time minus previous time)
df["time_delta"] = df.charttime - df.charttime.shift(1)
# correct rows at border between 2 patients (replace with 0)
df.loc[(df.subject_id != df.subject_id.shift(1)), "time_delta"] = pd.Timedelta(
"0 days"
)
# Convert time_delta's to decimal days (e.g. 5.35 days)
df["time_delta"] = df["time_delta"].dt.total_seconds() / (60 * 60 * 24)
return df
def split_data(
self, df: pd.DataFrame
) -> Tuple[Dict[str, NDArray[np.integer]], Dict[str, pd.DataFrame]]:
# Sort patients into train/validation/test sets
patient_list = df.subject_id.unique()
# Shuffle the order of patients
self.rng.shuffle(patient_list)
train_size = int(np.floor(self.train_pct * len(patient_list)))
val_size = int(np.ceil(self.val_pct * len(patient_list)))
test_size = int(len(patient_list) - train_size - val_size)
train_patients = patient_list[:train_size]
val_patients = patient_list[train_size : train_size + val_size]
test_patients = patient_list[train_size + val_size :]
# Split out the training data
train_df = df[df.subject_id.isin(train_patients)]
# Extract the unique itemid's from the training data partition
train_itemids = train_df.itemid.unique()
# Split out the val/test sets if the itemid also exists in the training data
val_df = df[
(df.subject_id.isin(val_patients)) & (df.itemid.isin(train_itemids))
]
test_df = df[
(df.subject_id.isin(test_patients)) & (df.itemid.isin(train_itemids))
]
return {
"train_patients": train_patients,
"val_patients": val_patients,
"test_patients": test_patients,
}, {"train_df": train_df, "val_df": val_df, "test_df": test_df}
def probability_transform_values(
self, splits: Dict[str, pd.DataFrame]
) -> Dict[str, pd.DataFrame]:
train_df = splits["train_df"]
val_df = splits["val_df"]
test_df = splits["test_df"]
unique_itemids = train_df.itemid.unique()
compressed_ecdf_data = {}
for itemid in tqdm(unique_itemids, desc="Computing eCDFs"):
lab_values = train_df[
~np.isnan(train_df.valuenum) & (train_df.itemid == itemid)
]["valuenum"].values
if len(lab_values) == 0:
continue
# Calculate the empirical CDF for the current lab test
ecdf = ECDF(lab_values)
# Compress the eCDF to just the unique lab values (and their probabilities)
unique_ixs = []
cum_lengths = 0
for _, g in groupby(ecdf.x):
group = list(g)
cum_lengths += len(group)
unique_ix = cum_lengths - 1
unique_ixs.append(unique_ix)
# Store the resulting compressed eCDF data
compressed_ecdf_data[f"{itemid}_x"] = ecdf.x[unique_ixs]
compressed_ecdf_data[f"{itemid}_y"] = ecdf.y[unique_ixs]
# Save the compressed eCDF values and probabilities
np.savez(op.join(self.output_path, "mimic4_ecdfs.npz"), **compressed_ecdf_data)
# Load the result back and use it to probability transform the validation and test data splits
ecdf_data = np.load(op.join(self.output_path, "mimic4_ecdfs.npz"))
eCDFer = mimic4_eCDFer(ecdf_data)
# filter rows to just itemid's in the eCDF Numpy zip archive (npz)
train_df[train_df.itemid.isin(eCDFer.itemids)] = train_df[
train_df.itemid.isin(eCDFer.itemids)
].apply(eCDFer, axis=1)
val_df = val_df[val_df.itemid.isin(eCDFer.itemids)]
test_df = test_df[test_df.itemid.isin(eCDFer.itemids)]
train_df.apply(
| eCDFer, axis=1 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: NLP-Core-Team/RealCode_eval
# Path: lm_eval/generators.py
class InfillGenerator:
def __init__(self,
model_path: str,
num_samples: int,
prefix_tokens: tp.Union[str, tp.List[int]] = [],
middle_tokens: tp.Union[str, tp.List[int]] = [],
suffix_tokens: tp.Union[str, tp.List[int]] = [],
max_context_length: int = None,
left_context_ratio: int = 1,
dtype = torch.bfloat16,
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"],
model_kwargs: tp.Dict = {},
generation_params: tp.Dict[str, tp.Any] = {},
context_parser: BaseParser = TrivialContextParser(),
add_extra_spaces_to_generation=0,
):
"""
Class to generate code in fill-in-the-middle mode
params:
model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params
prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens
middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context
dtype=torch.bfloat16 - torch dtype to use for inference
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"] - regular expressions that determine end of generation
model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts
add_extra_spaces_to_generation=0 - number of extra spaces added at the beginning of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. Codellama)
"""
self.device = torch.device("cuda")
# self.device = torch.device("cpu")
logger.info(f"Loading model from {model_path} with kwargs f{model_kwargs}")
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(model_path,
torch_dtype=dtype, device_map="auto", trust_remote_code=True, **model_kwargs
).eval()
logger.info(f"Loaded model from {model_path} with kwargs f{model_kwargs}")
logger.info(f"Device map: \n{self.model.hf_device_map}")
self.num_samples = num_samples
self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
self.eos_sequences = eos_sequences[:]
#context truncation parameters
self.max_context_length = max_context_length
self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
self.right_context_truncate_at = 1 / (left_context_ratio + 1)
self.generation_params = generation_params
self.generation_params['num_return_sequences'] = self.num_samples
self.context_parser = context_parser
# Number of tokens before and after truncating to max_context_length
self.count_inferenced_tokens = []
self.count_possible_tokens = []
self.add_extra_spaces_to_generation = add_extra_spaces_to_generation
def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor:
if type(str_or_list) == str:
return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
else:
return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)
def _prepare_tokens(self, task: Task) -> torch.Tensor:
left_context_str, right_context_str = self.context_parser.get_left_and_right_context(task)
logger.info("\n" + "\n".join(left_context_str.split('\n')[-20:]))
left_tokens = self.tokenizer.encode(
left_context_str, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
right_tokens = self.tokenizer.encode(
right_context_str, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
self.count_possible_tokens.append(left_tokens.shape[1] + right_tokens.shape[1])
if self.max_context_length and left_tokens.shape[1] + right_tokens.shape[1] > self.max_context_length:
logger.debug("Truncating context")
left_tokens = left_tokens[:, -min(int(self.max_context_length * self.left_context_truncate_at), left_tokens.shape[1]) + 1:]
right_tokens = right_tokens[:, :min(int(self.max_context_length * self.right_context_truncate_at), right_tokens.shape[1]) - 1]
tokens = torch.cat([self.prefix_tokens, left_tokens, self.middle_tokens, right_tokens, self.suffix_tokens], dim=-1).type(torch.long)
return tokens
def _postprocess(self, generation: str):
new_gen = []
for i, line in enumerate(generation.split('\n')):
if i == 0 and self.add_extra_spaces_to_generation:
# ugly hack for codellama, weirdly removing space for skip_special_tokens=True
line = ' '*self.add_extra_spaces_to_generation + line
for eos in self.eos_sequences:
if re.search(eos, line):
return "\n".join(new_gen).rstrip() + '\n\n'
new_gen.append(line)
return "\n".join(new_gen).rstrip() + '\n\n'
@torch.no_grad()
def generate(self, tasks: tp.List[Task]) -> tp.List[tp.List[str]]:
res = []
for i, task in tqdm(enumerate(tasks)):
tokens = self._prepare_tokens(task)
if i == 0:
logger.debug(f"\nTokens: {tokens[:, :5]} ... {tokens[:, -5:]}\n")
generated_tokens = self.model.generate(tokens, **self.generation_params)
generations = self.tokenizer.batch_decode(generated_tokens[:, tokens.shape[1]:], skip_special_tokens=True)
if i % 1 == 0:
logger.debug(f"Generation for task {i}:\n{self._postprocess(generations[0])}")
res.append([self._postprocess(t) for t in generations])
self.count_inferenced_tokens.append([len(t) for t in tokens])
return res
# Path: lm_eval/generators.py
class LMGenerator(InfillGenerator):
def __init__(self,
lm_prefix_tokens: tp.Union[str, tp.List[int]] = [],
lm_suffix_tokens: tp.Union[str, tp.List[int]] = [],
**kwargs
):
"""
Class to generate code in causal LM mode, uses only left context
params:
lm_prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the context. Can be either str or list of int tokens
lm_suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the context. Can be either str or list of int tokens
"""
super().__init__(**kwargs)
self.lm_prefix_tokens = super().tokenize_special_tokens(lm_prefix_tokens)
self.lm_suffix_tokens = super().tokenize_special_tokens(lm_suffix_tokens)
logger.debug(f"lm_prefix_tokens: {self.lm_prefix_tokens}, lm_suffix_tokens: {self.lm_suffix_tokens}")
def _prepare_tokens(self, task: Task) -> torch.Tensor:
left_context_str, _ = self.context_parser.get_left_and_right_context(task)
logger.info("\n" + "\n".join(left_context_str.split('\n')[-20:]))
left_tokens = self.tokenizer.encode(
left_context_str, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
self.count_possible_tokens.append(left_tokens.shape[1])
if self.max_context_length and left_tokens.shape[1] > self.max_context_length:
left_tokens = left_tokens[:, -self.max_context_length:]
tokens = torch.cat([self.lm_prefix_tokens, left_tokens, self.lm_suffix_tokens], dim=-1).type(torch.long)
return tokens
# Path: lm_eval/evaluator.py
class Evaluator:
def __init__(self,
dataset_root: os.PathLike,
num_samples: int,
pass_k_list: tp.List[int] = [1],
njobs: int = 1,
working_dir: tp.Optional[os.PathLike] = None,
metric_aggregations: tp.Dict[str, tp.Callable[[Task], int]] = METRIC_AGGREGATIONS
):
self.metrics = []
for pass_k in pass_k_list:
if num_samples < pass_k:
raise ValueError(f"num_samples {num_samples} must be greater than or equal to PassK={pass_k}")
self.metrics.append(PassK(pass_k, num_samples))
self.dataset_root = dataset_root
self.num_samples = num_samples
self.njobs = njobs
self.working_dir = working_dir
self.metric_aggregations = metric_aggregations
def evaluate(self,
tasks: tp.List[Task],
generations: tp.List[tp.List[str]],
) -> tp.Dict[tp.Literal["aggregated", "detailed"], tp.Any]:
logger.info(f"Evaluating {len(tasks)} tasks with {self.num_samples} samples on {self.njobs} CPUs")
# Run test evaluation
if self.njobs == 1:
results = [
[evaluate_override( self.dataset_root, task, gen, os.path.join(self.working_dir) ) for gen in generations[i]]
for i, task in enumerate(tasks)
]
else:
with Manager() as manager:
cache = manager.dict()
with manager.Pool(processes=self.njobs) as pool:
results = [[None for _2 in range(self.num_samples)] for _ in tasks]
async_result = pool.starmap_async(
evaluate_override_wrapped, [
( self.dataset_root, task, gen, os.path.join(self.working_dir, f"{j}_{i}"), j, i, cache )
for j, task in enumerate(tasks) for i, gen in enumerate(generations[j])
]
)
res = async_result.get()
for task_n, gen_n, result in res:
results[task_n][gen_n] = result
if task_n % 25 == 0 and gen_n == 0:
logger.debug(result['output'])
# Calculate metrics per task
all_metric_names = ['compilation_error_rate', 'exact_match'] + [t.name() for t in self.metrics]
metrics = []
agg_metrics = {level: {metric_name: defaultdict(list) for metric_name in all_metric_names} for level in self.metric_aggregations}
for task, task_results, task_generations in zip(tasks, results, generations):
if len(task_results) != self.num_samples:
raise ValueError(f"Task {task} has {len(task_results)} samples, expected {self.num_samples}")
correct = sum([int(t['passed'] == task.total_tests) for t in task_results])
not_compiles = mean([int(t['passed'] + t['failed'] == 0) for t in task_results])
exact_match = mean([int(re.sub(r'\W+', '', task.gt) == re.sub(r'\W+', '', gen)) for gen in task_generations])
task_metrics = {'compilation_error_rate': not_compiles, 'exact_match': exact_match}
for metric in self.metrics:
task_metrics[metric.name()] = metric(correct)
task_metrics['evaluations'] = [t['output'] for t in task_results]
metrics.append(task_metrics)
for level, level_func in self.metric_aggregations.items():
for metric in all_metric_names:
agg_metrics[level][metric][level_func(task)].append(task_metrics[metric])
for level in self.metric_aggregations:
for metric_name in all_metric_names:
means = {val: mean(agg_metrics[level][metric_name][val]) for val in agg_metrics[level][metric_name]}
agg_metrics[level][metric_name] = means
# Save metrics
metrics = agg_metrics | {
"detailed": [asdict(task) | task_metric for task, task_metric in zip(tasks, metrics)]
}
return metrics
# Path: lm_eval/context_parser.py
class TrivialContextParser(BaseParser):
def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:
"""
returns left and right context without processing
"""
return task.left_context, task.right_context
# Path: lm_eval/utils.py
def load_dataset(root_path: os.PathLike, meta_file: str = 'dataset.json', limit: int = 10_000) -> List[Task]:
with open(Path(root_path) / meta_file, 'r') as f:
dataset = [Task(**t) for t in json.load(f)][:limit]
return dataset
# Path: main.py
import hydra
import torch
import numpy as np
import random
import json
import os
import logging
from lm_eval.generators import InfillGenerator, LMGenerator
from lm_eval.evaluator import Evaluator
from lm_eval.context_parser import TrivialContextParser
from lm_eval.utils import load_dataset
from omegaconf import DictConfig, OmegaConf
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else:
parser = TrivialContextParser()
dtype_map = {'fp16': torch.float16, 'fp32': torch.float, 'bf16': torch.bfloat16}
if cfg.generator_mode == 'infill':
generator = InfillGenerator(
add_extra_spaces_to_begin=0,
model_path=cfg.model_path,
dtype=dtype_map[cfg.dtype],
num_samples=cfg.num_samples,
prefix_tokens=cfg.prefix_tokens,
middle_tokens=cfg.middle_tokens,
suffix_tokens=cfg.suffix_tokens,
max_context_length=cfg.max_context_length,
generation_params=dict(cfg.generation_params),
eos_sequences=cfg.eos_sequences,
model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {},
context_parser=parser,
left_context_ratio=cfg.left_context_ratio,
add_extra_spaces_to_generation=cfg.tokenizer_fix
)
elif cfg.generator_mode == 'lm':
generator = LMGenerator(
model_path=cfg.model_path,
dtype=dtype_map[cfg.dtype],
num_samples=cfg.num_samples,
lm_prefix_tokens=cfg.lm_prefix_tokens if 'lm_prefix_tokens' in cfg else [],
lm_suffix_tokens=cfg.lm_suffix_tokens if 'lm_suffix_tokens' in cfg else [],
max_context_length=cfg.max_context_length,
generation_params=dict(cfg.generation_params),
eos_sequences=cfg.eos_sequences,
model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {},
context_parser=parser,
add_extra_spaces_to_generation=cfg.tokenizer_fix
)
else:
raise ValueError(f"generator_mode can be either 'lm' or 'infill', found {cfg.generator_mode}")
evaluator = Evaluator(
dataset_root=cfg.dataset_root,
num_samples=cfg.num_samples,
| pass_k_list=cfg.pass_k_list, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ENDEVSOLS/Long-Trainer
# Path: longtrainer/loaders.py
class DocumentLoader:
def load_csv(self, path):
"""
Load data from a CSV file at the specified path.
Args:
path (str): The file path to the CSV file.
Returns:
The loaded CSV data.
Exceptions:
Prints an error message if the CSV loading fails.
"""
try:
loader = CSVLoader(file_path=path)
data = loader.load()
return data
except Exception as e:
print(f"Error loading CSV: {e}")
def wikipedia_query(self, search_query):
"""
Query Wikipedia using a given search term and return the results.
Args:
search_query (str): The search term to query on Wikipedia.
Returns:
The query results.
Exceptions:
Prints an error message if the Wikipedia query fails.
"""
try:
data = WikipediaLoader(query=search_query, load_max_docs=2).load()
return data
except Exception as e:
print(f"Error querying Wikipedia: {e}")
def load_urls(self, urls):
"""
Load and parse content from a list of URLs.
Args:
urls (list): A list of URLs to load.
Returns:
The loaded data from the URLs.
Exceptions:
Prints an error message if loading URLs fails.
"""
try:
loader = UnstructuredURLLoader(urls=urls)
data = loader.load()
return data
except Exception as e:
print(f"Error loading URLs: {e}")
def load_YouTubeVideo(self, urls):
"""
Load YouTube video information from provided URLs.
Args:
urls (list): A list of YouTube video URLs.
Returns:
The loaded documents from the YouTube URLs.
Exceptions:
Prints an error message if loading YouTube videos fails.
"""
try:
loader = YoutubeLoader.from_youtube_url(
urls, add_video_info=True, language=["en", "pt", "zh-Hans", "es", "ur", "hi"],
translation="en")
documents = loader.load()
return documents
except Exception as e:
print(f"Error loading YouTube video: {e}")
def load_pdf(self, path):
"""
Load data from a PDF file at the specified path.
Args:
path (str): The file path to the PDF file.
Returns:
The loaded and split PDF pages.
Exceptions:
Prints an error message if the PDF loading fails.
"""
try:
loader = PyPDFLoader(path)
pages = loader.load_and_split()
return pages
except Exception as e:
print(f"Error loading PDF: {e}")
def load_text_from_html(self, path):
"""
Load and parse text content from an HTML file at the specified path.
Args:
path (str): The file path to the HTML file.
Returns:
The loaded HTML data.
Exceptions:
Prints an error message if loading text from HTML fails.
"""
try:
loader = BSHTMLLoader(path)
data = loader.load()
return data
except Exception as e:
print(f"Error loading text from HTML: {e}")
def load_markdown(self, path):
"""
Load data from a Markdown file at the specified path.
Args:
path (str): The file path to the Markdown file.
Returns:
The loaded Markdown data.
Exceptions:
Prints an error message if loading Markdown fails.
"""
try:
loader = UnstructuredMarkdownLoader(path)
data = loader.load()
return data
except Exception as e:
print(f"Error loading Markdown: {e}")
def load_doc(self, path):
"""
Load data from a DOCX file at the specified path.
Args:
path (str): The file path to the DOCX file.
Returns:
The loaded DOCX data.
Exceptions:
Prints an error message if loading DOCX fails.
"""
try:
loader = Docx2txtLoader(path)
data = loader.load()
return data
except Exception as e:
print(f"Error loading DOCX: {e}")
# Path: longtrainer/loaders.py
class TextSplitter:
def __init__(self, chunk_size=1024, chunk_overlap=100):
"""
Initialize the TextSplitter with a specific chunk size and overlap.
Args:
chunk_size (int): The size of each text chunk.
chunk_overlap (int): The overlap size between chunks.
"""
self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
def split_documents(self, documents):
"""
Split the provided documents into chunks based on the chunk size and overlap.
Args:
documents (list): A list of documents to be split.
Returns:
A list of split documents.
Exceptions:
Prints an error message if splitting documents fails.
"""
try:
return self.text_splitter.split_documents(documents)
except Exception as e:
print(f"Error splitting documents: {e}")
# Path: longtrainer/retrieval.py
class DocRetriever:
"""
Advanced Document Retriever integrates retrieval techniques
to efficiently retrieve documents based on provided queries.
"""
def __init__(self, documents, embedding_model, existing_faiss_index=None, num_k=3):
"""
Initializes the AdvancedDocumentRetriever with a set of documents and an embedding model.
Args:
documents (list): A list of documents to be indexed and retrieved.
embedding_model (OpenAIEmbeddings): The embedding model used for document vectorization.
"""
try:
self.embedding_model = embedding_model
self.document_collection = documents
self.faiss_index = existing_faiss_index
if not documents:
raise ValueError("Document collection is empty.")
if not self.faiss_index:
# Index documents using FAISS
self._index_documents()
# Initialize BM25 and FAISS retrievers
self.bm25_retriever = BM25Retriever.from_documents(documents)
if self.faiss_index:
self.faiss_retriever = self.faiss_index.as_retriever(search_kwargs={"k": num_k})
else:
self.faiss_retriever = None
# Create an Ensemble Retriever combining BM25 and FAISS
self.ensemble_retriever = EnsembleRetriever(
retrievers=[self.bm25_retriever, self.faiss_retriever],
weights=[0.5, 0.5]
)
except Exception as e:
print(f"Initialization error in AdvancedDocumentRetriever: {e}")
def _index_documents(self):
"""
Indexes the provided documents into the FAISS index for efficient retrieval.
Handles large document collections by segmenting them into smaller batches.
"""
if self.faiss_index is None: # Only index if there's no existing FAISS index
try:
if len(self.document_collection) < 2000:
self.faiss_index = FAISS.from_documents(self.document_collection, self.embedding_model)
else:
self.faiss_index = FAISS.from_documents(self.document_collection[:2000], self.embedding_model)
for i in range(2000, len(self.document_collection), 2000):
end_index = min(i + 2000, len(self.document_collection))
additional_index = FAISS.from_documents(self.document_collection[i:end_index], self.embedding_model)
self.faiss_index.merge_from(additional_index)
except Exception as e:
print(f"Error indexing documents: {e}")
def save_index(self, file_path):
"""
Saves the FAISS index to a specified file path.
Args:
file_path (str): Path where the FAISS index will be saved.
"""
try:
self.faiss_index.save_local(file_path)
except Exception as e:
print(f"Error saving FAISS index: {e}")
def update_index(self, new_documents):
"""
Updates the FAISS index with new documents.
Args:
new_documents (list): A list of new documents to add to the index.
"""
# Add this method to handle updates to the existing index
if not self.faiss_index:
raise ValueError("FAISS index not initialized.")
if len(new_documents) < 2000:
new_index = FAISS.from_documents(new_documents, self.embedding_model)
else:
# self.faiss_index = FAISS.from_documents(self.document_collection[:2000], self.embedding_model)
new_index = FAISS.from_documents(new_documents[:2000], self.embedding_model)
for i in range(2000, len(new_documents), 2000):
end_index = min(i + 2000, len(new_documents))
additional_index = FAISS.from_documents(new_documents[i:end_index], self.embedding_model)
new_index.merge_from(additional_index)
new_index = FAISS.from_documents(new_documents, self.embedding_model)
self.faiss_index.merge_from(new_index)
def delete_index(self, file_path):
"""
Deletes the FAISS index directory from the specified path.
Args:
file_path (str): Path of the FAISS index directory to be deleted.
"""
try:
if os.path.exists(file_path):
if os.path.isdir(file_path):
shutil.rmtree(file_path)
else:
os.remove(file_path)
else:
print("FAISS index path does not exist.")
except Exception as e:
print(f"Error deleting FAISS index path: {e}")
def retrieve_documents(self):
"""
Retrieves relevant documents based on the provided query using the Ensemble Retriever.
Args:
query (str): Query string for retrieving relevant documents.
Returns:
A list of documents relevant to the query.
"""
try:
return self.ensemble_retriever
except Exception as e:
print(f"Error retrieving documents: {e}")
# Path: longtrainer/bot.py
class ChainBot:
def __init__(self, retriever, llm, prompt, token_limit):
"""
Initialize the ChainBot with a retriever, language model (llm), prompt,
and an optional maximum token limit.
Args:
retriever: The document retriever object.
llm: Language learning model for generating responses.
prompt (str): The initial prompt to start the conversation.
max_token_limit (int, optional): Maximum token limit for the conversation buffer. Defaults to 200.
"""
try:
# Memory and chain setup with dynamic max token limit
self.memory = ConversationTokenBufferMemory(
llm=llm,
max_token_limit=token_limit,
memory_key="chat_history",
return_messages=True,
output_key='answer'
)
self.chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
return_source_documents=True,
chain_type='stuff', # Modify as needed
combine_docs_chain_kwargs={"prompt": prompt},
memory=self.memory,
verbose=False,
)
except Exception as e:
# Handle any exceptions that occur during initialization
print(f"Error initializing ChainBot: {e}")
def get_chain(self):
"""
Retrieve the conversational retrieval chain.
Returns:
The ConversationalRetrievalChain instance.
"""
return self.chain
# Path: longtrainer/vision_bot.py
class VisionMemory:
def __init__(self, token_limit, ensemble_retriever=None):
model_name='gpt-4-1106-preview'
self.llm = ChatOpenAI(model_name=model_name)
self.memory = ConversationTokenBufferMemory(
llm=self.llm,
max_token_limit=token_limit,
memory_key="chat_history",
return_messages=True,
output_key='answer'
)
self.chat_history = []
self.prompt_template = '''
Act as Intelligent assistant
{context}
Your task is to answer the query with accurate answer using the chat history context.
If the answer is unknown, admitting ignorance is preferred over fabricating a response. Dont need to add irrelevant text explanation in response.
Chat History: {chat_history}
Question: {question}
Answer
'''
self.ensemble_retriever = ensemble_retriever
def save_chat_history(self, query, answer):
self.chat_history.append([query, answer])
self.memory.save_context({"input": query}, {"answer": answer})
def generate_prompt(self, query, additional_context):
memory_history = self.memory.load_memory_variables({})
return self.prompt_template.format(context=f"you will answer the query from provided context: {additional_context}", chat_history=memory_history, question=query)
def get_answer(self, query):
docs = self.ensemble_retriever.get_relevant_documents(query)
prompt = self.generate_prompt(query, docs)
return prompt
# Path: longtrainer/vision_bot.py
class VisionBot:
def __init__(self, prompt_template, max_tokens=1024):
model_name = "gpt-4-vision-preview"
self.vision_chain = ChatOpenAI(model=model_name, max_tokens=max_tokens)
self.prompt_template = prompt_template # Save prompt template to instance variable
self.human_message_content = [] # Initialize as an empty list
def encode_image(self, image_path):
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode('utf-8')
def create_vision_bot(self, image_files):
for file in image_files:
encoded_image = self.encode_image(file) # Use the encode_image function
image_snippet = {
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{encoded_image}"} # Corrected key to "url"
}
self.human_message_content.append(image_snippet)
def get_response(self, query):
# Create a message with the current query
self.human_message_content.insert(0, {"type": "text", "text": query})
# Uncomment and modify the invoke call
msg = self.vision_chain.invoke(
[AIMessage(
content=self.prompt_template # Use self.prompt_template
),
HumanMessage(content=self.human_message_content)
]
)
return msg.content
# Path: longtrainer/trainer.py
from longtrainer.loaders import DocumentLoader, TextSplitter
from longtrainer.retrieval import DocRetriever
from longtrainer.bot import ChainBot
from longtrainer.vision_bot import VisionMemory, VisionBot
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from pymongo import MongoClient
import uuid
class LongTrainer:
def __init__(self, mongo_endpoint='mongodb://localhost:27017/', llm=None,
embedding_model=None,
prompt_template=None, max_token_limit=32000):
"""
Initialize the LongTrainer with optional language learning model, embedding model,
prompt template, maximum token limit, and MongoDB endpoint.
Args:
mongo_endpoint (str): MongoDB connection string.
llm: Language learning model, defaults to ChatOpenAI with GPT-4.
embedding_model: Embedding model for document vectorization, defaults to OpenAIEmbeddings.
prompt_template: Template for generating prompts, defaults to a predefined template.
max_token_limit (int): Maximum token limit for the conversational buffer.
"""
self.llm = llm if llm is not None else ChatOpenAI(model_name='gpt-4-1106-preview')
self.embedding_model = embedding_model if embedding_model is not None else OpenAIEmbeddings()
self.prompt_template = prompt_template if prompt_template is not None else self._default_prompt_template()
self.prompt = PromptTemplate(template=self.prompt_template,
input_variables=["context", "chat_history", "question"])
self.max_token_limit = max_token_limit
self.document_loader = DocumentLoader()
self.text_splitter = TextSplitter(chunk_size=1024, chunk_overlap=100)
self.bot_data = {}
# MongoDB setup
self.client = MongoClient(mongo_endpoint)
self.db = self.client['longtrainer_db']
self.bots = self.db['bots']
self.chats = self.db['chats']
self.vision_chats = self.db['vision_chats']
def initialize_bot_id(self):
"""
Initializes a new bot with a unique identifier and initial data structure.
This method generates a unique bot_id, sets up the initial structure for the bot data, and stores
this data in Redis. Additionally, it inserts a record into the bots table in the database.
Returns:
str: The unique identifier (bot_id) for the newly initialized bot.
The bot data initialized with this method includes empty structures for documents, chains, assistants,
and other fields necessary for the bot's operation.
"""
bot_id = 'bot-' + str(uuid.uuid4())
self.bot_data[bot_id] = {
'documents': [],
'chains': {},
'assistants': {},
'retriever': None,
'ensemble_retriever': None,
'conversational_chain': None,
'faiss_path': f'faiss_index_{bot_id}',
'assistant': None
}
# Insert data into the bots table
self.bots.insert_one({"bot_id": bot_id, "faiss_path": self.bot_data[bot_id]['faiss_path']})
return bot_id
def _default_prompt_template(self):
"""
Returns the default prompt template for the assistant.
"""
return """
Act as an Intelligent Assistant:
{context}
Use the following pieces of information to answer the user's question. If the answer is unknown, admitting ignorance is preferred over fabricating a response. Dont need to add irrelevant text explanation in response.
Answers should be direct, professional, and to the point without any irrelevant details.
Assistant must focus solely on the provided question, considering the chat history for context.
Chat History: {chat_history}
Question: {question}
| Answer: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: pan-x-c/EE-LLM
# Path: megatron/data/indexed_dataset.py
class MMapIndexedDataset(torch.utils.data.Dataset):
def __init__(self, path: str, skip_warmup: bool = False, multimodal: bool = False) -> None:
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._multimodal = multimodal
self._do_init(path, skip_warmup, multimodal)
def __getstate__(self) -> str:
return self._path
def __setstate__(self, path: str) -> None:
self._do_init(path, skip_warmup=True, multimodal=False)
def __del__(self) -> None:
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self) -> int:
return len(self._index)
def __getitem__(self, idx: Union[int, np.integer, slice]) -> np.ndarray:
if isinstance(idx, (int, np.integer)):
sequence_pointer, sequence_length, sequence_mode = self._index[idx]
sequence = np.frombuffer(
self._bin_buffer,
dtype=self._index.dtype,
count=sequence_length,
offset=sequence_pointer,
)
return (sequence, sequence_mode) if sequence_mode is not None else sequence
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sequence_lengths = self._index._sequence_lengths[idx]
sequence_modes = self._index._sequence_modes[idx] if self._multimodal else None
sequence_offsets = list(accumulate(sequence_lengths))
sequences = np.split(
np.frombuffer(
self._bin_buffer,
dtype=self._index.dtype,
count=sum(sequence_lengths),
offset=self._index._sequence_pointers[start],
),
sequence_offsets[:-1],
)
return (sequences, sequence_modes) if sequence_modes is not None else sequences
else:
raise TypeError("Unexpected type received for idx: {}".format(type(idx)))
def _do_init(self, path: str, skip_warmup: bool, multimodal: bool) -> None:
self._path = path
if not skip_warmup:
print_rank_0(" warming up index mmap file...")
self.warmup_mmap_file(get_idx_path(self._path))
self._index = _IndexReader(get_idx_path(self._path), multimodal)
if not skip_warmup:
print_rank_0(" warming up data mmap file...")
self.warmup_mmap_file(get_bin_path(self._path))
print_rank_0(" creating np buffer of mmap...")
self._bin_buffer_mmap = np.memmap(get_bin_path(self._path), mode="r", order="C")
print_rank_0(" creating memory view of np buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def get(self, idx: int, offset: int = 0, length: Optional[int] = None) -> np.ndarray:
"""Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
sequence_pointer, sequence_length, sequence_mode = self._index[idx]
if length is None:
length = sequence_length - offset
sequence_pointer += offset * DType.size(self._index.dtype)
sequence = np.frombuffer(
self._bin_buffer, dtype=self._index.dtype, count=length, offset=sequence_pointer
)
return (sequence, sequence_mode) if sequence_mode is not None else sequence
@property
def sizes(self) -> np.ndarray:
return self._index.sizes
@property
def doc_idx(self) -> np.ndarray:
return self._index._document_indices
def get_doc_idx(self) -> np.ndarray:
return self._index._document_indices
def set_doc_idx(self, doc_idx: np.ndarray) -> None:
self._index._document_indices = doc_idx
def modes(self) -> np.ndarray:
return self._index.modes
@property
def supports_prefetch(self) -> bool:
return False
@staticmethod
def exists(path_prefix: str) -> bool:
return os.path.exists(get_idx_path(path_prefix)) and os.path.exists(
get_bin_path(path_prefix)
)
@staticmethod
def warmup_mmap_file(path: str) -> None:
with open(path, "rb") as stream:
while stream.read(100 * 1024 * 1024):
pass
# Path: megatron/tokenizer/gpt2_tokenization.py
PRETRAINED_MERGES_ARCHIVE_MAP = {
'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
}
# Path: megatron/tokenizer/gpt2_tokenization.py
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
}
# Path: tools/merge_datasets.py
def main():
args = get_args()
prefixes = set()
for basename in os.listdir(args.input):
prefix, ext = os.path.splitext(basename)
if prefix in prefixes:
continue
if not os.path.isfile(os.path.join(args.input, basename)):
continue
ext_pair = ".bin" if ext == ".idx" else ".idx"
assert os.path.isfile(
os.path.join(args.input, prefix) + ext_pair
), f"ERROR: {ext_pair} file not provided for {os.path.join(args.input, prefix)}"
prefixes.add(prefix)
builder = None
for prefix in sorted(prefixes):
if builder is None:
dataset = MMapIndexedDataset(os.path.join(args.input, prefix))
builder = MMapIndexedDatasetBuilder(
get_bin_path(args.output_prefix), dtype=dataset._index.dtype
)
del dataset
builder.merge_file_(os.path.join(args.input, prefix))
builder.finalize(get_idx_path(args.output_prefix))
# Path: tools/preprocess_data.py
class Encoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
# Use Encoder class as a container for global data
Encoder.tokenizer = build_tokenizer(self.args)
if self.args.split_sentences:
if not nltk_available:
print("NLTK is not available to split sentences.")
exit()
if os.environ.get("NLTK_DATA"):
library = os.path.join(os.environ.get("NLTK_DATA"), "tokenizers", "punkt", f"{self.args.lang}.pickle")
url = f"file:{library}"
else:
library = os.path.join("tokenizers", "punkt", f"{self.args.lang}.pickle")
url = f"nltk:{library}"
splitter = nltk.load(url)
if self.args.keep_newlines:
# this prevents punkt from eating newlines after sentences
Encoder.splitter = nltk.tokenize.punkt.PunktSentenceTokenizer(
train_text = splitter._params,
lang_vars = CustomLanguageVars())
else:
Encoder.splitter = splitter
else:
Encoder.splitter = IdentitySplitter()
def split(self, json_line):
data = json.loads(json_line)
output = {}
for key in self.args.json_keys:
text = data[key]
max_len = 1000000
tokens_list = [Encoder.splitter.tokenize(text[i:i+max_len]) for i in range(0, len(text), max_len)]
output[key] = [tokens for partial in tokens_list for tokens in partial]
return json.dumps(output), len(json_line)
def encode(self, json_line):
data = json.loads(json_line)
ids = {}
lens = {}
for key in self.args.json_keys:
text = data[key]
if isinstance(text, list):
sentences = text
else:
sentences = [text]
doc_ids = []
sentence_lens = []
for sentence in sentences:
sentence_ids = Encoder.tokenizer.tokenize(sentence)
if len(sentence_ids) > 0:
doc_ids.extend(sentence_ids)
sentence_lens.append(len(sentence_ids))
if len(doc_ids) > 0 and self.args.append_eod:
doc_ids.append(Encoder.tokenizer.eod)
sentence_lens[-1] += 1
ids[key] = doc_ids
lens[key] = sentence_lens
return ids, lens, len(json_line)
# Path: tools/preprocess_data.py
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True,
help='Path to input JSON')
group.add_argument('--json-keys', nargs='+', default=['text'],
help='space separate listed of keys to extract from json')
group.add_argument('--split-sentences', action='store_true',
help='Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true',
help='Keep newlines between sentences when splitting.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True,
choices=['BertWordPieceLowerCase','BertWordPieceCase',
'GPT2BPETokenizer', 'SentencePieceTokenizer',
'GPTSentencePieceTokenizer', 'NullTokenizer'],
help='What type of tokenizer to use.')
group.add_argument('--tokenizer-model', type=str, default=None,
help='YTTM tokenizer model.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file')
group.add_argument('--vocab-size', default=786,
help='size of vocab for use with NullTokenizer')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file (if necessary).')
group.add_argument('--append-eod', action='store_true',
help='Append an <eod> token to the end of a document.')
group.add_argument('--lang', type=str, default='english',
help='Language to use for NLTK-powered sentence splitting.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True,
help='Path to binary output file without suffix')
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, required=True,
help=('Number of worker processes to launch.'
'A good default for fast pre-processing '
'is: (workers * partitions) = available CPU cores.'))
group.add_argument('--partitions', type=int, default=1,
help='Number of file partitions')
group.add_argument('--log-interval', type=int, default=1000,
help='Interval between progress updates')
group.add_argument('--keep-sequential-samples', action='store_true',
help='Ensure ordering of samples in .jsonl files is '
'preserved when using partitions>1.')
args = parser.parse_args()
args.keep_empty = False
if args.tokenizer_type.lower().startswith('bert') and not args.split_sentences:
print("Are you sure you don't want to split sentences?")
# some default/dummy values for the tokenizer
args.rank = 1
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
# Path: tools/preprocess_data.py
def main():
args = get_args()
if args.split_sentences:
if nltk_available:
nltk.download("punkt", quiet=True, download_dir=os.environ.get("NLTK_DATA"))
else:
raise Exception(
"nltk library required for sentence splitting is not available.")
in_ss_out_names = []
if args.partitions == 1:
file_name, extension = os.path.splitext(args.input)
sentence_split_file = file_name + "_ss" + extension
file_names = {
'partition': args.input,
'sentence_split': sentence_split_file,
'output_prefix': args.output_prefix}
in_ss_out_names.append(file_names)
else:
in_file_names = glob.glob(args.input)
# Count total number of lines across .jsonl files
if args.keep_sequential_samples:
total_sample_count = 0
for filename in in_file_names:
with open(filename, "r") as fin:
for fc, _ in enumerate(fin):
pass
total_sample_count += (fc + 1)
partition_size = math.ceil(total_sample_count / args.partitions)
# create .jsonl partition files
for idx in range(args.partitions):
in_ss_out_name = get_file_name(args, idx)
in_ss_out_names.append(in_ss_out_name)
# check to see if partitions were already created
partitions_present = check_files_exist(in_ss_out_names, 'partition', args.partitions)
# check to see if partitions with split sentences were already created
split_sentences_present = check_files_exist(in_ss_out_names, 'sentence_split', args.partitions)
if not partitions_present and not split_sentences_present:
# populate .jsonl partition files from parent files
partitioned_input_files = []
for idx in range(args.partitions):
partitioned_input_file = open(in_ss_out_names[idx]['partition'], 'w')
partitioned_input_files.append(partitioned_input_file)
index = 0
if args.keep_sequential_samples: line_count = 0
for in_file_name in in_file_names:
# support for gzip files
if in_file_name.endswith(".gz"):
fin = gzip.open(in_file_name, 'rt')
else:
fin = open(in_file_name, 'r', encoding='utf-8')
for line in fin:
partitioned_input_files[index].write(line)
if args.keep_sequential_samples:
line_count += 1
if line_count % partition_size == 0:
index += 1
else:
index = (index + 1)%args.partitions
fin.close()
for idx in range(args.partitions):
partitioned_input_files[idx].close()
assert args.workers % args.partitions == 0
partition = Partition(args, args.workers//args.partitions)
# check to see if partitions with split sentences were already created
split_sentences_present = check_files_exist(in_ss_out_names, 'sentence_split', args.partitions)
# split sentences in partition files
if args.split_sentences and not split_sentences_present:
processes = []
for name in in_ss_out_names:
p = multiprocessing.Process(target=partition.split_sentences,
args=((name['partition'], name['sentence_split']),))
p.start()
processes.append(p)
for p in processes:
p.join()
if args.partitions == 1:
return
# encode partition files in parallel
processes = []
input_key = 'sentence_split' if args.split_sentences else 'partition'
for name in in_ss_out_names:
p = multiprocessing.Process(target=partition.process_json_file,
args=((name[input_key], name['output_prefix']),))
p.start()
processes.append(p)
for p in processes:
p.join()
if args.partitions == 1:
return
# merge bin/idx partitions
level = "document"
if args.split_sentences:
level = "sentence"
output_bin_files = {}
output_idx_files = {}
builders = {}
tokenizer = build_tokenizer(args)
for key in args.json_keys:
output_bin_files[key] = "{}_{}_{}.bin".format(args.output_prefix,
key, level)
output_idx_files[key] = "{}_{}_{}.idx".format(args.output_prefix,
key, level)
builders[key] = indexed_dataset.MMapIndexedDatasetBuilder(
output_bin_files[key],
dtype=indexed_dataset.DType.optimal_dtype(tokenizer.vocab_size),
)
for name in in_ss_out_names:
parition_output_prefix = name['output_prefix']
full_partition_output_prefix = "{}_{}_{}".format(parition_output_prefix,
key, level)
builders[key].merge_file_(full_partition_output_prefix)
builders[key].finalize(output_idx_files[key])
# Path: tests/unit_tests/data/test_preprocess_data.py
import json
import os
import sys
import tempfile
import nltk
import requests
from megatron.data.indexed_dataset import MMapIndexedDataset
from megatron.tokenizer.gpt2_tokenization import (
PRETRAINED_MERGES_ARCHIVE_MAP,
PRETRAINED_VOCAB_ARCHIVE_MAP,
)
from tools.merge_datasets import main as merge_main
from tools.preprocess_data import Encoder
from tools.preprocess_data import get_args as build_args
from tools.preprocess_data import main as build_main
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
__HUGGINGFACE_BERT_BASE_UNCASED_VOCAB = (
"https://huggingface.co/bert-base-uncased/raw/main/vocab.txt"
)
def dummy_jsonl(odir):
# numbers
list_numbers = [json.dumps({"text": str(i + 1)}) + "\n" for i in range(100)]
with open(os.path.join(odir, "numbers.jsonl"), "w") as writer:
writer.writelines(list_numbers)
# numbers ascending
list_numbers_ascending = [
json.dumps({"text": " ".join([str(j + 1) for j in range(i + 1)])}) + "\n"
for i in range(100)
]
with open(os.path.join(odir, "numbers_ascending.jsonl"), "w") as writer:
writer.writelines(list_numbers_ascending)
# test
list_test = []
with open(__file__) as reader:
for line in reader:
list_test.append(json.dumps({"text": line}) + "\n")
with open(os.path.join(odir, "test.jsonl"), "w") as writer:
writer.writelines(list_test)
def build_datasets(idir, odir, extra_args=[]):
for name in os.listdir(idir):
sys.argv = [
sys.argv[0],
"--input",
os.path.join(idir, name),
"--output-prefix",
os.path.join(odir, os.path.splitext(name)[0]),
] + extra_args
build_main()
| def merge_datasets(idir): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mitrefireline/simharness
# Path: simharness2/agents/agent.py
class ReactiveAgent:
"""A simple agent that reacts to its environment.
FIXME: update docstring style, using llama2 suggestion for now.
Parameters
----------
agent_id : int
The unique ID of this agent.
sim_id : int
The unique ID of the simulation this agent belongs to.
initial_position : tuple[int, int]
The (x,y) starting position of the agent, where (0,0) is the top-left corner of
the map and (max_x, max_y) is the bottom-right corner of the map.
Properties
----------
x : int
The current X coordinate of the agent.
y : int
The current Y coordinate of the agent.
row : int
The current row number where the agent resides.
col : int
The current column number where the agent resides.
latest_movement : str or None
The last movement made by the agent, if applicable.
latest_interaction : str or None
The last interaction had by the agent, if applicable.
mitigation_placed : bool
Whether the agent has placed any mitigations recently.
moved_off_map : bool
Whether the agent has moved off the map recently.
"""
# NOTE: `agent_speed` omitted, only used within `_do_one_simulation_step`
# Attrs that should be specified on initialization
agent_id: Any # ex: "agent_0", "dozer_0", "handcrew_0", "ff_0", etc.
sim_id: int # should be contained within sim.agents.keys()
initial_position: Tuple[int, int]
# Attributes with default values
latest_movement: int = None
latest_interaction: int = None
mitigation_placed: bool = False
moved_off_map: bool = False
def __post_init__(self):
self._current_position = self.initial_position
self.x, self.y = self.initial_position
self.row, self.col = self.y, self.x
@property
def current_position(self) -> Tuple[int, int]:
return self._current_position
@current_position.setter
def current_position(self, value: Tuple[int, int]):
self._current_position = value
self.x, self.y = value
self.row, self.col = self.y, self.x
@property
def x(self) -> int:
return self._current_position[0]
@x.setter
def x(self, value: int):
self._current_position = (value, self.y)
@property
def y(self) -> int:
return self._current_position[1]
@y.setter
def y(self, value: int):
self._current_position = (self.x, value)
@property
def row(self) -> int:
return self._current_position[1]
@row.setter
def row(self, value: int):
self._current_position = (self.x, value)
@property
def col(self) -> int:
return self._current_position[0]
@col.setter
def col(self, value: int):
self._current_position = (value, self.y)
def reset(self):
self.latest_movement = None
self.latest_interaction = None
self.mitigation_placed = False
self.moved_off_map = False
self.__post_init__()
# self.current_position = self.initial_position
# self.reward = 0
# def move(self, env: np.ndarray, direction: int) -> bool:
# """Moves the agent in the given direction if possible."""
# current_x, current_y = self.current_position
# dx, dy = self.actions[direction]
# next_x, next_y = current_x + dx, current_y + dy
# if env[next_y][next_x] == "_":
# self.current_position = (next_x, next_y)
# return True
# else:
# return False
# Path: simharness2/environments/harness.py
class Harness(gym.Env, ABC, Generic[AnySimulation]):
def __init__(
self,
*,
sim: AnySimulation,
attributes: List[str],
normalized_attributes: List[str],
in_evaluation: bool = False,
):
self.sim = sim
self.attributes = attributes
# TODO: Maybe use `attributes_to_normalize` over `normalized_attributes`?
self.normalized_attributes = normalized_attributes
if not set(self.normalized_attributes).issubset(self.attributes):
raise AssertionError(
f"All normalized attributes ({str(self.normalized_attributes)}) must be "
f"in attributes ({str(self.attributes)})!"
)
self.sim_attributes, self.nonsim_attributes = self._separate_sim_nonsim()
# Count total timesteps that have occurred within an episode.
self.timesteps = 0
# Evaluation specific attributes.
self.in_evaluation = in_evaluation
self._num_eval_iters = 0
@abstractmethod
def create_agents(self):
"""Create agents for the simulation."""
raise NotImplementedError
@abstractmethod
def get_nonsim_attribute_data(self) -> OrderedDict[str, np.ndarray]:
"""Get data that does not come from the simulation."""
raise NotImplementedError
@abstractmethod
def get_nonsim_attribute_bounds(self) -> OrderedDict[str, Dict[str, int]]:
"""Get bounds for data that does not come from the simulation."""
raise NotImplementedError
@abstractmethod
def get_harness_to_sim_action_map(self) -> Dict[int, int]:
"""Get the mapping from harness actions to sim actions."""
raise NotImplementedError
@property
def trial_logdir(self) -> str:
"""The path to the directory where (tune) trial results will be stored."""
return self._trial_logdir
@trial_logdir.setter
def trial_logdir(self, path: str):
if not os.path.isdir(path):
raise ValueError(f"{path} is not a valid directory.")
self._trial_logdir = path
def _separate_sim_nonsim(self) -> Tuple[List[str], List[str]]:
"""Separate attributes based on if they are supported by the Simulation or not."""
sim_attributes = self.sim.get_attribute_data()
sim_attributes_list = []
nonsim_attributes_list = []
for attribute in self.attributes:
if attribute not in sim_attributes.keys():
nonsim_attributes_list.append(attribute)
else:
sim_attributes_list.append(attribute)
return sim_attributes_list, nonsim_attributes_list
def _normalize_obs(
self, observations: Dict[str, np.ndarray]
) -> Dict[str, np.ndarray]:
"""Convert an observation to the [0,1] range based on known min and max."""
def normalize(data, min_max):
# FIXME: Explain purpose/intention behind using a nested function here.
return (data - min_max["min"]) / (min_max["max"] - min_max["min"])
for attribute in self.normalized_attributes:
observations[attribute] = normalize(
observations[attribute], self.min_maxes[attribute]
)
return observations
# FIXME: Does not use any instance or class attributes, make staticmethod?
def _select_from_dict(
self, dictionary: OrderedDict[str, Any], selections: List[str]
) -> OrderedDict[str, Any]:
"""Create an ordered subset with only specific keys from the input `dictionary`.
Arguments:
dictionary: A dictionary used to extract values from.
selections: A list containing the desired keys to keep from `dictionary`.
Returns:
An ordered dictionary containing a subset of the input `dictionary`.
"""
return_dict = OrderedDict()
for selection in selections:
return_dict[selection] = dictionary[selection]
return return_dict
# FIXME: Update name and make property?
def _increment_evaluation_iterations(self) -> None:
"""Increment the number of calls to `Algorithm.evaluate()` (rllib)."""
self._num_eval_iters += 1
def _log_env_reset(self):
"""Log information about the environment that is being reset."""
# if not self._debug_mode or self._episodes_debugged > self._debug_duration:
# return
# TODO: What log level should we use here?
# for idx, feat in enumerate(self.attributes):
# low, high = self._low[..., idx].min(), self._high[..., idx].max()
# obs_min = round(self.state[..., idx].min(), 2)
# obs_max = round(self.state[..., idx].max(), 2)
# Log lower bound of the (obs space) and max returned obs for each attribute.
# logger.info(f"{feat} LB: {low}, obs min: {obs_min}")
# # Log upper (lower) bounds of the returned observations for each attribute.
# logger.info(f"{feat} UB: {high}, obs max: {obs_max}")
pass
def _log_env_init(self):
"""Log information about the environment that is being initialized."""
# if self._is_eval_env:
# i, j = self.worker_idx, self.vector_idx
# logger.warning(
# f"Object {hex(id(self))}: index (i+1)*(j+1) == {(i+1)*(j+1)}"
# )
# if not self._debug_mode:
# return
# # TODO: What log level should we use here?
# logger.info(f"Object {hex(id(self))}: worker_index: {self.worker_idx}")
# logger.info(f"Object {hex(id(self))}: vector_index: {self.vector_idx}")
# logger.info(f"Object {hex(id(self))}: num_workers: {self.num_workers}")
# logger.info(f"Object {hex(id(self))}: is_remote: {self.is_remote}")
pass
def _set_debug_options(self) -> None:
"""Set debug options for the simulation."""
# self._debug_mode = config.get("debug_mode", False)
# # unit == episodes
# self._debug_duration = config.get("debug_duration", 1)
# self._episodes_debugged = 0
# logger.debug(f"Initializing environment {hex(id(self))}")
pass
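As a side note on `Harness._normalize_obs` above: it is plain per-attribute min-max scaling. The following is a minimal standalone sketch of that idea, not part of the repository; the attribute name and its min/max bounds are hypothetical.
# Standalone sketch of the min-max scaling done in Harness._normalize_obs.
# The attribute name and bounds here are made up for illustration.
import numpy as np
from collections import OrderedDict

def normalize(data, min_max):
    return (data - min_max["min"]) / (min_max["max"] - min_max["min"])

observations = OrderedDict(fire_map=np.array([[0.0, 2.0], [4.0, 6.0]]))
min_maxes = {"fire_map": {"min": 0.0, "max": 6.0}}
for attribute in ["fire_map"]:
    observations[attribute] = normalize(observations[attribute], min_maxes[attribute])
print(observations["fire_map"])  # values scaled into [0, 1]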
# Path: simharness2/environments/fire_harness.py
import copy
import logging
import os
import numpy as np
from abc import abstractmethod
from collections import OrderedDict as ordered_dict
from functools import partial
from typing import (
Any,
Callable,
Dict,
List,
Optional,
OrderedDict,
SupportsFloat,
Tuple,
TypeVar,
)
from gymnasium import spaces
from simfire.enums import BurnStatus
from simfire.sim.simulation import FireSimulation
from simfire.utils.config import Config
from simharness2.agents.agent import ReactiveAgent
from simharness2.environments.harness import Harness
logger = logging.getLogger(__name__)
AnyFireSimulation = TypeVar("AnyFireSimulation", bound=FireSimulation)
class FireHarness(Harness[AnyFireSimulation]):
def __init__(
self,
*,
sim: AnyFireSimulation,
attributes: List[str],
normalized_attributes: List[str],
movements: List[str],
interactions: List[str],
action_space_cls: Callable,
in_evaluation: bool = False,
benchmark_sim: Optional[AnyFireSimulation] = None,
harness_analytics_partial: Optional[partial] = None,
reward_cls_partial: Optional[partial] = None,
num_agents: int = 1,
agent_speed: int = 1,
agent_initialization_method: str = "automatic",
initial_agent_positions: Optional[List[Tuple[int, int]]] = None,
):
super().__init__(
sim=sim,
attributes=attributes,
normalized_attributes=normalized_attributes,
in_evaluation=in_evaluation,
)
# TODO: Define `benchmark_sim` in `DamageAwareReactiveHarness`.
# Define attributes that are specific to the FireHarness.
self.benchmark_sim = benchmark_sim
# TODO: use more apt name, ex: `available_movements`, `possible_movements`.
self.movements = copy.deepcopy(movements) # FIXME: is deepcopy necessary?
# TODO: use more apt name, ex: `available_interactions`, `possible_interactions`.
self.interactions = copy.deepcopy(interactions) # FIXME: is deepcopy necessary?
self.harness_to_sim = self.get_harness_to_sim_action_map()
# Verify that all interactions are supported by the simulator.
sim_actions = self.sim.get_actions()
interaction_types = [x for x in self.interactions if x != "none"]
if not set(interaction_types).issubset(list(sim_actions.keys())):
raise AssertionError(
f"All interactions ({str(interaction_types)}) must be "
f"in the simulator's actions ({str(list(sim_actions.keys()))})!"
)
self.agent_speed = agent_speed
self.num_agents = num_agents
# Each sim_agent_id is used to "encode" the agent position within the `fire_map`
# dimension of the returned observation of the environment. The intention is to
# help the model learn/use the location of the respective agent on the fire_map.
# NOTE: Assume that every simulator will support 3 base scenarios:
# 1. Untouched (Ex: simfire.enums.BurnStatus.UNBURNED)
# 2. Currently Being Affected (Ex: simfire.enums.BurnStatus.BURNING)
# 3. Affected (Ex: simfire.enums.BurnStatus.BURNED)
# The max value is +1 of the max mitigation value available (wrt the sim).
self._agent_id_start = max(self.harness_to_sim.values()) + 1
self._agent_id_stop = self._agent_id_start + self.num_agents
self._sim_agent_ids = np.arange(self._agent_id_start, self._agent_id_stop)
# FIXME: Usage of "agent_{}" doesn't allow us to delineate agents groups.
self._agent_ids = {f"agent_{i}" for i in self._sim_agent_ids}
self.default_agent_id = f"agent_{self._agent_id_start}"
# Spawn the agent(s) that will interact with the simulation
logger.debug(f"Creating {self.num_agents} agent(s)...")
input_kwargs = {}
if agent_initialization_method == "manual":
if initial_agent_positions is None:
raise ValueError(
"Must provide 'initial_agent_positions' when using 'manual' agent "
"initialization method."
)
input_kwargs.update({"method": "manual", "pos_list": initial_agent_positions})
elif agent_initialization_method == "automatic":
input_kwargs.update({"method": "random"})
else:
raise ValueError(
"Invalid agent initialization method. Must be either 'automatic' or "
"'manual'."
)
self.agents = self.create_agents(**input_kwargs)
self.min_maxes = self._get_min_maxes()
self.observation_space = self.get_observation_space()
self.action_space = self.get_action_space(action_space_cls)
# FIXME: Update method naming and return value for below methods.
# TODO: Update type anns on harness_analytics and reward_cls
# If provided, construct the class used to monitor this `ReactiveHarness` object.
self._setup_harness_analytics(harness_analytics_partial)
====REPOSITORY====
# Repo Name: racinette/querky
# Path: querky/result_shape.py
def one_(typename: str | None, *, optional: bool = True) -> typing.Callable[[Query], ResultShape]:
def late_binding(query: Query) -> One:
return One(query, typename, optional=optional)
return late_binding
# Path: querky/result_shape.py
def all_(typename: str | None) -> typing.Callable[[Query], ResultShape]:
def late_binding(query: Query) -> All:
return All(query, typename)
return late_binding
# Path: querky/result_shape.py
def value_(annotation: str | TypeMetaData | None = None, *, optional: bool = False) -> typing.Callable[[Query], ResultShape]:
def late_binding(query: Query) -> Value:
return Value(query, annotation, optional=optional)
return late_binding
# Path: querky/result_shape.py
def status_() -> typing.Callable[[Query], ResultShape]:
def late_binding(query: Query) -> Status:
return Status(query)
return late_binding
# Path: querky/result_shape.py
def column_(annotation: str | TypeMetaData | None = None, *, elem_optional: bool = False) -> typing.Callable[[Query], ResultShape]:
def late_binding(query: Query) -> Value:
return Column(query, annotation, elem_optional=elem_optional)
return late_binding
# Path: querky/result_shape.py
class One(ResultShape):
def __init__(self, query: Query, typename: str | None, *, optional: bool = True):
super().__init__(query)
if self.query.parent_query is None:
if self.querky.type_factory is not None:
self.ctor = self.querky.type_factory(self.query, typename)
else:
self.ctor = None
else:
# take the type constructor from the parent (base) query
parent_shape = self.query.parent_query.shape
if not isinstance(parent_shape, (All, One)):
raise ValueError("Invalid shape, must be a row shape")
self.ctor = parent_shape.ctor
# copy the type name from the parent query
typename = parent_shape.ctor.typename
if self.ctor.shape is None:
self.ctor.shape = self
self.optional = optional
if self.ctor is not None:
type_meta = TypeMetaData(typename)
else:
type_meta = self.query.contract.get_default_record_type_metadata()
self.return_type = TypeKnowledge(
metadata=type_meta,
is_optional=self.optional,
is_array=False,
elem_is_optional=None
)
self.annotate()
def annotate(self):
self.query.annotation_generator.annotate(self.return_type, context='result_type')
def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):
for attribute in self.query.query_signature.attributes:
try:
if attr_hint := self.query.attr_hints.get(attribute.name, None):
attribute.consume_attr(attr_hint)
self.query.annotation_generator.annotate(attribute.type_knowledge, 'attribute')
except Exception as ex:
raise QueryInitializationError(self.query, f"attribute `{attribute.name}`") from ex
if self.ctor is not None:
if self.ctor.attributes is None:
self.ctor.set_attributes(attrs)
elif self.ctor.attributes != attrs:
raise QueryInitializationError(
self.query,
"Expected the same return type signature, but the attributes are not equal:\n"
f"Expected: {self.ctor.attributes}\n"
f"Got: {attrs}"
)
def generate_type_code(self) -> typing.List[str] | None:
if self.ctor is not None and not self.ctor.type_code_generated:
return self.ctor.generate_type_code()
else:
return None
def get_imports(self) -> set[str]:
s = super().get_imports()
if self.ctor is not None:
return s.union(self.ctor.get_imports())
return s
async def fetch(self, conn, params):
contract = self.query.module.querky.contract
row = await contract.fetch_one(conn, self.query, params)
if self.ctor.row_factory and row is not None:
row = self.ctor.row_factory(row)
return row
def fetch_sync(self, conn, params):
contract = self.query.module.querky.contract
row = contract.fetch_one_sync(conn, self.query, params)
if self.ctor.row_factory:
row = self.ctor.row_factory(row)
return row
def get_exports(self) -> typing.Sequence[str]:
if self.ctor is not None:
return [self.ctor.get_exported_name()]
else:
return []
# Path: querky/result_shape.py
class All(One):
def __init__(self, query: Query, typename: str | None,):
super().__init__(query, typename, optional=False)
self.return_type.is_optional = False
self.return_type.is_array = True
self.return_type.elem_is_optional = False
self.query.annotation_generator.annotate(self.return_type, context='result_type')
def annotate(self):
pass
async def fetch(self, conn, params):
contract = self.query.module.querky.contract
rows = await contract.fetch_all(conn, self.query, params)
if self.ctor.row_factory:
rows = [
self.ctor.row_factory(row)
for row in rows
]
return rows
def fetch_sync(self, conn, params):
contract = self.query.module.querky.contract
rows = contract.fetch_all_sync(conn, self.query, params)
if self.ctor.row_factory:
rows = [
self.ctor.row_factory(row)
for row in rows
]
return rows
# Path: querky/result_shape.py
class ResultShape(ABC, GetImportsMixin):
def __init__(self, query: Query) -> None:
self.query: Query = query
self.return_type: TypeKnowledge | None = None
@property
def querky(self):
return self.query.querky
def get_imports(self) -> set[str]:
return self.return_type.get_imports()
@abstractmethod
def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):
...
@abstractmethod
def generate_type_code(self) -> typing.List[str] | None:
...
def get_annotation(self) -> str:
return self.return_type.typehint
@abstractmethod
async def fetch(self, conn, bound_params):
...
@abstractmethod
async def fetch_sync(self, conn, bound_params):
...
@abstractmethod
def get_exports(self) -> typing.Sequence[str]:
...
# Path: querky/conn_param_config.py
class ConnParamConfig:
name: str
def create_parameter(
self,
query: Query,
parameters: typing.Sequence[Parameter],
type_metadata: TypeMetaData
) -> tuple[Parameter, TypeKnowledge, int]:
...
# Path: querky/conn_param_config.py
class First(ConnParamConfig):
positional: bool = False
def create_parameter(
self,
_query: Query,
parameters: typing.Sequence[Parameter],
type_metadata: TypeMetaData
) -> tuple[Parameter, TypeKnowledge, int]:
if self.positional:
kind = Parameter.POSITIONAL_ONLY
else:
if parameters and parameters[0].kind == Parameter.POSITIONAL_ONLY:
kind = Parameter.POSITIONAL_ONLY
else:
kind = Parameter.POSITIONAL_OR_KEYWORD
p = Parameter(self.name, kind)
return p, TypeKnowledge(type_metadata, False, False, False), 0
# Path: querky/annotation_generator.py
class AnnotationGenerator(ABC):
@abstractmethod
def generate(self, knowledge: TypeKnowledge, context: str) -> str:
...
def annotate(self, knowledge: TypeKnowledge, context: str, force: bool = False) -> None:
if knowledge.typehint is None or force:
knowledge.typehint = self.generate(knowledge, context)
# Path: querky/type_constructor.py
class TypeConstructor(typing.Generic[T], GetImportsMixin):
def __init__(
self,
query: Query,
typename: str,
required_imports: typing.Set[str],
row_factory: typing.Callable[[typing.Any], T] | None
):
self.query = query
self.type_code_generated = False
self.typename = typename
self.required_imports = required_imports
self.shape: typing.Optional[ResultShape] = None
self.attributes: typing.Optional[typing.Tuple[ResultAttribute, ...]] = None
self.row_factory = row_factory
self.type_code_generated: bool = False
self.attributes_collected: bool = False
def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):
self.attributes = attrs
def get_imports(self) -> set[str]:
s = set(self.required_imports)
for attr in self.attributes:
s.update(attr.get_imports())
return s
def get_exported_name(self) -> str:
return self.typename
def indent(self, i: int) -> str:
return self.shape.query.querky.get_indent(i)
# Path: querky/module_constructor.py
class ModuleConstructor:
def __init__(
self,
querky: Querky,
module: types.ModuleType,
fullpath: str,
module_path: str,
filedir: str
):
self.module = module
self.querky = querky
self.imports = set(querky.imports)
self.exports = set()
self.fullpath = fullpath
self.module_path = module_path
self.filedir = filedir
self.queries_list = []
def indent(self, i: int) -> str:
return self.querky.get_indent(i)
def _post_init(self):
# Generate module code
code = []
for query in self.queries_list:
query_code = query.generate_code()
if not query_code:
continue
code.append('')
code.append('')
code.extend(query_code)
code.append('')
# Collect imports
for query in self.queries_list:
self.imports.update(query.get_imports())
# Collect exports
for query in self.queries_list:
self.exports.update(query.get_exports())
# Create import lines
imports = [
*getattr(self.module, '__imports__', []),
*self.imports
]
for query in self.queries_list:
imports.append(
f"from {self.module.__name__} import {query.query.__name__} as {query.local_name}"
)
# Imports + Code
code = [
*imports,
*code,
]
# If there are exports, create them at the end of the file (__all__)
if self.exports:
code.append('')
code.append('__all__ = [')
for export in self.exports:
code.append(f'{self.indent(1)}"{export}",')
code.append(']')
code.append('')
self.querky.sign_file_contents(code)
code = '\n'.join(code)
# checking, if file already exists
file_exists = path.isfile(self.fullpath)
if file_exists:
# check, if we can overwrite the contents
self.querky.check_file_is_mine(self.fullpath)
if self.querky.subdir:
os.makedirs(self.filedir, exist_ok=True)
with open(self.fullpath, encoding='utf-8', mode='w') as f:
f.write(code)
async def generate_module(self, db):
for query in self.queries_list:
await query.fetch_types(db)
self._post_init()
def generate_module_sync(self, db):
for query in self.queries_list:
query.fetch_types_sync(db)
self._post_init()
# Path: querky/base_types.py
class TypeMetaData(GetImportsMixin):
counterpart: str
required_imports: set[str] | None = None
def get_imports(self) -> set[str]:
if self.required_imports is None:
return set()
return set(self.required_imports)
@classmethod
def from_type(cls, t: typing.Type) -> TypeMetaData:
type_name = t.__name__
module_path = t.__module__
return TypeMetaData(
counterpart=type_name,
required_imports={f"from {module_path} import {type_name}"}
)
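A brief, hedged usage sketch of `TypeMetaData.from_type` shown above; it assumes the querky package is importable, and `Decimal` is just an arbitrary example type.
# Illustrative usage of TypeMetaData.from_type (Decimal chosen arbitrarily).
from decimal import Decimal
from querky.base_types import TypeMetaData

meta = TypeMetaData.from_type(Decimal)
print(meta.counterpart)    # "Decimal"
print(meta.get_imports())  # {"from decimal import Decimal"}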
# Path: querky/query.py
class Query(typing.Generic[RS]):
defaults: dict[str, typing.Any]
def __init__(
self,
func: typing.Callable,
shape: typing.Callable[[Query], RS],
module: ModuleConstructor,
conn_param_config: ConnParamConfig,
explicit_name: typing.Optional[str],
parent_query: typing.Optional[Query[One | All]],
kwargs: typing.Optional[typing.Dict[str, typing.Any]]
) -> None:
self.parent_query: Query[One | All] | None = parent_query
self.imports = set()
self.kwargs = kwargs or dict()
self.query = func
self.name = explicit_name or func.__name__
self.conn_param_config = conn_param_config
self.sig = inspect.signature(func)
self.template_signature = None
self.module = module
self.module.queries_list.append(self)
self.param_mapper: ParamMapper = self.contract.create_param_mapper(self)
self.sql = self.param_mapper.parametrize_query()
self.default = DictGetAttr(self.param_mapper.defaults)
# side effect: attr gets populated, so we flush it
self.attr_hints: dict[str, Attr] = {
a.name: a
for a in _attr_.__getattrs__()
}
module_filename = self.module.module.__file__
common = path.commonprefix([module.querky.basedir, module_filename])
self.relative_path = module_filename[len(common):]
self.unique_name = f"{self.relative_path}:{self.query.__name__}"
self.local_name = self.get_local_name()
self.query_signature: QuerySignature | None = None
self.conn_type_knowledge: TypeKnowledge | None = None
self.bound_type = None
self.shape: ResultShape = shape(self)
if not isinstance(self.shape, (One, All)) and parent_query:
raise ValueError("Only One and All queries can have a parent query.")
if parent_query and not isinstance(parent_query.shape, (One, All)):
raise ValueError("Parent query must be of either One or All shape.")
logger.debug(
"Query: %s\nSQL: %s",
self.unique_name, self.sql
)
@property
def annotation_generator(self):
return self.querky.annotation_generator
@property
def contract(self):
return self.module.querky.contract
@property
def querky(self):
return self.module.querky
def bind_type(self, t) -> None:
self.bound_type = t
async def execute(self, conn, *args, **kwargs):
params = self.param_mapper.map_params(*args, **kwargs)
return await self.shape.fetch(conn, params)
def execute_sync(self, conn, *args, **kwargs):
params = self.param_mapper.map_params(*args, **kwargs)
return self.shape.fetch_sync(conn, params)
def _after_types_fetched(self):
# pass the parameter types on to the mapper
self.param_mapper.assign_type_knowledge(self.query_signature.parameters)
# and the attribute types to the result shape
self.shape.set_attributes(self.query_signature.attributes)
async def fetch_types(self, db) -> None:
try:
self.query_signature = await self.contract.get_query_signature(db, self)
self._after_types_fetched()
except QueryInitializationError:
raise
except Exception as ex:
raise QueryInitializationError(self, additional_hint="fetching types") from ex
def fetch_types_sync(self, db) -> None:
try:
self.query_signature = self.contract.get_query_signature_sync(db, self)
self._after_types_fetched()
except QueryInitializationError:
raise
except Exception as ex:
raise QueryInitializationError(self, additional_hint="fetching types") from ex
def string_signature(self):
return f"{self.relative_path}: {self.query.__name__}{self.sig}"
def get_local_name(self) -> str:
return f"_q{self.module.queries_list.index(self)}"
def _generate_proxy_function_code(self):
try:
new_params = []
for param in self.param_mapper.params:
name = param.name
old_param = param.param
if old_param.default is not inspect._empty:
default = ReprHelper(f"{self.local_name}.default.{name}")
else:
default = inspect._empty
typehint = param.type_knowledge.typehint
if typehint is None:
raise QueryInitializationError(
self,
f"{param.name}: parameter type annotation is missing"
)
new_params.append(
Parameter(
name,
old_param.kind,
annotation=ReprHelper(typehint),
default=default
)
)
conn_param, type_knowledge, index = self.conn_param_config.create_parameter(
self,
new_params,
self.contract.get_connection_type_metadata()
)
self.conn_type_knowledge = type_knowledge
self.annotation_generator.annotate(type_knowledge, context='conn_param')
if type_knowledge.typehint is not None:
conn_param = conn_param.replace(annotation=ReprHelper(type_knowledge.typehint))
new_params.insert(index, conn_param)
return_annotation = self.shape.get_annotation()
if return_annotation is None:
raise QueryInitializationError(
self,
f"return type annotation is missing"
)
return_annotation_repr = ReprHelper(return_annotation)
self.new_signature = self.sig.replace(
parameters=new_params,
return_annotation=return_annotation_repr
)
is_async = self.contract.is_async()
async_ = 'async ' if is_async else ''
await_ = 'await ' if is_async else ''
_sync = "_sync" if not is_async else ''
conn_str = self.conn_param_config.name
arg_remap_string = self.param_mapper.mirror_arguments()
arg_string = f"{conn_str}, {arg_remap_string}"
try:
code = [
f"{async_}def {self.name}{self.new_signature}:",
f"{self.querky.get_indent(1)}return {await_}{self.local_name}.execute{_sync}({arg_string})"
]
except Exception as _ex:
# for debugging
raise
logger.debug('[OK] - %s', self.unique_name)
return code
except Exception as ex:
logger.exception('[BAD] - %s', self.unique_name)
raise ex
def get_type_bind_ident(self) -> typing.Optional[str]:
if isinstance(self.shape, (Value, Column, Status)):
return None
elif isinstance(self.shape, (One, All)):
if self.shape.ctor:
return self.shape.ctor.typename
return None
def get_exports(self):
exports = {
self.name,
*self.shape.get_exports()
}
if parent := self.parent_query:
parent_shape = parent.shape
if not isinstance(parent_shape, (One, All)):
raise ValueError("parent shape must be ether One or All")
shape: typing.Union[One, All] = parent_shape
exports.add(shape.ctor.typename)
return exports
def get_imports(self):
imports = set(self.imports)
for elem in self.param_mapper.params:
imports.update(elem.get_imports())
if self.conn_type_knowledge is not None:
imports.update(self.conn_type_knowledge.get_imports())
if (parent := self.parent_query) and parent.module is not self.module:
parent_shape = parent.shape
if isinstance(parent_shape, (One, All)):
imports.add(
f"from {parent.module.module_path} import {parent_shape.ctor.typename}"
)
else:
raise ValueError("you can only use return types from 'one' and 'many' queries")
else:
# we're gonna create the type from scratch, so we need the imports
imports.update(self.shape.get_imports())
return imports
def generate_code(self):
lines = []
# data type code
if type_code := self.shape.generate_type_code():
if cb := self.module.querky.on_before_type_code_emit:
type_code = cb(type_code, self)
lines.extend(type_code)
lines.append('')
lines.append('')
# proxy function code, which simply accepts annotated arguments and proxies the call to this query
func_code = self._generate_proxy_function_code()
if cb := self.module.querky.on_before_func_code_emit:
func_code = cb(func_code, self)
lines.extend(func_code)
if bound_type_ident := self.get_type_bind_ident():
# binding return type to the underlying query
lines.append('')
lines.append(f'{self.local_name}.bind_type({bound_type_ident})')
return lines
def __call__(self, conn, *args, **kwargs):
if self.contract.is_async():
return self.execute(conn, *args, **kwargs)
else:
return self.execute_sync(conn, *args, **kwargs)
# Path: querky/contract.py
class Contract(ABC):
@abstractmethod
def create_param_mapper(self, query: Query) -> ParamMapper:
...
@abstractmethod
def get_default_record_type_metadata(self) -> TypeMetaData:
...
@abstractmethod
def get_connection_type_metadata(self) -> TypeMetaData:
...
@abstractmethod
async def get_query_signature(self, db, query: Query) -> QuerySignature:
...
@abstractmethod
def get_query_signature_sync(self, db, query: Query) -> QuerySignature:
...
@abstractmethod
def is_async(self) -> bool:
...
@abstractmethod
async def fetch_value(self, conn, query: Query, bound_params):
...
@abstractmethod
async def fetch_one(self, conn, query: Query, bound_params):
...
@abstractmethod
async def fetch_all(self, conn, query: Query, bound_params):
...
@abstractmethod
async def fetch_column(self, conn, query: Query, bound_params):
...
@abstractmethod
async def fetch_status(self, conn, query: Query, bound_params):
...
@abstractmethod
async def raw_execute(self, conn, sql: str, params):
...
@abstractmethod
async def raw_fetchval(self, conn, sql: str, params):
...
@abstractmethod
async def raw_fetchone(self, conn, sql: str, params):
...
@abstractmethod
async def raw_fetch(self, conn, sql: str, params):
...
@abstractmethod
def fetch_value_sync(self, conn, query: Query, bound_params):
...
@abstractmethod
def fetch_one_sync(self, conn, query: Query, bound_params):
...
@abstractmethod
def fetch_all_sync(self, conn, query: Query, bound_params):
...
@abstractmethod
def fetch_column_sync(self, conn, query: Query, bound_params):
...
@abstractmethod
def fetch_status_sync(self, conn, query: Query, bound_params):
...
@abstractmethod
async def raw_execute_sync(self, conn, sql: str, params):
...
@abstractmethod
async def raw_fetchval_sync(self, conn, sql: str, params):
...
@abstractmethod
def raw_fetchone_sync(self, conn, sql: str, params):
...
@abstractmethod
def raw_fetch_sync(self, conn, sql: str, params):
...
# Path: querky/querky.py
import importlib
import types
import typing
import inspect
import os
import logging
from types import ModuleType
from os import path
from querky.result_shape import one_, all_, value_, status_, column_, One, All, ResultShape
from querky.conn_param_config import ConnParamConfig, First
from querky.annotation_generator import AnnotationGenerator
from querky.type_constructor import TypeConstructor
from querky.module_constructor import ModuleConstructor
from querky.base_types import TypeMetaData
from querky.query import Query
from querky.contract import Contract
class Querky:
def __init__(
self,
basedir: str | None = None,
annotation_generator: AnnotationGenerator | None = None,
contract: Contract | None = None,
conn_param_config: ConnParamConfig | None = None,
type_factory: typing.Callable[[Query, str], TypeConstructor] | None = None,
subdir: str | None = "queries",
on_before_func_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None,
on_before_type_code_emit: typing.Optional[typing.Callable[[typing.List[str], Query], typing.List[str]]] = None,
imports: typing.Optional[typing.Set[str]] = None,
indent: str = ' ',
query_class: typing.Type[Query] = Query
):
self.basedir = basedir
self.on_before_func_code_emit = on_before_func_code_emit
self.on_before_type_code_emit = on_before_type_code_emit
self.query_class = query_class
self.imports = imports or set()
self.indent = indent
self.annotation_generator = annotation_generator
self.module_ctors: dict[types.ModuleType, ModuleConstructor] = dict()
self.type_factory = type_factory
if conn_param_config is None:
conn_param_config = First(name='__conn', positional=True)
self.conn_param_config = conn_param_config
self.contract = contract
self.subdir = subdir
if self.subdir and not str.isidentifier(self.subdir):
raise ValueError("subdir must be a valid python identifier")
self.file_signature = "# ~ AUTOGENERATED BY QUERKY ~ #"
def get_indent(self, i: int):
return self.indent * i
def create_query(
self,
fn: typing.Callable[[...], str],
shape: typing.Callable[[Query], ResultShape],
conn_param_config: ConnParamConfig | None,
explicit_name: str | None,
parent_query: typing.Optional[Query],
kwargs: typing.Optional[typing.Dict[str, typing.Any]]
) -> Query:
module = inspect.getmodule(fn)
if module in self.module_ctors:
module_ctor = self.module_ctors[module]
else:
filename = self.generate_filename(module)
if not str.isidentifier(filename):
raise ValueError(f"Generated a filename which is not a valid python identifier: {filename}")
filedir = path.dirname(module.__file__)
new_module_name = module.__name__.rsplit('.', maxsplit=1)[0]
if self.subdir:
filedir = path.join(filedir, self.subdir)
new_module_name = f"{new_module_name}.{self.subdir}"
fullpath = path.join(filedir, f'{filename}.py')
new_module_name = f"{new_module_name}.{filename}"
module_ctor = ModuleConstructor(self, module, fullpath, new_module_name, filedir)
self.module_ctors[module] = module_ctor
return self.query_class(
fn,
shape,
module_ctor,
self.conn_param_config or conn_param_config,
explicit_name,
parent_query,
kwargs
)
def query(
self,
arg: str | TypeMetaData | Query | typing.Callable[[...], str] | None = None,
*,
shape: ShapeStringRepr = 'status',
optional: bool | None = None,
**kwargs
) -> QueryDef | Query:
def wrapper(fn: typing.Callable[[...], str]) -> Query:
nonlocal optional
if shape in ['many', 'one']:
if isinstance(arg, TypeMetaData):
raise ValueError(
"TypeMetaData is not supported for `many` or `one` constructors. "
"Use it only for `one` and `column` constructors."
)
if not isinstance(arg, Query):
if arg is None:
# if we don't have a name provided for us, we're gonna create it out of the function name
type_name = to_camel_case(fn.__name__)
else:
type_name = arg
if not type_name.isidentifier():
raise ValueError(f"Name type should be a valid python identifier. You provided: {type_name}")
else:
type_name = None
type_name: str | None
if shape == 'many':
if optional is not None:
raise TypeError(
'ALL constructor does not accept `optional` flag -- '
'at least an empty set will always be returned'
====REPOSITORY====
# Repo Name: javrtg/C2P
# Path: nonmin_pose/models/c2p.py
class C2P(NonMinRelPoseBase):
SDP_COMPUTES_POSE = True
DEFAULT_PARAMS = {
"E": Parameter("E", 1, list(range(1, 10))),
"t": Parameter("t", 1, list(range(10, 13))),
"q": Parameter("q", 1, list(range(13, 16))),
"h": Parameter("h", 1, [16]),
"sct": Parameter("sct", 2, [1]),
"scr": Parameter("scr", 3, [1]),
# "scr2": Parameter("scr2", 4, [1]),
}
DEFAULT_CONSTRAINTS = {
"manif_def_left": [0],
"manif_def_right": [0],
"norm_t": None,
"norm_q": None,
"homogenization": None,
"adjoint": None,
"norm_e": None,
"cheirality_translation_v2": None,
"cheirality_rotation": None,
# "right_null_space": None,
# "left_null_space": None,
# "cheirality_rotation_q": None,
}
def _get_params_and_manager(self, params=None, constraints=None):
params = self.DEFAULT_PARAMS if params is None else {p.name: p for p in params}
constraints = self.DEFAULT_CONSTRAINTS if constraints is None else constraints
params_list = list(params.values())
manager = ConstraintManager(params_list, constraints, self.cfg["use_top_k"])
return params, manager
def retrieve_solution(self) -> Dict[str, Union[np.ndarray, bool]]:
X = self.npt_problem.getResultYMat(self.params["h"].block)
_, svals, Vt = np.linalg.svd(X)
idx = 0
E01 = Vt[idx, :9].reshape(3, 3)
t01 = Vt[idx, 9:12].reshape(3, 1)
q = Vt[idx, 12:15].reshape(3, 1)
h = Vt[idx, 15]
E01, t01, q = (E01, t01, q) if h > 0 else (-E01, -t01, -q)
sct = self.npt_problem.getResultYMat(2)[0, 0]
is_pure_rot = sct < self.cfg["th_pure_rot_sdp"]
if sct < self.cfg["th_pure_rot_noisefree_sdp"]:
# improve numerical conditioning.
_, _, Vt = np.linalg.svd(X[:15, :15])
E01_ = Vt[idx, :9].reshape(3, 3)
t01_ = Vt[idx, 9:12].reshape(3, 1)
q_ = Vt[idx, 12:15].reshape(3, 1)
# correct sign.
id_mx = np.abs(t01).argmax()
E01, t01, q = (
(E01_, t01_, q_)
if t01[id_mx, 0] * t01_[id_mx, 0] > 0
else (-E01_, -t01_, -q_)
)
# manifold projections.
Ue, _, Vte = np.linalg.svd(E01)
E01 = Ue[:, :2] @ Vte[:2]
t01 = t01 / np.linalg.norm(t01)
q = q / np.linalg.norm(q)
R01 = rot_given_Etq(E01, t01, q)
# check optimality.
eps = self.cfg["th_rank_optimality"]
is_optimal = (svals > eps).sum() <= 3
return {
"R01": R01,
"t01": t01,
"E01": E01,
"is_optimal": is_optimal,
"is_pure_rot": is_pure_rot,
}
# Path: nonmin_pose/models/c2p.py
class C2PFast(NonMinRelPoseBase):
SDP_COMPUTES_POSE = True
PARAMS = {
"E": Parameter("E", 1, list(range(1, 10))),
"t": Parameter("t", 1, list(range(10, 13))),
"q": Parameter("q", 1, list(range(13, 16))),
"h": Parameter("h", 1, [16]),
"sct": Parameter("sct", 2, [1]),
"scr": Parameter("scr", 3, [1]),
}
CONSTRAINTS = {
"norm_t": None,
"norm_q": None,
"homogenization": None,
"adjoint": None,
"norm_e": None,
"cheirality_translation_v2": None,
"cheirality_rotation": None,
}
def _get_params_and_manager(self, *args, **kwargs):
params_list = list(self.PARAMS.values())
manager = ConstraintManager(params_list, self.CONSTRAINTS)
return self.PARAMS, manager
def retrieve_solution(self) -> Dict[str, Union[np.ndarray, bool]]:
X = self.npt_problem.getResultYMat(self.params["h"].block)
_, svals, Vt = np.linalg.svd(X)
idx = 0
E01 = Vt[idx, :9].reshape(3, 3)
t01 = Vt[idx, 9:12].reshape(3, 1)
q = Vt[idx, 12:15].reshape(3, 1)
h = Vt[idx, 15]
E01, t01, q = (E01, t01, q) if h > 0 else (-E01, -t01, -q)
sct = self.npt_problem.getResultYMat(2)[0, 0]
is_pure_rot = sct < self.cfg["th_pure_rot_sdp"]
if sct < self.cfg["th_pure_rot_noisefree_sdp"]:
# improve numerical conditioning.
_, _, Vt = np.linalg.svd(X[:15, :15])
E01_ = Vt[idx, :9].reshape(3, 3)
t01_ = Vt[idx, 9:12].reshape(3, 1)
q_ = Vt[idx, 12:15].reshape(3, 1)
# correct sign.
id_mx = np.abs(t01).argmax()
E01, t01, q = (
(E01_, t01_, q_)
if t01[id_mx, 0] * t01_[id_mx, 0] > 0
else (-E01_, -t01_, -q_)
)
# manifold projections.
Ue, _, Vte = np.linalg.svd(E01)
E01 = Ue[:, :2] @ Vte[:2]
t01 = t01 / np.linalg.norm(t01)
q = q / np.linalg.norm(q)
R01 = rot_given_Etq(E01, t01, q)
# check optimality.
eps = self.cfg["th_rank_optimality"]
is_optimal = (svals > eps).sum() <= 3
return {
"R01": R01,
"t01": t01,
"E01": E01,
"is_optimal": is_optimal,
"is_pure_rot": is_pure_rot,
}
# Path: nonmin_pose/models/essential_mat_gsalguero.py
class EssentialGSalguero(NonMinRelPoseBase):
"""Essential matrix solver using García-Salguero's ADJ (adjugate) method [1].
[1] A Tighter Relaxation for the Relative Pose Problem Between Cameras,
M. Garcia-Salguero, J. Briales and J. Gonzalez-Jimenez.
"""
SDP_COMPUTES_POSE = False
PARAMS = {
"E": Parameter("E", 1, list(range(1, 10))),
"t": Parameter("t", 2, list(range(1, 4))),
"q": Parameter("q", 2, list(range(4, 7))),
}
CONSTRAINTS = {
"manif_def_left": [0],
"manif_def_right": [0],
"norm_t": None,
"norm_q": None,
"adjoint": None,
"norm_e": None,
}
def _get_params_and_manager(self, *args, **kwargs):
params_list = list(self.PARAMS.values())
manager = ConstraintManager(params_list, self.CONSTRAINTS)
return self.PARAMS, manager
def retrieve_solution(self) -> Dict[str, Union[np.ndarray, bool]]:
E, t, q = self.params["E"], self.params["t"], self.params["q"]
Eb, tb, qb = E.block, t.block, q.block
assert Eb != tb == qb
# get X = xx^\top.
Xe = self.npt_problem.getResultYMat(Eb)
Xtq = self.npt_problem.getResultYMat(tb)
# check optimality.
eps = self.cfg["th_rank_optimality"]
_, svals_e, Vt_e = np.linalg.svd(Xe)
svals_tq = np.linalg.svd(Xtq, compute_uv=False)
is_optimal = (svals_e > eps).sum() == 1 == (svals_tq > eps).sum()
E01 = Vt_e[0].reshape(3, 3)
return {"E01": E01, "is_optimal": is_optimal}
# Path: nonmin_pose/models/essential_mat_zhao.py
class EssentialZhao(NonMinRelPoseBase):
"""Essential matrix estimation using Zhao's method [1].
[1] An efficient solution to non-minimal case essential matrix estimation, J. Zhao.
"""
SDP_COMPUTES_POSE = False
PARAMS = {
"E": Parameter("E", 1, list(range(1, 10))),
"t": Parameter("t", 1, list(range(10, 13))),
}
CONSTRAINTS = {"manif_def_left": None, "norm_t": None}
def _get_params_and_manager(self, *args, **kwargs):
params_list = list(self.PARAMS.values())
manager = ConstraintManager(params_list, self.CONSTRAINTS)
return self.PARAMS, manager
def retrieve_solution(self) -> Dict[str, Union[np.ndarray, bool]]:
"""Get the essential matrix and check its optimality."""
E, t = self.params["E"], self.params["t"]
Eb, tb = E.block, t.block
assert Eb == tb
# get X = xx^\top.
X = self.npt_problem.getResultYMat(Eb)
Xe, Xt = X[:9, :9], X[9:, 9:]
# check optimality.
eps = self.cfg["th_rank_optimality"]
_, svals_e, Vt_e = np.linalg.svd(Xe)
_, svals_t, _ = np.linalg.svd(Xt)
is_optimal = (svals_e > eps).sum() == 1 == (svals_t > eps).sum()
E01 = Vt_e[0].reshape(3, 3)
return {"E01": E01, "is_optimal": is_optimal}
# Path: nonmin_pose/utils.py
def compute_data_matrix_C(
f0: np.ndarray, f1: np.ndarray, w: Optional[np.ndarray] = None
) -> np.ndarray:
"""Compute the data matrix C from the bearing vectors.
Args:
f0: (3, n) bearing vectors in camera 0.
f1: (3, n) bearing vectors in camera 1.
w: (n,) array of weights for each epipolar residual. (default: None)
Returns:
C: (9, 9) data matrix.
"""
assert w is None or w.ndim == 1, "w must be a 1D array."
n = f0.shape[1]
f0_kron_f1 = (n**-0.5) * (f0[:, None] * f1).reshape(9, n)
if w is None:
return f0_kron_f1 @ f0_kron_f1.T
return w * f0_kron_f1 @ f0_kron_f1.T
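To make the shape bookkeeping in `compute_data_matrix_C` concrete, here is a standalone sketch (numpy only, random unit bearings; not part of the repository) that mirrors the unweighted branch and checks that C is a symmetric 9x9 matrix.
# Standalone sketch mirroring compute_data_matrix_C (unweighted branch).
import numpy as np

rng = np.random.default_rng(0)
n = 50
f0 = rng.normal(size=(3, n)); f0 /= np.linalg.norm(f0, axis=0)
f1 = rng.normal(size=(3, n)); f1 /= np.linalg.norm(f1, axis=0)

f0_kron_f1 = (n ** -0.5) * (f0[:, None] * f1).reshape(9, n)
C = f0_kron_f1 @ f0_kron_f1.T
assert C.shape == (9, 9) and np.allclose(C, C.T)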
# Path: nonmin_pose/utils.py
def decompose_essmat(
U: np.ndarray,
Vt: np.ndarray,
f0: np.ndarray,
f1: np.ndarray,
th_pure_rotation: float = 1 - 1e-8,
) -> Tuple[np.ndarray, np.ndarray, bool]:
"""Decompose the essential matrix into relative rotation and (normalized)
translation.
The extraction of the 4 possible relative pose factorizations given an essential
matrix, follows the approach explained in [1, Sec. 9.6.2].
To select the best pose candidate, we check the sign of the factor that
multiplies each bearing vector. This factor must be positive since a bearing
vector is equivalent to $f_i := X_i / ||X_i||$, where $X_i$ is the corresponding
3D point. Thus, to recover the 3D point, we multiply $f$ with an estimated
scalar factor that *must* be positive. This constraint is independent of the
camera model used (pinhole, fisheye, omnidirectional etc.), thus the camera
model is not a limiting factor for this approach.
To compute the scalar factor (the norm of X_i), we use the classic midpoint
method (see e.g. [2]). However, instead of explicitly computing (triangulating)
the 3D points, we just compute the sign of the scalar factors (lambdas). As a
result, we save some computation. Specifically, we avoid computing:
1) the term (sin angle(f0, R01@f1))^2 = || f0 x R01@f1 ||^2, for each point, and
2) the XY coordinates of each 3D point.
[1]: Multiple View Geometry in Computer Vision, Hartley and Zisserman, 2003.
[2]: Triangulation: why optimize?, Lee and Civera, 2019.
Args:
U: (3, 3) left singular vectors of the essential matrix.
Vt: (3, 3) right singular vectors of the essential matrix.
f0: (3, n) bearing vectors in camera 0.
f1: (3, n) bearing vectors in camera 1.
th_pure_rotation: threshold for checking if the motion is a pure rotation.
Returns:
R: (3, 3) rotation matrix.
t: (3, 1) translation vector.
is_pure_rotation: True if a (near-)pure rotation is detected.
"""
# avoid reflection (ensure rotation) when decomposing the essential matrix.
Vt[2] = -Vt[2] if np.linalg.det(U) * np.linalg.det(Vt) < 0 else Vt[2]
Ra, Rb = U @ _W @ Vt # (2, 3, 3)
ta, tb = U[:, 2:], -U[:, 2:]
# check if it is a pure rotation.
is_pure_rotation, choice = check_pure_rotation(
f0, np.stack((Ra, Rb)) @ f1, th_pure_rotation
)
# (Ra, ta)
Raf1 = Ra @ f1
lambda0_rhs = (
np.cross((Raf1).T, f0.T)[:, None] @ np.cross((Raf1).T, ta.T)[..., None]
)
lambda1_rhs = np.cross((Raf1).T, f0.T)[:, None] @ np.cross(f0.T, ta.T)[..., None]
npos_aa = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()
# (Rb, ta)
Rbf1 = Rb @ f1
lambda0_rhs = (
np.cross((Rbf1).T, f0.T)[:, None] @ np.cross((Rbf1).T, ta.T)[..., None]
)
lambda1_rhs = np.cross((Rbf1).T, f0.T)[:, None] @ np.cross(f0.T, ta.T)[..., None]
npos_ba = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()
# (Ra, tb)
lambda0_rhs = (
np.cross((Raf1).T, f0.T)[:, None] @ np.cross((Raf1).T, tb.T)[..., None]
)
lambda1_rhs = np.cross((Raf1).T, f0.T)[:, None] @ np.cross(f0.T, tb.T)[..., None]
npos_ab = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()
# (Rb, tb)
lambda0_rhs = (
np.cross((Rbf1).T, f0.T)[:, None] @ np.cross((Rbf1).T, tb.T)[..., None]
)
lambda1_rhs = np.cross((Rbf1).T, f0.T)[:, None] @ np.cross(f0.T, tb.T)[..., None]
npos_bb = ((lambda0_rhs > 0) & (lambda1_rhs > 0)).sum()
npos_tpos = np.r_[npos_aa, npos_ba]
npos_tneg = np.r_[npos_ab, npos_bb]
if is_pure_rotation and (npos_tpos[choice] == npos_tneg[choice] == 0):
# Pure rotation with perfect bearings alignment by just rotating them.
R01 = Ra if choice == 0 else Rb
return R01, ta, is_pure_rotation
if is_pure_rotation:
# Pure rotation with imperfect bearings alignment. Choose the translation
# candidate that satisfies the most the positive-norm bearings' constraint.
t01 = ta if npos_tpos[choice] >= npos_tneg[choice] else tb
R01 = Ra if choice == 0 else Rb
return R01, t01, is_pure_rotation
# Otherwise, select the candidate that satisfies the most the positive-norm
# bearings' constraint.
choice, npos = max(
enumerate((npos_tpos[0], npos_tpos[1], npos_tneg[0], npos_tneg[1])),
key=lambda x: x[1],
)
t01 = ta if choice < 2 else tb
R01 = Rb if choice % 2 else Ra
return R01, t01, is_pure_rotation
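The docstring above refers to the standard four-candidate factorization of an essential matrix from [1, Sec. 9.6.2]. As a hedged, standalone reminder of that step only (numpy; it does not reproduce the midpoint-based disambiguation performed by the function above):
# Standalone sketch of the four (R, t) candidates of an essential matrix,
# following the Hartley-Zisserman decomposition referenced in the docstring.
import numpy as np

W = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])

def pose_candidates(E):
    U, _, Vt = np.linalg.svd(E)
    # avoid a reflection so that both rotation candidates are proper (det = +1)
    if np.linalg.det(U) * np.linalg.det(Vt) < 0:
        Vt[2] = -Vt[2]
    Ra, Rb = U @ W @ Vt, U @ W.T @ Vt
    ta, tb = U[:, 2:], -U[:, 2:]
    return [(Ra, ta), (Ra, tb), (Rb, ta), (Rb, tb)]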
# Path: experiments/runtimes.py
from pathlib import Path
from exp_utils import SyntheticData
from nonmin_pose import C2P, C2PFast, EssentialGSalguero, EssentialZhao
from nonmin_pose.utils import compute_data_matrix_C, decompose_essmat
import cv2
import numpy as np
import perfplot
def zhao_midpoint(f0, f1):
return ess_zhao(f0, f1, do_disambiguation=True, use_opencv=False) # type: ignore
def zhao_triangulation(f0, f1):
return ess_zhao(f0, f1, do_disambiguation=True, use_opencv=True) # type: ignore
def salguero_midpoint(f0, f1):
return ess_salguero(f0, f1, do_disambiguation=True, use_opencv=False) # type: ignore
def salguero_triangulation(f0, f1):
return ess_salguero(f0, f1, do_disambiguation=True, use_opencv=True) # type: ignore
def c2p(f0, f1):
return nonmin_relpose(f0, f1)
def c2p_fast(f0, f1):
return nonmin_relpose_fast(f0, f1)
def sample_data(n):
data = dataset.generate_data(max_npoints=n, noise_level=0.0)
return data
def monkeypatch_call(
self, f0, f1, do_disambiguation=True, already_normalized=False, use_opencv=False
):
"""function for monkey-patching the __call__ method of the base class to include
OpenCV's recoverPose() disambiguation, which triangulates the correspondences."""
sh0, sh1 = f0.shape, f1.shape
assert sh0 == sh1 and len(sh0) == 2 and sh0[0] == 3 and sh0[1] >= 5
if not already_normalized:
f0 = f0 / np.linalg.norm(f0, axis=0)
f1 = f1 / np.linalg.norm(f1, axis=0)
C = compute_data_matrix_C(f0, f1)
self.solveSDP(C, f0, f1)
sol = self.retrieve_solution()
if not self.SDP_COMPUTES_POSE:
U, _, Vt = np.linalg.svd(sol["E01"])
sol["E01"] = U[:, :2] @ Vt[:2]
if do_disambiguation and not use_opencv:
====REPOSITORY====
# Repo Name: Jack24658735/FedLGT
# Path: dataloaders/voc2007_20.py
class Voc07Dataset(torch.utils.data.Dataset):
def __init__(self, img_dir='./data/VOCdevkit/VOC2007/JPEGImages', anno_path='./data/VOCdevkit/VOC2007/Main/trainval.txt', image_transform=None, labels_path='./data/VOCdevkit/VOC2007/Annotations',known_labels=0,testing=False,use_difficult=False):
self.img_names = []
with open(anno_path, 'r') as f:
self.img_names = f.readlines()
self.img_dir = img_dir
self.num_labels = 20
self.known_labels = known_labels
self.testing=testing
self.labels = []
for name in self.img_names:
label_file = os.path.join(labels_path,name[:-1]+'.xml')
label_vector = np.zeros(self.num_labels)
DOMTree = xml.dom.minidom.parse(label_file)
root = DOMTree.documentElement
objects = root.getElementsByTagName('object')
for obj in objects:
if (not use_difficult) and (obj.getElementsByTagName('difficult')[0].firstChild.data) == '1':
continue
tag = obj.getElementsByTagName('name')[0].firstChild.data.lower()
label_vector[int(category_info[tag])] = 1.0
self.labels.append(label_vector)
# self.labels = np.array(self.labels).astype(np.float32)
self.labels = np.array(self.labels).astype(int)
self.image_transform = image_transform
self.epoch = 1
def __getitem__(self, index):
name = self.img_names[index][:-1]+'.jpg'
image = Image.open(os.path.join(self.img_dir, name)).convert('RGB')
if self.image_transform:
image = self.image_transform(image)
labels = torch.Tensor(self.labels[index])
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels,self.epoch)
mask = labels.clone()
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample = {}
sample['image'] = image
sample['labels'] = labels
sample['mask'] = mask
sample['imageIDs'] = str(name)
return sample
def __len__(self):
return len(self.img_names)
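Voc07Dataset above (and the other dataset classes that follow) mark a random subset of label positions as unknown by writing -1 into a copy of the label vector with `scatter_`. A hedged standalone sketch of just that pattern; the label vector and the index choice here are hypothetical.
# Standalone sketch of the unknown-label masking pattern used by the datasets.
import random
import torch

labels = torch.tensor([1.0, 0.0, 1.0, 0.0, 0.0])
num_labels, known_labels = labels.numel(), 2
unk_mask_indices = random.sample(range(num_labels), num_labels - known_labels)

mask = labels.clone()
mask.scatter_(0, torch.tensor(unk_mask_indices, dtype=torch.long), -1)
print(mask)  # known positions keep their label value, the rest become -1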
# Path: dataloaders/vg500_dataset.py
class VGDataset(torch.utils.data.Dataset):
def __init__(self, img_dir, img_list, image_transform,label_path,known_labels=40,testing=False):
with open(img_list, 'r') as f:
self.img_names = f.readlines()
with open(label_path, 'r') as f:
self.labels = json.load(f)
self.image_transform = image_transform
self.img_dir = img_dir
self.num_labels= 500
self.known_labels = known_labels
self.testing=testing
self.epoch = 1
def __getitem__(self, index):
name = self.img_names[index][:-1]
img_path = os.path.join(self.img_dir, name)
image = image_loader(img_path,self.image_transform)
label = np.zeros(self.num_labels).astype(np.float32)
label[self.labels[name]] = 1.0
label = torch.Tensor(label)
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask = label.clone()
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample = {}
sample['image'] = image
sample['labels'] = label
sample['mask'] = mask
sample['imageIDs'] = name
return sample
def __len__(self):
return len(self.img_names)
# Path: dataloaders/coco80_dataset.py
class Coco80Dataset(Dataset):
def __init__(self, split,num_labels,data_file,img_root,annotation_dir,max_samples=-1,transform=None,known_labels=0,testing=False,analyze=False):
self.split=split
self.split_data = pickle.load(open(data_file,'rb'))
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing=testing
self.epoch = 1
def __len__(self):
return len(self.split_data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image_ID = self.split_data[idx]['file_name']
img_name = os.path.join(self.img_root,image_ID)
image = image_loader(img_name,self.transform)
labels = self.split_data[idx]['objects']
labels = torch.Tensor(labels)
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask = labels.clone()
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample = {}
sample['image'] = image
sample['labels'] = labels
sample['mask'] = mask
sample['imageIDs'] = image_ID
return sample
# Path: dataloaders/news500_dataset.py
class NewsDataset(torch.utils.data.Dataset):
def __init__(self, ann_dir,split='train',transform=None,known_labels=0,testing=False):
# Load training data.
self.ann_dir = ann_dir # the path you save train_split.p and caption_test_space.npy
self.split = split
self.transform = transform
self.num_labels = 500
self.known_labels = known_labels
self.testing=testing
# Load annotations.
print(('Loading %s Label annotations...') % self.split)
self.annData = pickle.load(open(os.path.join(ann_dir, '%s_split.p' % self.split),'rb'))
self.targets = torch.Tensor(np.load(open(os.path.join(ann_dir, 'caption_%s_space.npy' % self.split),'rb')))
self.epoch = 1
def __getitem__(self, index):
sample = self.annData[index]
img_path = sample['file_path']
image_id = sample['image_id']
img_path = img_path.replace('/localtmp/data/data/',self.ann_dir)
image = image_loader(img_path,self.transform)
labels = self.targets[index, :]
mask = labels.clone()
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample = {}
sample['image'] = image
sample['labels'] = labels
sample['mask'] = mask
sample['imageIDs'] = str(image_id)
return sample
def __len__(self):
return len(self.annData)
# Path: dataloaders/coco1000_dataset.py
class Coco1000Dataset(torch.utils.data.Dataset):
def __init__(self, annotation_dir,image_dir,split='train',transform = None,known_labels=0,testing=False):
# Load training data.
self.split = split
self.image_dir = image_dir
self.transform = transform
self.testing=testing
self.num_labels = 1000#num_labels
self.epoch = 1
self.known_labels = known_labels
# Load annotations.
print(('\nLoading %s object annotations...') % self.split)
self.objData = json.load(open(os.path.join(annotation_dir, 'captions_' + self.split + '2014.json')))
self.imageIds = [entry['id'] for entry in self.objData['images']]
self.imageNames = [entry['file_name'] for entry in self.objData['images']]
self.imageId2index = {image_id: idx for (idx, image_id) in enumerate(self.imageIds)}
if os.path.exists("data/coco/coco_words_vocabulary.p"):
self.vocabulary = pickle.load(open('data/coco/coco_words_vocabulary.p', 'rb'))
else:
self.vocabulary = get_vocab(self.objData)
label_file_path = os.path.join(annotation_dir, '1000_labels_' + self.split + '2014.npy')
if os.path.exists(label_file_path):
print('Loading labels')
self.labels = np.load(label_file_path)
else:
print('Preparing label space')
lem = WordNetLemmatizer()
self.labels = np.zeros((len(self.objData['images']), len(self.vocabulary[0])))
for (i, entry) in enumerate(self.objData['annotations']):
# if i % 10000 == 0: print('.'),
image_id = entry['image_id']
caption = entry['caption']
for word in word_tokenize(caption.lower()):
word = lem.lemmatize(word)
if word in self.vocabulary[1].keys():
self.labels[self.imageId2index[image_id], self.word2id(word)] = 1
np.save(label_file_path, self.labels)
def getLabelWeights(self):
return (self.labels == 0).sum(axis = 0) / self.labels.sum(axis = 0)
def decodeCategories(self, labelVector):
return [self.id2word(idx) for idx in np.nonzero(labelVector)[0]]
def id2word(self, idx):
return self.vocabulary[0][idx]
def word2id(self, word):
return self.vocabulary[1][word]
def imageName(self, index):
return self.split + '2014/' + self.imageNames[index]
def __getitem__(self, index):
split_str = self.split if (self.split != 'test') else 'val'
imageName_ = split_str + '2014/' + self.imageNames[index]
image = pil_loader(os.path.join(self.image_dir, imageName_))
if self.transform is not None:
image = self.transform(image)
sample = {'image': image,'labels':torch.Tensor(self.labels[index, :])}
mask = sample['labels'].clone()
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample['mask'] = mask
sample['imageIDs'] = imageName_
return sample
def __len__(self):
return len(self.imageIds)
def numCategories(self):
return len(self.vocabulary[0])
# Path: dataloaders/cub312_dataset.py
class CUBDataset(Dataset):
def __init__(self, img_dir, img_list, image_transform,known_labels=0,attr_group_dict=None,testing=False,n_groups=1):
with open(img_list, "rb" ) as f:
self.labels = pickle.load(f)
self.image_transform = image_transform
self.img_dir = img_dir
self.num_concepts= 112
self.num_labels= 200
# np.random.seed()
self.attr_group_dict = attr_group_dict
known_indices = []
for group in np.random.choice(28, n_groups, replace=False):
known_indices += attr_group_dict[group]
self.group_unk_mask = np.ones(self.num_concepts)
self.group_unk_mask[known_indices] = 0
self.known_labels = known_labels
self.testing=testing
self.epoch = 1
def __getitem__(self, index):
name = self.labels[index]['img_path']
name = name.replace('/juice/scr/scr102/scr/thaonguyen/CUB_supervision/datasets/CUB_200_2011/images/','')
img_path = os.path.join(self.img_dir, name)
image = image_loader(img_path,self.image_transform)
concept = torch.Tensor(self.labels[index]['attribute_label'])
class_label = torch.Tensor([self.labels[index]['class_label']])
concept_certainty = torch.Tensor(self.labels[index]['attribute_certainty'])
unk_mask_indices = get_unk_mask_indices_cub(image,self.testing,self.num_concepts,self.known_labels,np.copy(self.group_unk_mask),self.attr_group_dict,concept_certainty)
mask = concept.clone()
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
class_mask = torch.Tensor(self.num_labels).fill_(-1)
mask = torch.cat((mask,class_mask),0)
sample = {}
sample['image'] = image
sample['labels'] = concept
sample['class_label'] = class_label
sample['concept_certainty'] = concept_certainty
sample['mask'] = mask
sample['imageIDs'] = name
return sample
def __len__(self):
return len(self.labels)
# Path: dataloaders/flair_dataset.py
class FlairDataset(Dataset):
def __init__(self, split, num_labels, data_file, img_root,max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = h5py.File('/media/liujack/flair_hdf5/cent_data.hdf5', 'r')
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.image_id_list = list(self.split_data[self.split].keys())
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
img = np.array(self.split_data[self.split][image_ID]['image'])
image = self.transform(img)
user_id = np.array(self.split_data[self.split][image_ID]['user_id'])
if self.fine_grained_label_mapping != None:
# fine grained labels are used
labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
else:
# coarse grained labels are used
labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask = labels.clone()
# perform the random masking 25%
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample = {}
sample['image'] = image
sample['labels'] = labels
sample['mask'] = mask
sample['imageIDs'] = image_ID
return sample
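FlairDataset above (and the federated variant that follows) builds a multi-hot target by OR-ing per-label indicator rows taken from a label mapping. A hedged standalone sketch of that step; the label names and the mapping are made up for illustration.
# Standalone sketch of the multi-hot construction used in the FLAIR loaders.
label_mapping = {
    "animal":    [1, 0, 0, 0],
    "outdoor":   [0, 1, 0, 0],
    "food":      [0, 0, 1, 0],
    "structure": [0, 0, 0, 1],
}
labels_str = ["animal", "outdoor"]

tran_labels = [0] * 4
for label in labels_str:
    tran_labels = list(map(lambda x, y: x | y, tran_labels, label_mapping[label]))
assert tran_labels == [1, 1, 0, 0]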
# Path: dataloaders/flair_dataset_fed.py
class FlairFedDataset(Dataset):
def __init__(self, inp_data, split, num_labels, data_file, img_root, curr_user=None, max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairFedDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = inp_data
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.curr_user = curr_user
self.image_id_list = list(self.split_data[self.split][self.curr_user]['image_ids'])
self.image_list = list(self.split_data[self.split][self.curr_user]['images'])
self.label_list = list(self.split_data[self.split][self.curr_user]['labels'])
self.fg_label_list = list(self.split_data[self.split][self.curr_user]['fine_grained_labels'])
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
# img = np.array(self.split_data[self.split][self.curr_user][image_ID]['image'])
img = self.image_list[idx]
image = self.transform(img)
if self.fine_grained_label_mapping != None:
# fine grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
labels_str = self.fg_label_list[idx]
else:
# coarse grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
labels_str = self.label_list[idx]
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels)
mask = labels.clone()
# perform the random masking 25%
mask.scatter_(0,torch.Tensor(unk_mask_indices).long() , -1)
sample = {}
sample['image'] = image
sample['labels'] = labels
sample['mask'] = mask
sample['imageIDs'] = image_ID
return sample
# Path: load_data.py
import torch
import numpy as np
import os, random
import json
import h5py
import warnings
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from dataloaders.voc2007_20 import Voc07Dataset
from dataloaders.vg500_dataset import VGDataset
from dataloaders.coco80_dataset import Coco80Dataset
from dataloaders.news500_dataset import NewsDataset
from dataloaders.coco1000_dataset import Coco1000Dataset
from dataloaders.cub312_dataset import CUBDataset
from dataloaders.flair_dataset import FlairDataset
from dataloaders.flair_dataset_fed import FlairFedDataset
warnings.filterwarnings("ignore")
def get_data(args, curr_user=None):
dataset = args.dataset
data_root = args.dataroot
batch_size = args.batch_size
rescale = args.scale_size
random_crop = args.crop_size
attr_group_dict = args.attr_group_dict
workers = args.workers
n_groups = args.n_groups
normTransform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
scale_size = rescale
crop_size = random_crop
if args.test_batch_size == -1:
args.test_batch_size = batch_size
trainTransform = transforms.Compose([
transforms.Resize((scale_size, scale_size)),
transforms.Resize((crop_size, crop_size)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normTransform])
testTransform = transforms.Compose([
transforms.Resize((scale_size, scale_size)),
transforms.CenterCrop(crop_size),
transforms.ToTensor(),
normTransform])
test_dataset = None
test_loader = None
drop_last = False
if dataset == 'coco':
coco_root = os.path.join(data_root,'coco')
ann_dir = os.path.join(coco_root,'annotations_pytorch')
train_img_root = os.path.join(coco_root,'train2014')
test_img_root = os.path.join(coco_root,'val2014')
train_data_name = 'train.data'
val_data_name = 'val_test.data'
# Note: the val_test means the validation set and test set are combined
# 20000 + 20504 = 40504 images
train_dataset = Coco80Dataset(
split='train',
num_labels=args.num_labels,
data_file=os.path.join(coco_root,train_data_name),
img_root=train_img_root,
annotation_dir=ann_dir,
max_samples=args.max_samples,
transform=trainTransform,
known_labels=args.train_known_labels,
testing=False)
valid_dataset = None
valid_loader = None
test_dataset = Coco80Dataset(split='val',
num_labels=args.num_labels,
data_file=os.path.join(coco_root,val_data_name),
img_root=test_img_root,
annotation_dir=ann_dir,
max_samples=args.max_samples,
transform=testTransform,
known_labels=args.test_known_labels,
testing=True)
elif dataset == 'coco1000':
ann_dir = os.path.join(data_root,'coco','annotations_pytorch')
data_dir = os.path.join(data_root,'coco')
train_img_root = os.path.join(data_dir,'train2014')
| test_img_root = os.path.join(data_dir,'val2014') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AgriCodeHub/dairy-django-backend
# Path: core/choices.py
class CowBreedChoices(models.TextChoices):
"""
Enumeration of choices for representing different cow breeds.
Choices:
- `FRIESIAN`: Represents the Friesian cow breed.
- `SAHIWAL`: Represents the Sahiwal cow breed.
- `JERSEY`: Represents the Jersey cow breed.
- `GUERNSEY`: Represents the Guernsey cow breed.
- `CROSSBREED`: Represents a crossbreed of cows.
- `AYRSHIRE`: Represents the Ayrshire cow breed.
Usage:
This enumeration provides predefined choices for the cow breed field in the CowBreed model.
Use these choices when defining or querying CowBreed instances to represent specific cow breeds.
Example:
```
class CowBreed(models.Model):
name = models.CharField(max_length=50, choices=CowBreedChoices.choices)
```
"""
FRIESIAN = "Friesian"
SAHIWAL = "Sahiwal"
JERSEY = "Jersey"
GUERNSEY = "Guernsey"
CROSSBREED = "Crossbreed"
AYRSHIRE = "Ayrshire"
# Path: core/choices.py
class CowAvailabilityChoices(models.TextChoices):
"""
Choices for the availability status of a cow.
Choices:
- `ALIVE`: Cow is alive and active.
- `SOLD`: Cow has been sold.
- `DEAD`: Cow has died.
Usage:
These choices represent the availability status of a cow in the Cow model.
Use these choices when defining or querying Cow instances to represent the current status of a cow.
Example:
```
class Cow(models.Model):
availability_status = models.CharField(max_length=50, choices=CowAvailabilityChoices.choices)
```
"""
ALIVE = "Alive"
SOLD = "Sold"
DEAD = "Dead"
# Path: core/choices.py
class CowPregnancyChoices(models.TextChoices):
"""
Choices for the pregnancy status of a cow.
Choices:
- `OPEN`: Cow is not pregnant.
- `PREGNANT`: Cow is pregnant.
- `CALVED`: Cow has calved.
- `UNAVAILABLE`: Cow cannot have pregnancy status.
Usage:
These choices represent the pregnancy status of a cow in the Cow model.
Use these choices when defining or querying Cow instances to represent the current pregnancy status of a cow.
Example:
```
class Cow(models.Model):
current_pregnancy_status = models.CharField(max_length=15, choices=CowPregnancyChoices.choices)
```
"""
OPEN = "Open"
PREGNANT = "Pregnant"
CALVED = "Calved"
UNAVAILABLE = "Unavailable"
# Path: core/choices.py
class CowCategoryChoices(models.TextChoices):
"""
Choices for the category of a cow.
Choices:
- `CALF`: Represents a calf.
- `WEANER`: Represents a weaner.
- `HEIFER`: Represents a heifer.
- `BULL`: Represents a bull.
- `MILKING_COW`: Represents a milking cow.
Usage:
These choices represent the category of a cow in the Cow model.
Use these choices when defining or querying Cow instances to represent the category of a cow.
Example:
```
class Cow(models.Model):
category = models.CharField(max_length=15, choices=CowCategoryChoices.choices)
```
"""
CALF = "Calf"
WEANER = "Weaner"
HEIFER = "Heifer"
BULL = "Bull"
MILKING_COW = "Milking Cow"
# Path: core/choices.py
class CowProductionStatusChoices(models.TextChoices):
"""
Choices for the production status of a cow.
Choices:
- `OPEN`: Cow is open (not pregnant or lactating).
- `PREGNANT_NOT_LACTATING`: Cow is pregnant but not lactating.
- `PREGNANT_AND_LACTATING`: Cow is pregnant and lactating.
- `DRY`: Cow is dry (not lactating).
- `CULLED`: Cow has been culled.
- `QUARANTINED`: Cow is quarantined.
- `BULL`: Represents a bull.
- `YOUNG_BULL`: Represents a young bull.
- `YOUNG_HEIFER`: Represents a young heifer.
- `MATURE_BULL`: Represents a mature bull.
- `CALF`: Represents a calf.
- `WEANER`: Represents a weaner.
Usage:
These choices represent the production status of a cow in the Cow model.
Use these choices when defining or querying Cow instances to represent the current production status of a cow.
Example:
```
class Cow(models.Model):
current_production_status = models.CharField(max_length=15, choices=CowProductionStatusChoices.choices)
```
"""
OPEN = "Open"
PREGNANT_NOT_LACTATING = "Pregnant not Lactating"
PREGNANT_AND_LACTATING = "Pregnant and Lactating"
DRY = "Dry"
CULLED = "Culled"
QUARANTINED = "Quarantined"
BULL = "Bull"
YOUNG_BULL = "Young Bull"
YOUNG_HEIFER = "Young Heifer"
MATURE_BULL = "Mature Bull"
CALF = "Calf"
WEANER = "Weaner"
# Path: core/models.py
class Cow(models.Model):
"""
Represents an individual cow in the dairy farm.
Attributes:
- `name` (str): The name of the cow.
- `breed` (CowBreed): The breed of the cow.
- `date_of_birth` (date): The birthdate of the cow.
- `gender` (str): The gender of the cow.
- `availability_status` (str): The availability status of the cow.
- `sire` (Cow or None): The sire (father) of the cow.
- `dam` (Cow or None): The dam (mother) of the cow.
- `current_pregnancy_status` (str): The current pregnancy status of the cow.
- `category` (str): The category of the cow.
- `current_production_status` (str): The current production status of the cow.
- `date_introduced_in_farm` (date): The date the cow was introduced to the farm.
- `is_bought` (bool): Indicates whether the cow was bought or not.
- `date_of_death` (date or None): The date of death of the cow, if applicable.
"""
name = models.CharField(max_length=35)
breed = models.ForeignKey(CowBreed, on_delete=models.PROTECT, related_name="cows")
date_of_birth = models.DateField()
gender = models.CharField(max_length=6, choices=SexChoices.choices)
availability_status = models.CharField(
choices=CowAvailabilityChoices.choices,
default=CowAvailabilityChoices.ALIVE,
max_length=5,
)
current_pregnancy_status = models.CharField(
choices=CowPregnancyChoices.choices,
default=CowPregnancyChoices.UNAVAILABLE,
max_length=12,
)
category = models.CharField(
choices=CowCategoryChoices.choices,
default=CowCategoryChoices.CALF,
max_length=11,
)
current_production_status = models.CharField(
choices=CowProductionStatusChoices.choices,
max_length=22,
default=CowProductionStatusChoices.CALF,
)
is_bought = models.BooleanField(default=False)
sire = models.ForeignKey(
"self", on_delete=models.SET_NULL, null=True, related_name="offspring"
)
dam = models.ForeignKey(
"self", on_delete=models.SET_NULL, null=True, related_name="calves"
)
date_introduced_in_farm = models.DateField(auto_now=True)
date_of_death = models.DateField(null=True)
objects = CowManager()
@property
def tag_number(self):
"""
Returns the tag number of the cow.
"""
return Cow.objects.get_tag_number(self)
@property
def age(self):
"""
Calculates and returns the age of the cow in days.
"""
return Cow.objects.calculate_age(self)
@property
def age_in_farm(self):
"""
Calculates and returns the age of the cow in days since introduction to the farm.
"""
return Cow.objects.calculate_age_in_farm(self)
@property
def parity(self):
"""
Calculates and returns the parity of the cow.
"""
return Cow.objects.calculate_parity(self)
@property
def calf_records(self):
return Cow.objects.get_calf_records(self)
def clean(self):
"""
Performs validation checks before saving the cow.
Raises:
- `ValidationError`: If cow validation fails.
"""
if self.pk:
CowValidator.validate_production_status_2(
self.current_production_status,
self.gender,
self.category,
self.age,
self.calf_records,
self.is_bought,
self,
)
CowValidator.validate_age_category(
self.age,
self.category,
self.gender,
self.calf_records,
self.is_bought,
self,
)
else:
CowValidator.validate_pregnancy_status(
self,
self.age,
self.current_pregnancy_status,
self.availability_status,
self.gender,
)
CowValidator.validate_uniqueness(self.name)
CowValidator.validate_cow_age(self.age, self.date_of_birth)
CowValidator.validate_gender_update(self.pk, self.gender)
CowValidator.validate_sire_dam_relationship(self.sire, self.dam)
CowValidator.validate_production_status_1(
self.current_production_status,
self.gender,
self.age,
)
CowValidator.validate_pregnancy_status(
self,
self.age,
self.current_pregnancy_status,
self.availability_status,
self.gender,
)
CowValidator.validate_date_of_death(
self.availability_status, self.date_of_death
)
def __str__(self):
"""
Returns a string representation of the cow.
"""
return self.tag_number
def save(self, *args, **kwargs):
"""
Overrides the save method to ensure validation before saving.
"""
self.clean()
super().save(*args, **kwargs)
# Path: core/serializers.py
class CowSerializer(serializers.ModelSerializer):
"""
Serializer for the Cow model.
Fields:
- `breed`: A nested serializer field representing the cow breed, using CowBreedSerializer.
- `tag_number`: A read-only field representing the cow's tag number.
- `parity`: A read-only field representing the cow's parity.
- `age`: A read-only field representing the cow's age in days.
- `age_in_farm`: A read-only field representing the cow's age in days since introduction to the farm.
- And more...
Meta:
- `model`: The Cow model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert Cow model instances to JSON representations
and vice versa. It includes nested serialization for the 'breed' field and
read-only fields for additional information such as tag number and age.
Methods:
- `create(validated_data)`: Overrides the default create method to handle nested serialization for the 'breed' field.
- `update(instance, validated_data)`: Overrides the default update method to exclude certain fields from updating.
Example:
```
class Cow(models.Model):
breed = models.ForeignKey(CowBreed, on_delete=models.CASCADE)
tag_number = models.CharField(max_length=20)
parity = models.IntegerField()
age = models.IntegerField()
age_in_farm = models.IntegerField()
class CowSerializer(serializers.ModelSerializer):
breed = CowBreedSerializer()
tag_number = serializers.ReadOnlyField()
parity = serializers.ReadOnlyField()
age = serializers.ReadOnlyField()
age_in_farm = serializers.ReadOnlyField()
class Meta:
model = Cow
fields = "__all__"
```
"""
breed = CowBreedSerializer()
tag_number = serializers.ReadOnlyField()
parity = serializers.ReadOnlyField()
age = serializers.ReadOnlyField()
age_in_farm = serializers.ReadOnlyField()
class Meta:
model = Cow
fields = "__all__"
def create(self, validated_data):
breed_data = validated_data.pop("breed")
breed, _ = CowBreed.objects.get_or_create(**breed_data)
cow = Cow.objects.create(breed=breed, **validated_data)
return cow
def update(self, instance, validated_data):
fields_to_exclude = [
"breed",
"gender",
"sire",
"dam",
"is_bought",
"date_introduced_in_farm",
]
for field in fields_to_exclude:
validated_data.pop(field, None)
return super().update(instance, validated_data)
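`CowSerializer.create()` above resolves the nested breed dictionary with `get_or_create` before saving the cow itself, while `update()` drops the immutable fields. A minimal usage sketch of the create path; the field values are invented, and a configured Django/DRF project plus imports of the serializer and choice enums are assumed:
```
# Hypothetical payload; values are illustrative only.
payload = {
    "name": "Daisy",
    "breed": {"name": CowBreedChoices.FRIESIAN},
    "date_of_birth": "2022-01-15",
    "gender": SexChoices.FEMALE,
    "is_bought": False,
}
serializer = CowSerializer(data=payload)
if serializer.is_valid():
    cow = serializer.save()   # dispatches to create(), which get_or_create()s the CowBreed first
else:
    print(serializer.errors)
```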
# Path: core/utils.py
# Path: reproduction/choices.py
class PregnancyOutcomeChoices(models.TextChoices):
"""
Choices for the outcome of a cow's pregnancy.
Choices:
- `LIVE`: Live birth.
- `STILLBORN`: Stillborn birth.
- `MISCARRIAGE`: Miscarriage.
Usage:
These choices represent the outcome of a cow's pregnancy in the Pregnancy model.
Use these choices when defining or querying Pregnancy instances to represent the outcome of a cow's pregnancy.
Example:
```
class Pregnancy(models.Model):
pregnancy_outcome = models.CharField(
max_length=11, choices=PregnancyOutcomeChoices.choices, null=True
)
```
"""
LIVE = "Live"
STILLBORN = "Stillborn"
MISCARRIAGE = "Miscarriage"
# Path: reproduction/choices.py
class PregnancyStatusChoices(models.TextChoices):
"""
Choices for the pregnancy status of a cow.
Choices:
- `CONFIRMED`: Pregnancy is confirmed.
- `UNCONFIRMED`: Pregnancy is unconfirmed.
- `FAILED`: Pregnancy has failed.
Usage:
These choices represent the pregnancy status in the Pregnancy model.
Use these choices when defining or querying Pregnancy instances to represent the status of a cow's pregnancy.
Example:
```
class Pregnancy(models.Model):
pregnancy_status = models.CharField(
max_length=11,
choices=PregnancyStatusChoices.choices,
default=PregnancyStatusChoices.UNCONFIRMED,
)
```
"""
CONFIRMED = "Confirmed"
UNCONFIRMED = "Unconfirmed"
FAILED = "Failed"
# Path: reproduction/serializers.py
class PregnancySerializer(serializers.ModelSerializer):
"""
Serializer for the Pregnancy model.
Fields:
- `id`: A read-only field representing the unique identifier of the pregnancy.
- `cow`: A nested serializer field representing the cow associated with the pregnancy.
- `start_date`: A date field representing the start date of the pregnancy.
- `date_of_calving`: A date field representing the date of calving.
- `pregnancy_status`: A choice field representing the status of the pregnancy.
- `pregnancy_notes`: A text field representing notes related to the pregnancy.
- `calving_notes`: A text field representing notes related to calving.
- `pregnancy_scan_date`: A date field representing the date of pregnancy scanning.
- `pregnancy_failed_date`: A date field representing the date when the pregnancy failed.
- `pregnancy_outcome`: A choice field representing the outcome of the pregnancy.
Meta:
- `model`: The Pregnancy model for which the serializer is defined.
- `fields`: The fields to include in the serialized representation.
Usage:
Use this serializer to convert Pregnancy model instances to JSON representations
and vice versa. It includes read-only fields for additional information such as
pregnancy duration and due date.
Example:
```
class Pregnancy(models.Model):
cow = models.ForeignKey(Cow, on_delete=models.CASCADE)
start_date = models.DateField()
date_of_calving = models.DateField()
pregnancy_status = models.CharField(max_length=50, choices=PregnancyStatusChoices.choices)
pregnancy_notes = models.TextField()
calving_notes = models.TextField()
pregnancy_scan_date = models.DateField()
pregnancy_failed_date = models.DateField()
pregnancy_outcome = models.CharField(max_length=50, choices=PregnancyOutcomeChoices.choices)
class PregnancySerializer(serializers.ModelSerializer):
due_date = serializers.ReadOnlyField()
pregnancy_duration = serializers.ReadOnlyField()
class Meta:
model = Pregnancy
fields = ("id", "cow", "start_date", "date_of_calving", "pregnancy_status", "pregnancy_notes",
"calving_notes", "pregnancy_scan_date", "pregnancy_failed_date", "pregnancy_outcome",
"pregnancy_duration", "due_date")
```
"""
class Meta:
model = Pregnancy
fields = (
"id",
"cow",
"start_date",
"date_of_calving",
"pregnancy_status",
"pregnancy_notes",
"calving_notes",
"pregnancy_scan_date",
"pregnancy_failed_date",
"pregnancy_outcome",
"pregnancy_duration",
"due_date",
)
# Path: users/choices.py
class SexChoices(models.TextChoices):
MALE = "Male"
FEMALE = "Female"
# Path: tests/production/tests/conftest.py
from datetime import timedelta
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.choices import (
CowBreedChoices,
CowAvailabilityChoices,
CowPregnancyChoices,
CowCategoryChoices,
CowProductionStatusChoices,
)
from core.models import Cow
from core.serializers import CowSerializer
from core.utils import todays_date
from reproduction.choices import PregnancyOutcomeChoices, PregnancyStatusChoices
from reproduction.serializers import PregnancySerializer
from users.choices import SexChoices
import pytest
@pytest.fixture()
@pytest.mark.django_db
def setup_users():
client = APIClient()
# Create farm owner user
farm_owner_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Owner",
"phone_number": "+254787654321",
"sex": SexChoices.MALE,
"is_farm_owner": True,
}
farm_owner_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_owner_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_owner_login_data)
farm_owner_token = response.data["auth_token"]
# Create farm manager user
farm_manager_data = {
"username": "[email protected]",
"email": "[email protected]",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Manager",
"phone_number": "+254755555555",
"sex": SexChoices.MALE,
"is_farm_manager": True,
}
farm_manager_login_data = {
"username": "[email protected]",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_manager_login_data)
farm_manager_token = response.data["auth_token"]
# Create assistant farm manager user
| asst_farm_manager_data = { |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: facebookresearch/chat2map-official
# Path: chat2map/mapping/mapping_models/visual_cnn.py
class VisualEnc(nn.Module):
"""Visual encoder"""
def __init__(self, cfg=None):
"""Takes in RGB images and 90 degree FoV local egocentric map inputs and encodes them"""
super().__init__()
passive_mapping_cfg = cfg.PassiveMapping
sim_cfg = cfg.TASK_CONFIG.SIMULATOR
assert "RGB_SENSOR" in cfg.SENSORS
self._n_inputMap_channels = sim_cfg.EGO_LOCAL_OCC_MAP.NUM_CHANNELS
self._num_out_channels = passive_mapping_cfg.VisualEnc.num_out_channels
assert passive_mapping_cfg.MemoryNet.Transformer.input_size == 2 * self._num_out_channels
cnn_layers = [
conv_block(self._n_inputMap_channels, 64, norm_layer=nn.BatchNorm2d),
conv_block(64, 64, norm_layer= nn.BatchNorm2d),
conv_block(64, 128, padding=(2, 2), norm_layer=nn.BatchNorm2d),
conv_block(128, 256, (3, 3), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),
conv_block(256, self._num_out_channels, (3, 3), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d)
]
self.cnn = nn.Sequential(*cnn_layers)
for module in self.cnn:
for layer in module:
if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
nn.init.kaiming_normal_(
layer.weight, nn.init.calculate_gain("leaky_relu", 0.2)
)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
if layer.affine:
layer.weight.data.fill_(1)
layer.bias.data.zero_()
rgb_cnn_layers = [
conv_block(3, 64, norm_layer=nn.BatchNorm2d),
conv_block(64, 64, norm_layer=nn.BatchNorm2d),
conv_block(64, 128, norm_layer=nn.BatchNorm2d),
conv_block(128, 256, norm_layer=nn.BatchNorm2d),
conv_block(256, self._num_out_channels, norm_layer=nn.BatchNorm2d),
]
self.rgb_cnn = nn.Sequential(*rgb_cnn_layers)
for module in self.rgb_cnn:
for layer in module:
if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
nn.init.kaiming_normal_(
layer.weight, nn.init.calculate_gain("leaky_relu", 0.2)
)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
if layer.affine:
layer.weight.data.fill_(1)
layer.bias.data.zero_()
@property
def is_blind(self):
return False
@property
def n_out_feats(self):
return 16 * 512
def _preprocess_rgb(self, rgb_observations):
return rgb_observations
def forward(self, observations,):
"""Given RGB imags and 90 degree FoV egocentric local occupancy maps, produces visual features"""
assert "occ_map" in observations
occMap_observations = observations["occ_map"]
occMap_observations = occMap_observations.permute(0, 3, 1, 2)
occMap_out = self.cnn(occMap_observations)
assert "rgb" in observations
rgb_observations = observations["rgb"]
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
rgb_observations = rgb_observations.permute(0, 3, 1, 2)
rgb_observations = rgb_observations.float() / 255.0 # normalize RGB
rgb_observations = self._preprocess_rgb(rgb_observations)
rgb_out = self.rgb_cnn(rgb_observations)
out = torch.cat([occMap_out, rgb_out], dim=1)
return out
# Path: chat2map/mapping/mapping_models/visual_cnn.py
class OccMapDec(nn.Module):
"""Occupancy map decoder"""
def __init__(self, passive_mapping_cfg, sim_cfg,):
"""Takes in feature outputs of the transformer decoder and predicts estimates of 360 degree FoV local
egocentric occupancy map targets"""
super().__init__()
self._passive_mapping_cfg = passive_mapping_cfg
self._glob_can_occ_map_ego_crop_cfg = sim_cfg.GT_GLOBAL_CANONICAL_OCC_MAP_EGO_CROP
assert self._glob_can_occ_map_ego_crop_cfg.SIZE in [64, 80, 96, 128]
assert passive_mapping_cfg.MemoryNet.type == "transformer"
assert passive_mapping_cfg.MemoryNet.Transformer.decoder_out_size == 1024
self._n_inputMapFeat_channels = 1024
self._inputFeat_h = 4
self._inputFeat_w = 4
self._input_feat_size = self._n_inputMapFeat_channels * self._inputFeat_h * self._inputFeat_w
if self._glob_can_occ_map_ego_crop_cfg.SIZE == 64:
self.dec_cnn = nn.Sequential(
convT_block(1024, 64 * 8, norm_layer=nn.BatchNorm2d),
convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),
convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),
convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),
convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),
padding=(1, 1), outermost=True, use_sigmoid=True,),
)
elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 80:
self.dec_cnn = nn.Sequential(
conv_block(1024, 64 * 8, kernel_size=(2, 2), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),
convT_block(64 * 8, 64 * 8, norm_layer=nn.BatchNorm2d),
convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),
convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),
convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),
convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),
padding=(1, 1), outermost=True, use_sigmoid=True,),
)
elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 96:
self.dec_cnn = nn.Sequential(
conv_block(1024, 64 * 8, kernel_size=(1, 1), padding=(1, 1), stride=(1, 1), norm_layer=nn.BatchNorm2d),
convT_block(64 * 8, 64 * 8, norm_layer=nn.BatchNorm2d),
convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),
convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),
convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),
convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS, (3, 3), stride=(1, 1),
padding=(1, 1), outermost=True, use_sigmoid=True,),
)
elif self._glob_can_occ_map_ego_crop_cfg.SIZE == 128:
self.dec_cnn = nn.Sequential(
convT_block(1024, 64 * 8, norm_layer=nn.BatchNorm2d),
convT_block(64 * 8, 64 * 4, norm_layer=nn.BatchNorm2d),
convT_block(64 * 4, 64 * 2, norm_layer=nn.BatchNorm2d),
convT_block(64 * 2, 64 * 1, norm_layer=nn.BatchNorm2d),
convT_block(64 * 1, self._glob_can_occ_map_ego_crop_cfg.NUM_CHANNELS,
outermost=True, use_sigmoid=True,),
)
else:
raise NotImplementedError
self.layer_init()
def layer_init(self):
for module in self.dec_cnn:
for layer in module:
if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
nn.init.kaiming_normal_(
layer.weight, nn.init.calculate_gain("relu")
)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
if layer.affine:
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def forward(self, observations,):
"""Given feature outputs of the transformer memory decoder, computes estimates of the 360 degree FoV local
egocentric target occupancy maps"""
assert "memory_outFeats" in observations
memory_outFeats = observations["memory_outFeats"]
assert len(memory_outFeats.size()) == 2
assert memory_outFeats.size(1) == self._input_feat_size
memory_outFeats =\
memory_outFeats.reshape((memory_outFeats.size(0),
self._inputFeat_h,
self._inputFeat_w,
-1))
memory_outFeats = memory_outFeats.permute((0, 3, 1, 2))
out = self.dec_cnn(memory_outFeats)
assert len(out.size()) == 4
# permute tensor to dimension [BATCH x HEIGHT x WIDTH x CHANNEL]
out = out.permute(0, 2, 3, 1)
return out
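`OccMapDec.forward` expects each memory feature to be a flat 4 x 4 x 1024 = 16384-dimensional vector and rearranges it into NCHW layout before the transposed-convolution stack. A shape-only sketch of that rearrangement (random data, batch size chosen arbitrarily):
```
# Shape walk-through for the reshape/permute in OccMapDec.forward (random data).
import torch
B = 2
memory_outFeats = torch.randn(B, 4 * 4 * 1024)   # flat transformer-decoder output
x = memory_outFeats.reshape(B, 4, 4, -1)         # -> (B, 4, 4, 1024), NHWC
x = x.permute(0, 3, 1, 2)                        # -> (B, 1024, 4, 4), NCHW for the conv stack
assert x.shape == (B, 1024, 4, 4)
```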
# Path: chat2map/mapping/mapping_models/audio_cnn.py
class AudioEnc(nn.Module):
"""Audio encoder"""
def __init__(self, cfg,):
"""Transforms the spatial audio into spectrograms and computes their features"""
super().__init__()
self._passive_mapping_cfg = cfg.PassiveMapping
self._task_cfg = cfg.TASK_CONFIG
self._env_cfg = self._task_cfg.ENVIRONMENT
self._sim_cfg = self._task_cfg.SIMULATOR
self._audio_cfg = self._sim_cfg.AUDIO
audioEnc_cfg = self._passive_mapping_cfg.AudioEnc
self._n_input_channels = audioEnc_cfg.num_input_channels
self.stft_model = torchaudio.transforms.Spectrogram(
n_fft=self._audio_cfg.N_FFT,
win_length=self._audio_cfg.WIN_LENGTH,
hop_length=self._audio_cfg.HOP_LENGTH,
power=2,
)
self.model = nn.Sequential(
conv_block(self._n_input_channels, 64, norm_layer=nn.BatchNorm2d),
conv_block(64, 64, (8, 8), stride=(4, 4), padding=(2, 2), norm_layer=nn.BatchNorm2d),
conv_block(64, 128, norm_layer=nn.BatchNorm2d),
conv_block(128, 256, norm_layer=nn.BatchNorm2d),
conv_block(256, self._passive_mapping_cfg.MemoryNet.Transformer.input_size, norm_layer=nn.BatchNorm2d),
)
for module in self.model:
for layer in module:
if isinstance(layer, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
nn.init.kaiming_normal_(
layer.weight, nn.init.calculate_gain("leaky_relu", 0.2)
)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
elif isinstance(layer, (nn.BatchNorm1d, nn.BatchNorm2d)):
if layer.affine:
layer.weight.data.fill_(1)
layer.bias.data.zero_()
@property
def n_out_feats(self):
return 1024
def forward(self, observations):
"""Given the audio waveforms, transforms them into spectrograms and computes their features"""
assert "audio" in observations
audio_wavs = observations["audio"]
audio_wavs = audio_wavs.permute(0, 2, 1)
B = audio_wavs.size(0)
n_channels = audio_wavs.size(1)
audio_mag_spects = self.stft_model(audio_wavs.reshape(audio_wavs.size(0) * audio_wavs.size(1), -1)).pow(0.5)
audio_mag_spects = audio_mag_spects.reshape(B, n_channels, *audio_mag_spects.size()[1:])
out = self.model(audio_mag_spects)
assert out.size(2) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[0]
assert out.size(3) == self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]
return out
# Path: chat2map/mapping/mapping_models/modality_tag_type_net.py
class ModalityTagTypeNet(nn.Module):
"""Takes the modality type tag for a certain modality and produces its embeddings"""
def __init__(self, n_modality_tag_types, passive_mapping_cfg,):
"""
Creates an instance of the class that takes the modality type tag for a certain modality and produces its
embeddings
:param n_modality_tag_types: number of modality tag types
:param passive_mapping_cfg: passive mapping config
"""
super().__init__()
self._positional_net_cfg = passive_mapping_cfg.PositionalNet
self._out_h = self._positional_net_cfg.patch_hwCh[0]
self._out_w = self._positional_net_cfg.patch_hwCh[1]
self._n_out_ch = self._positional_net_cfg.patch_hwCh[2]
assert self._n_out_ch == passive_mapping_cfg.modality_tag_type_encoding_size, \
f"{self._n_out_ch} != {passive_mapping_cfg.modality_tag_type_encoding_size}"
self.modality_tag_type_lookup_dict = nn.Embedding(n_modality_tag_types,
passive_mapping_cfg.modality_tag_type_encoding_size,)
def forward(self, x):
"""Given the modality type tag, computes the modality embeddings"""
out = self.modality_tag_type_lookup_dict(x)
out = out.unsqueeze(-1).unsqueeze(-1)
out = out.repeat((1, 1, self._out_h, self._out_w))
return out
# Path: chat2map/mapping/mapping_models/positional_net.py
class PositionalNet(nn.Module):
"""
Takes in positional attributes and produces their embeddings
"""
def __init__(self, passive_mapping_cfg,):
"""
Creates an instance of the class that takes in positional attributes and produces their embeddings
:param passive_mapping_cfg: passive mapping config
"""
super().__init__()
self._passive_mapping_cfg = passive_mapping_cfg
self._positional_net_cfg = passive_mapping_cfg.PositionalNet
self._n_positional_obs = 5
# source: 1. https://github.com/jalammar/jalammar.github.io/blob/master/notebookes/transformer/transformer_positional_encoding_graph.ipynb
# 2. https://towardsdatascience.com/master-positional-encoding-part-i-63c05d90a0c3
self._freqs = MIN_FREQ ** (2 * (torch.arange(self._positional_net_cfg.num_freqs_for_sinusoidal,
dtype=torch.float32) // 2) /
self._positional_net_cfg.num_freqs_for_sinusoidal)
assert passive_mapping_cfg.MemoryNet.Transformer.input_size == self._positional_net_cfg.patch_hwCh[2]
self._n_out_feats = self._positional_net_cfg.patch_hwCh[2]
self._positional_linear = nn.Sequential(
nn.Linear(self._positional_net_cfg.num_freqs_for_sinusoidal * self._n_positional_obs,
self._n_out_feats,
bias=False),
)
@property
def n_out_feats(self):
return self._n_out_feats
def forward(self, observations):
"""given the positional observations, computes the positional embeddings"""
positional_obs = observations["positional_obs"]
assert len(positional_obs.size()) == 2
assert positional_obs.size(-1) == self._n_positional_obs
freqs = self._freqs.unsqueeze(0).repeat((positional_obs.size(0), 1)).to(positional_obs.device)
positional_net_out = []
for positional_obs_idx in range(self._n_positional_obs):
positional_obs_thisIdx = positional_obs[:, positional_obs_idx].unsqueeze(-1)
positional_obs_thisIdx = positional_obs_thisIdx * freqs
positional_obs_thisIdxClone = positional_obs_thisIdx.clone()
positional_obs_thisIdxClone[..., ::2] = torch.cos(positional_obs_thisIdx[..., ::2])
positional_obs_thisIdxClone[..., 1::2] = torch.sin(positional_obs_thisIdx[..., 1::2])
positional_net_out.append(positional_obs_thisIdxClone)
positional_net_out = torch.cat(positional_net_out, dim=-1)
assert len(positional_net_out.size()) == 2
assert positional_net_out.size(0) == positional_obs.size(0)
assert positional_net_out.size(1) == (self._freqs.size(0) * self._n_positional_obs)
positional_net_out = self._positional_linear(positional_net_out)
positional_net_out = positional_net_out.unsqueeze(-1).unsqueeze(-1)
positional_net_out = positional_net_out.repeat(
(1,
1,
self._positional_net_cfg.patch_hwCh[0],
self._positional_net_cfg.patch_hwCh[1])
)
return positional_net_out
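Both positional nets build standard sinusoidal features: frequencies `MIN_FREQ ** (2 * (k // 2) / K)`, cosine on even slots and sine on odd slots. A standalone sketch of that recipe; `MIN_FREQ` is defined elsewhere in positional_net.py, so the `1e-4` used here is only an assumed illustrative value:
```
# Sinusoidal feature sketch matching the interleaved cos/sin pattern above.
import torch
MIN_FREQ = 1e-4        # assumption for illustration; the real constant lives in positional_net.py
K = 8                  # stands in for num_freqs_for_sinusoidal
freqs = MIN_FREQ ** (2 * (torch.arange(K, dtype=torch.float32) // 2) / K)
pos = torch.tensor([3.0])              # a single scalar positional attribute
feats = pos * freqs                    # shape (K,)
enc = feats.clone()
enc[::2] = torch.cos(feats[::2])       # even indices -> cos
enc[1::2] = torch.sin(feats[1::2])     # odd indices  -> sin
print(enc.shape)                       # torch.Size([8])
```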
# Path: chat2map/mapping/mapping_models/positional_net.py
class PatchPositionalNet(nn.Module):
"""Takes in the positions of the feats corresponding to contiguous patches in an image or an audio spectrogram
in the rasterized order and produces their embeddings"""
def __init__(self, passive_mapping_cfg,):
"""
Creates an instance of the class that takes in the positions of the feats corresponding to contiguous patches
in an image or an audio spectrogram in the rasterized order and produces their embeddings
:param passive_mapping_cfg: passive mapping config
"""
super().__init__()
self._passive_mapping_cfg = passive_mapping_cfg
self._positional_net_cfg = passive_mapping_cfg.PositionalNet
self._n_positional_obs = 1
self._n_out_feats = self._positional_net_cfg.patch_hwCh[2]
# source: 1. https://github.com/jalammar/jalammar.github.io/blob/master/notebookes/transformer/transformer_positional_encoding_graph.ipynb
# 2. https://towardsdatascience.com/master-positional-encoding-part-i-63c05d90a0c3
self._freqs = MIN_FREQ ** (2 * (torch.arange(self._positional_net_cfg.num_freqs_for_sinusoidal,
dtype=torch.float32) // 2) /
self._positional_net_cfg.num_freqs_for_sinusoidal)
self._patch_positional_conv = nn.Sequential(
nn.Conv2d(self._positional_net_cfg.num_freqs_for_sinusoidal *self._n_positional_obs,
self._n_out_feats,
kernel_size=1,
bias=False),
)
positional_net_out = []
for i in range(self._positional_net_cfg.patch_hwCh[0]):
positional_net_out_thisRow = []
for j in range(self._positional_net_cfg.patch_hwCh[1]):
raster_idx = i * self._positional_net_cfg.patch_hwCh[1] + j
positional_obs_thisIdx = raster_idx * self._freqs
positional_obs_thisIdxClone = positional_obs_thisIdx.clone()
positional_obs_thisIdxClone[..., ::2] = torch.cos(positional_obs_thisIdxClone[..., ::2])
positional_obs_thisIdxClone[..., 1::2] = torch.sin(positional_obs_thisIdxClone[..., 1::2])
positional_net_out_thisRow.append(positional_obs_thisIdxClone)
positional_net_out.append(torch.stack(positional_net_out_thisRow, dim=0))
positional_net_out = torch.stack(positional_net_out, dim=0).permute((2, 0, 1))
self._positional_net_out = positional_net_out
assert self._n_out_feats == passive_mapping_cfg.MemoryNet.Transformer.input_size
@property
def n_out_feats(self):
return self._n_out_feats
def forward(self, observations):
positional_obs = observations["positional_obs"]
positional_net_out = self._positional_net_out.unsqueeze(0).repeat((positional_obs.size(0), 1, 1, 1))\
.to(positional_obs.device)
positional_net_out = self._patch_positional_conv(positional_net_out)
return positional_net_out
# Path: chat2map/mapping/mapping_models/fusion_net.py
class FusionNet(nn.Module):
"""Network to fuse modality features, positional embeddings and modality type tag embeddings"""
def __init__(self,):
super().__init__()
def forward(self, observations):
"""fuses given different features"""
for observation_idx, observation in enumerate(observations):
if observation_idx == 0:
out = observation
else:
out = out + observation
return out
# Path: chat2map/mapping/mapping_models/memory_net.py
class TransformerMemory(nn.Module):
"""Transformer memory"""
def __init__(self, cfg):
"""Creates an instance of the transformer memory"""
super().__init__()
self._cfg = cfg
self._passive_mapping_cfg = cfg.PassiveMapping
self._transformer_cfg = self._passive_mapping_cfg.MemoryNet.Transformer
self._task_cfg = cfg.TASK_CONFIG
self._env_cfg = self._task_cfg.ENVIRONMENT
self._sim_cfg = self._task_cfg.SIMULATOR
self.transformer = TransformerWoSelfAttnInDecoder(
d_model=self._transformer_cfg.input_size,
nhead=self._transformer_cfg.nhead,
num_encoder_layers=self._transformer_cfg.num_encoder_layers,
num_decoder_layers=self._transformer_cfg.num_decoder_layers,
dim_feedforward=self._transformer_cfg.hidden_size,
dropout=self._transformer_cfg.dropout,
activation=self._transformer_cfg.activation,
d_model_out=self._transformer_cfg.decoder_out_size,
)
context_length_multiplier = 3
context_length_multiplier *= self._sim_cfg.ALL_AGENTS.NUM
context_length_multiplier *= (self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1])
query_length_multiplier = self._passive_mapping_cfg.PositionalNet.patch_hwCh[0] *\
self._passive_mapping_cfg.PositionalNet.patch_hwCh[1]
self._src_mask = self._convert_attn_masks_to_transformer_format(
torch.ones((self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,
self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,))
)
self._mem_mask = self._convert_attn_masks_to_transformer_format(
torch.ones((self._env_cfg.MAX_QUERY_LENGTH * query_length_multiplier,
self._env_cfg.MAX_CONTEXT_LENGTH * context_length_multiplier,))
)
self._tgt_mask = self._convert_attn_masks_to_transformer_format(
torch.eye(self._env_cfg.MAX_QUERY_LENGTH * query_length_multiplier)
)
def _convert_key_padding_masks_to_transformer_format(self, key_padding_masks):
r"""The key_padding_masks is a FloatTensor with
- 0 for invalid locations, and
- 1 for valid locations.
The required format is a BoolTensor with
- True for invalid locations, and
- False for valid locations
source:
- https://pytorch.org/docs/1.4.0/_modules/torch/nn/modules/transformer.html#TransformerDecoder
- https://discuss.pytorch.org/t/how-to-add-padding-mask-to-nn-transformerencoder-module/63390/3
"""
return (1 - key_padding_masks) > 0
def _convert_attn_masks_to_transformer_format(self, attn_masks):
r"""The attn_masks is a FloatTensor with
- 0 for invalid locations, and
- 1 for valid locations.
The required format is a FloatTensor with
- float('-inf') for invalid locations, and
- 0. for valid locations
source:
- https://pytorch.org/docs/1.4.0/_modules/torch/nn/modules/transformer.html#TransformerDecoder
- https://discuss.pytorch.org/t/how-to-add-padding-mask-to-nn-transformerencoder-module/63390/3
"""
return attn_masks.float().masked_fill(attn_masks == 0, float('-inf')).masked_fill(attn_masks == 1, float(0.0))
def forward(self, observations):
"""computes transformer memory features given observations"""
assert "src_feats" in observations
src_feats = observations["src_feats"]
assert "tgt_feats" in observations
tgt_feats = observations["tgt_feats"]
"""how masks works -- source: https://github.com/pytorch/pytorch/blob/7f73f1d591afba823daa4a99a939217fb54d7688/torch/nn/functional.py#L3360"""
assert "src_key_padding_mask" in observations
src_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations["src_key_padding_mask"])
assert "tgt_key_padding_mask" in observations
tgt_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations["tgt_key_padding_mask"])
assert "memory_key_padding_mask" in observations
memory_key_padding_mask = self._convert_key_padding_masks_to_transformer_format(observations["memory_key_padding_mask"])
self._src_mask = self._src_mask.to(src_feats.device)
self._mem_mask = self._mem_mask.to(memory_key_padding_mask.device)
self._tgt_mask = self._tgt_mask.to(tgt_feats.device)
out = self.transformer(
src_feats,
tgt_feats,
src_mask=self._src_mask,
tgt_mask=self._tgt_mask,
memory_mask=self._mem_mask,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
)
return out
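The two mask helpers in `TransformerMemory` encode PyTorch's transformer conventions: key-padding masks become BoolTensors that are True at padded positions, and attention masks become float tensors with `-inf` at blocked positions and `0.0` at allowed ones. A small standalone check of both conversions:
```
# Standalone check of the two mask conversions used by TransformerMemory.
import torch
valid = torch.tensor([[1., 1., 0.]])      # 1 = valid token, 0 = padding
key_padding = (1 - valid) > 0             # -> tensor([[False, False, True]])
attn_masks = torch.tensor([[1., 0.], [1., 1.]])
attn = attn_masks.float().masked_fill(attn_masks == 0, float("-inf")).masked_fill(attn_masks == 1, float(0.0))
# attn is now [[0., -inf], [0., 0.]], ready for nn.Transformer's *_mask arguments
```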
# Path: chat2map/mapping/passive_mapping/policy.py
import os
import pickle
import math
import numpy as np
import torch
import torch.nn as nn
from torchsummary import summary
from chat2map.mapping.mapping_models.visual_cnn import VisualEnc, OccMapDec
from chat2map.mapping.mapping_models.audio_cnn import AudioEnc
from chat2map.mapping.mapping_models.modality_tag_type_net import ModalityTagTypeNet
from chat2map.mapping.mapping_models.positional_net import PositionalNet, PatchPositionalNet
from chat2map.mapping.mapping_models.fusion_net import FusionNet
from chat2map.mapping.mapping_models.memory_net import TransformerMemory
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Policy(nn.Module):
"""
Parent class of model for passive mapping
"""
def __init__(self,
context_views_enc,
context_audio_enc,
pose_net,
patchPose_net,
modality_tag_type_lookup_dict,
fusion_net,
memory_net,
query_occMap_dec,
cfg
):
"""Given the audio streams and sampled frames during a conversation, the model predicts estimates of target
occupancy maps"""
super().__init__()
self.context_views_enc = context_views_enc
self.context_audio_enc = context_audio_enc
self.pose_net = pose_net
| self.patchPose_net = patchPose_net |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: wrongbad/badcad
# Path: badcad/utils.py
def display(thing,
vscode_fix=True,
wireframe=False,
color='#aaaa22',
smoothing_threshold=-1,
width=640,
height=640,
):
if vscode_fix:
fix_vscode_style()
if isinstance(thing, (tuple, list)):
verts, tris = thing
elif hasattr(thing, 'to_mesh'):
m = thing.to_mesh()
verts = m.vert_properties[...,:3].astype(np.float32)
tris = m.tri_verts.astype(np.uint32)
else:
raise ValueError(f'unsupported thing: {type(thing)}')
box0 = np.min(verts, axis=0)
box1 = np.max(verts, axis=0)
sz = np.linalg.norm(box1-box0)
mid = (box0+box1)/2
verts = verts - mid
tnormals = triangle_normals(verts, tris)
vnormals = smooth_normals(tris, tnormals, smoothing_threshold)
verts = verts[tris]
index = np.arange(tris.size, dtype=np.uint32)
geometry = pythreejs.BufferGeometry(
attributes = dict(
position = pythreejs.BufferAttribute(verts),
normal = pythreejs.BufferAttribute(vnormals),
),
index = pythreejs.BufferAttribute(index)
)
material = pythreejs.MeshPhysicalMaterial(
color = color,
reflectivity = 0.2,
clearCoat = 0.6,
clearCoatRoughness = 0.7,
wireframe = wireframe,
);
threemesh = pythreejs.Mesh(geometry, material)
lights = [
pythreejs.DirectionalLight(
color='white',
position=l[:3],
intensity=l[3],
)
for l in [
(-40, 5, 40, 0.5),
(0, 0, 40, 0.2),
(20, 5, -20, 0.1),
]
]
camera = pythreejs.PerspectiveCamera(
position=[0, 0, sz*1.3],
up=[0, 1, 0],
children=lights,
)
controls = pythreejs.OrbitControls(
controlling=camera,
rotateSpeed=1.0,
zoomSpeed=0.5,
enableZoom=False, # avoid notebook scroll conflict
)
scene = pythreejs.Scene(
children=[
threemesh,
camera,
pythreejs.AmbientLight(color='#aaf')
],
background=None,
)
return pythreejs.Renderer(
camera=camera,
scene=scene,
alpha=True,
clearOpacity=0.2,
controls=[controls],
width=width,
height=height,
)
# Path: badcad/utils.py
def triangle_normals(verts, tris):
a = verts[tris[:,1]] - verts[tris[:,0]]
b = verts[tris[:,2]] - verts[tris[:,1]]
tnormals = np.cross(a, b)
tnormals /= np.linalg.norm(tnormals, axis=-1, keepdims=True)
return tnormals
# Path: badcad/utils.py
def polygon_nearest_alignment(va, vb):
dist = lambda x: np.sum(x ** 2, axis=-1)
j0 = np.argmin(dist(vb - va[0]))
i, j = 0, j0
na, nb = len(va), len(vb)
out = []
while True:
ip1, jp1 = (i+1)%na, (j+1)%nb
d0 = dist(va[ip1] - vb[j])
d1 = dist(va[i] - vb[jp1])
if d0 < d1:
out += [[ip1, j]]
i = ip1
else:
out += [[i, jp1]]
j = jp1
if (i,j) == (0, j0):
break
return out
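`polygon_nearest_alignment` walks both vertex loops greedily, always advancing whichever side yields the shorter connecting edge, and returns the index pairs that later become skirt triangles in `Shape.extrude_to`. A toy run on two squares (coordinates invented for illustration):
```
# Toy run of polygon_nearest_alignment on two axis-aligned squares (invented coords).
import numpy as np
va = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])          # bottom loop
vb = np.array([[0.1, 0.1], [0.9, 0.1], [0.9, 0.9], [0.1, 0.9]])  # top loop, slightly inset
pairs = polygon_nearest_alignment(va, vb)
# Each (i, j) pair connects va[i] to vb[j]; consecutive pairs differ in exactly one
# index, which is why extrude_to can emit one skirt triangle per step of the walk.
print(pairs)
```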
# Path: badcad/utils.py
def svg2polygons(svg, fn=8):
import svgelements
# this lib handles transforms and `use` tags
svg = svgelements.SVG.parse(BytesIO(svg))
polys = []
for e in svg.elements():
if isinstance(e, svgelements.Path):
# TODO policy for unclosed paths
p = PolyPath(fn=fn)
for s in e.segments():
if isinstance(s, svgelements.Move):
p.move(s.end)
elif isinstance(s, svgelements.Line):
p.line(s.end)
elif isinstance(s, svgelements.QuadraticBezier):
p.bez([s.control1, s.end])
elif isinstance(s, svgelements.CubicBezier):
p.bez([s.control1, s.control2, s.end])
elif isinstance(s, svgelements.Close):
p.close()
else:
raise ValueError(f'unsupported segment: {type(s)}')
polys += p.polys
return polys
# Path: badcad/utils.py
def text2svg(text, size=10, font="Helvetica"):
import cairo
memfile = BytesIO()
with cairo.SVGSurface(memfile, size, size) as surface:
ctx = cairo.Context(surface)
ctx.set_font_size(size)
ctx.select_font_face(font,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
ctx.show_text(text)
return memfile.getvalue()
# Path: badcad/utils.py
class PolyPath:
def __init__(self, fn=32):
self.polys = []
self.poly = []
self.pos = (0,0)
self.fn = fn
def move(self, p):
self.pos = p
return self
def line(self, p):
if len(self.poly) == 0:
self.poly += [self.pos]
self.poly += [p]
self.pos = p
return self
def bez(self, pts, fn=0):
if len(self.poly) == 0:
self.poly += [self.pos]
fn = fn or self.fn
vs = [p[0]+p[1]*1j for p in [self.pos, *pts]]
for i in range(1, fn):
n = len(vs) - 1
t = i / fn
u = 1 - t
c = u ** n
v = 0
for j in range(len(vs)):
v += c * vs[j]
c *= t * (n-j) / (u * (1+j))
self.poly += [(v.real, v.imag)]
self.poly += [pts[-1]]
self.pos = pts[-1]
return self
def close(self):
self.polys += [self.poly]
self.poly = []
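`PolyPath` builds one contour at a time: `move()` places the cursor, `line()` and `bez()` append vertices (`bez()` samples the Bernstein form using complex arithmetic), and `close()` flushes the ring into `polys`. A short usage sketch with made-up coordinates:
```
# Minimal PolyPath usage: three straight edges plus one quadratic-Bezier corner.
p = PolyPath(fn=8)
p.move((0, 0))
p.line((1, 0))
p.line((1, 1))
p.bez([(0.5, 1.5), (0, 1)])   # quadratic Bezier from (1, 1) via control (0.5, 1.5) to (0, 1)
p.close()
print(len(p.polys), len(p.polys[0]))   # -> 1 11: one contour, 3 line vertices + 8 Bezier samples
```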
# Path: badcad/badcad.py
import manifold3d
import numpy as np
from manifold3d import Manifold, CrossSection
from .utils import (
display,
triangle_normals,
polygon_nearest_alignment,
svg2polygons,
text2svg,
PolyPath
)
with open(fname, 'wb') as f:
f.write(binary)
return self
else:
return binary
class Shape:
def __init__(self, cross_section = CrossSection()):
self.cross_section = cross_section
def _repr_mimebundle_(self, **kwargs):
# called by jupyter to figure out how to display this object
# we create a scene on the fly with ability to customize
# controls and lights, etc.
return self.extrude(1e-9)._repr_mimebundle_(**kwargs)
def __add__(self, other):
return Shape(self.cross_section + other.cross_section)
def __sub__(self, other):
return Shape(self.cross_section - other.cross_section)
def __and__(self, other):
# manifold3d XOR is actually AND
return Shape(self.cross_section ^ other.cross_section)
def area(self):
return self.cross_section.area()
def bounds(self):
return self.cross_section.bounds()
def align(self,
xmin=None, x=None, xmax=None,
ymin=None, y=None, ymax=None):
x0, y0, x1, y1 = self.bounds()
dx, dy = 0, 0
if xmin is not None: dx = xmin-x0
if x is not None: dx = x-(x0+x1)/2
if xmax is not None: dx = xmax-x1
if ymin is not None: dy = ymin-y0
if y is not None: dy = y-(y0+y1)/2
if ymax is not None: dy = ymax-y1
return self.move(dx, dy)
def decompose(self):
return [Shape(p) for p in self.cross_section.decompose()]
def extrude(self, height, fn=0, twist=0, scale_top=(1,1), center=False):
s = Solid(self.cross_section.extrude(
height,
n_divisions=fn,
twist_degrees=twist,
scale_top=scale_top,
))
return s.move(z=-height/2) if center else s
def extrude_to(self, other, height, center=False):
polys1 = self.to_polygons()
assert len(polys1) == 1, 'extrude_to only supports simple polygons'
verts1 = np.pad(polys1[0], [[0,0],[0,1]], constant_values=0)
N1 = verts1.shape[0]
polys2 = other.to_polygons()
assert len(polys2) == 1, 'extrude_to only supports simple polygons'
verts2 = np.pad(polys2[0], [[0,0],[0,1]], constant_values=height)
# flip the bottom over
tris1 = manifold3d.triangulate(polys1)
tmp = tris1[:, 1].copy()
tris1[:, 1] = tris1[:, 2]
tris1[:, 2] = tmp
# offset top vertex indices
tris2 = manifold3d.triangulate(polys2)
tris2 += N1
alignment = polygon_nearest_alignment(verts1, verts2)
alignment = [(a, b+N1) for a, b in alignment]
# build the skirt faces
tris3 = []
for s in range(len(alignment)):
i, j = alignment[s]
pi, pj = alignment[s-1]
if i != pi:
tris3 += [[pi, i, pj]]
if j != pj:
tris3 += [[i, j, pj]]
tris3 = np.array(tris3)
verts = np.concatenate((verts1, verts2))
tris = np.concatenate((tris1, tris2, tris3))
mesh = manifold3d.Mesh(verts, tris)
s = Solid(Manifold(mesh))
return s.move(z=-height/2) if center else s
def hull(self, *others):
return Shape(CrossSection.batch_hull([self.cross_section, *[o.cross_section for o in others]]))
def is_empty(self):
return self.cross_section.is_empty()
def mirror(self, x=0, y=0):
return Shape(self.cross_section.mirror((x, y)))
def num_contour(self):
return self.cross_section.num_contour()
def num_vert(self):
return self.cross_section.num_vert()
def offset(self, delta, join_type='miter', miter_limit=2, circular_segments=0):
if join_type == 'round':
join_type = manifold3d.JoinType.Round
elif join_type == 'miter':
join_type = manifold3d.JoinType.Miter
| elif join_type == 'square': |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: PeriniM/Rotary-Pendulum-RL
# Path: control/reinforcement_learning/Environments/RealPendulumEnv.py
class RealPendulumEnv(gym.Env):
"""
Real rotary pendulum with ESP32
"""
metadata = {"render_modes": ["human"]}
def __init__(self, port, baudrate, render_mode="human"):
super(RealPendulumEnv, self).__init__()
"""
Initialize the environment.
Args:
port (str): The serial port to connect to.
baudrate (int): The baudrate to use for the serial connection.
render_mode (str, optional): The render mode. Defaults to "human".
Returns:
None
"""
self.ser = serial.Serial(
port=port,
baudrate=baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
self.reader = SerialReader(self.ser, simulation=False)
self.reader.start()
self.render_mode = render_mode
self.name = "RealPendulum"
self.nbJoint = 1
self.num_state = 2
self.action = 0.0
self.motorAngle = 0.0
self.terminated = False
self.truncated = False
self.iterCount = 0
self.maxIter = 1000
self.omega_max = 10.0
self.range_actions = np.array([-1.0, 1.0])
self.range_observation = np.array([-1.0, 1.0])
self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)
self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)
# variable to store angles of one episode
self.episode_angles = []
def reset(self, seed=None, options=None):
"""
Reset the environment to the initial state.
Args:
None
Returns:
state (np.array): [bar angle, bar angular velocity]
info (dict): Episode information
"""
super().reset(seed=seed, options=options)
# Reset the episode angles and the cumulative episode reward
self.episode_angles = []
self.episode_reward = 0.0
# Send command to pendulum to go to home position.
self.send_serial("0,1")
# Wait for the pendulum to report it has finished resetting.
while (1):
self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()
if not self.terminated:
break
# Reset iteration count
self.iterCount = 0
self.info = {"episode": {"r": 0.0, "l": self.iterCount}}
return self.observation_space.astype(np.float32), self.info
def step(self, action):
"""
Take a step in the environment
Args:
action (float): Normalized motor speed command in [-1, 1]; scaled to a percentage before being sent over serial
Returns:
state (np.array): [bar angle, bar angular velocity]
reward (float): Reward for the current state
terminated (bool): Whether the episode is done or not
truncated (bool): Whether the episode is truncated or not
info (dict): Episode information
"""
# Send action to pendulum over serial
self.send_serial(f"{action*100},0")
self.action = action
# Read state and episode done flag from serial
self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()
# Store the angles of the episode for reward penalty
self.episode_angles.append(self.observation_space[0])
# Calculate reward
reward = self.calculate_reward(self.observation_space)
self.episode_reward += reward
self.iterCount += 1
self.reset_policy(self.maxIter)
self.info = {"episode": {"r": self.episode_reward, "l": self.iterCount}}
return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info
def send_serial(self, command):
"""
Send a command to the pendulum over serial
Args:
command (str): [motor speed percentage, reset flag]
Returns:
None
"""
self.ser.write(f"{command}\n".encode())
# time.sleep(0.1)
def reset_policy(self, reset_count=200):
"""
Policy to reset the environment
Args:
reset_count (int, optional): Number of iterations to wait before resetting the system. Defaults to 200.
Returns:
None
"""
if self.iterCount > reset_count:
self.terminated = True
def calculate_reward(self, state):
"""
Calculate the reward for the current state
Args:
state (np.array): [bar angle, bar angular velocity]
Returns:
reward (float): Reward for the current state
"""
# Constants to scale the angle and velocity penalties
ANGLE_WEIGHT = 1.0
VELOCITY_WEIGHT = 0.1
MOTOR_ANGLE_WEIGHT = 1.0
ACTION_WEIGHT = 0.01
# Penalize the angle to be minimized
angle_penalty = ANGLE_WEIGHT * (state[0] ** 2)
# Penalize the angular velocity to be minimized
velocity_penalty = VELOCITY_WEIGHT * (state[1] ** 2)
# Penalize the motor angle to be minimized
motor_angle = self.motorAngle / 180.0
motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (motor_angle ** 2)
# Penalize the action to be minimized
action_penalty = ACTION_WEIGHT * (self.action ** 2)
# Reward is higher when penalties are lower
reward = -(angle_penalty + velocity_penalty + motor_angle_penalty + action_penalty)
# Penalize the reward if the average angle of the episode is close to pi
# after 3/4 of the maximum iterations
if self.iterCount > self.maxIter*3/4:
if np.abs(np.mean(self.episode_angles)) < (np.pi-0.8):
reward-=100.0
# if self.terminated:
# if self.iterCount < self.maxIter*1/10:
# reward-=100.0
return reward
def render(self, camera=False):
"""
Render the state (optional), e.g. display the video stream
"""
if camera:
print("Connect the camera to the pendulum and display the video stream.")
def close(self):
"""
Close the serial connection
Args:
None
Returns:
None
"""
self.ser.close()
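Both pendulum environments follow the standard Gymnasium loop: `reset()` returns `(observation, info)` and `step()` returns a 5-tuple, with the action a normalized scalar in [-1, 1]. A hedged driver sketch; the serial port and baudrate below are placeholders, and actually running it requires the ESP32 hardware:
```
# Hypothetical episode loop for RealPendulumEnv; "/dev/ttyUSB0" and 921600 are placeholders.
env = RealPendulumEnv(port="/dev/ttyUSB0", baudrate=921600, render_mode="human")
obs, info = env.reset()
done = False
while not done:
    action = float(env.action_space.sample()[0])   # random policy, just to exercise the API
    obs, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated
env.close()
```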
# Path: control/reinforcement_learning/Environments/PyBulletPendulumEnv.py
class PyBulletPendulumEnv(gym.Env):
"""
PyBullet Rotary Pendulum
"""
metadata = {"render_modes": ["human"]}
def __init__(self, render_mode="human"):
super(PyBulletPendulumEnv, self).__init__()
"""
Initialize the PyBullet Rotary Pendulum environment
Args:
render (bool, optional): Whether to render the environment. Defaults to True.
Returns:
None
"""
self.render_mode = render_mode
# Initialize PyBullet
if render_mode == "human":
self.physicsClient = p.connect(p.GUI)
else:
self.physicsClient = p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0, 0, -9.806)
# move camera to focus on the robot
p.resetDebugVisualizerCamera(cameraDistance=0.4, cameraYaw=0, cameraPitch=-30, cameraTargetPosition=[0,0,0.1])
# Load the plane and pendulum URDF
self.planeId = p.loadURDF("plane.urdf")
self.load_pendulum_urdf()
# Define other environment parameters
self.name = "PyBulletPendulum"
self.nbJoint = 1
self.num_state = 2
self.action = 0.0
self.n_actions = 101
self.range_actions = np.array([-1.0, 1.0])
self.range_observation = np.array([-1.0, 1.0])
self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)
self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)
self.motorAngle = 0.0
self.terminated = False
self.truncated = False
self.info = {}
self.iterCount = 0
self.maxIter = 1500
self.omega_max = 10.0
self.episode_reward = 0.0
# variable to store angles of one episode
self.episode_angles = []
def load_pendulum_urdf(self):
"""
Load the pendulum URDF into the environment.
Args:
None
Returns:
None
"""
cubeStartPos = [0, 0, 0]
cubeStartOrientation = p.getQuaternionFromEuler([np.pi / 2, 0, 0])
curr_dir = os.path.abspath(os.path.dirname(__file__))
robot_urdf = 'Rotary_Pendulum_URDF.urdf'
# Construct the path to the URDF file
urdf_path = os.path.join(curr_dir, '..', '..', '..', 'simulation', 'urdf', robot_urdf)
self.robotId = p.loadURDF(urdf_path, cubeStartPos, cubeStartOrientation,
# flags=p.URDF_USE_INERTIA_FROM_FILE,
useFixedBase=True
)
# Define joint indices as per your URDF structure
self.motor_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_3')
self.bar_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_5')
# Define real robot parameters
self.steps_per_rev = 3200
self.max_speed_steps_per_sec = 4000.0
# Calculate radians per step
self.radians_per_step = (2 * np.pi) / self.steps_per_rev
# Calculate max speed in radians per second [rad/s]
self.max_motor_speed = self.max_speed_steps_per_sec * self.radians_per_step
# Admissible motor angle range [deg]
self.motor_angle_range = [-150, 150]
self.out_of_range = False
# Compensation angles for the URDF
self.motor_compensation_angle = 0.400
self.bar_compensation_angle = -0.264
def reset(self, seed=None, options=None):
"""
Reset the environment to a random state
Args:
seed (int, optional): Random seed forwarded to gym.Env.reset. Defaults to None.
options (dict, optional): Additional reset options forwarded to gym.Env.reset. Defaults to None.
Returns:
state (np.array): [bar_angle, bar_angular_velocity]
"""
super().reset(seed=seed, options=options)
# Reset the episode angles
self.episode_angles = []
self.episode_reward = 0.0
self.terminated = False
# Send command to pendulum to reset to random position
self.send_fake_serial([0, 1])
# get the state from the pendulum
self.observation_space, self.motorAngle, self.terminated = self.get_state()
# Reset iteration count
self.iterCount = 0
self.info = {"episode": {"r": 0.0, "l": self.iterCount}}
return self.observation_space.astype(np.float32), self.info
def step(self, action):
"""
Take a step in the environment
Args:
action (float): Motor speed command in [-1, 1], scaled to a percentage internally
Returns:
state (np.array): [bar angle, bar angular velocity]
reward (float): Reward for the current state
terminated (bool): Whether the episode is done or not
truncated (bool): Whether the episode is truncated or not
info (dict): Episode information
"""
# multiply the action by 100 to get the percentage
self.action = action*100.0
# Send action to pendulum over serial
self.send_fake_serial([self.action, 0])
# Read state and episode done flag from serial
self.observation_space, self.motorAngle, self.terminated = self.get_state()
# Store the angles of the episode for reward penalty
self.episode_angles.append(self.observation_space[0])
# Calculate reward
reward = self.calculate_reward(self.observation_space)
self.episode_reward += reward
self.iterCount += 1
self.reset_policy(self.maxIter)
self.info = {"episode": {"r": self.episode_reward, "l": self.iterCount}}
# return normalized_state, reward, self.done
return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info
def calculate_reward(self, state):
"""
Calculate the reward for the current state
Args:
state (np.array): [bar angle, bar angular velocity]
Returns:
reward (float): Reward for the current state
"""
# Constants to scale the bar and motor angle penalties
ANGLE_WEIGHT = 1.0
VELOCITY_WEIGHT = 0.1
MOTOR_ANGLE_WEIGHT = 0.001
ACTION_WEIGHT = 0.001
# Calculate the angle penalty
angle_penalty = ANGLE_WEIGHT * (state[0]) ** 2
# Calculate the velocity penalty
velocity_penalty = VELOCITY_WEIGHT * (state[1]) ** 2
# Calculate the motor angle penalty
# motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (self.motorAngle/self.motor_angle_range[1]) ** 2
# Calculate the action penalty
action_penalty = ACTION_WEIGHT * (self.action/100) ** 2
# Calculate the reward
reward = - (angle_penalty + velocity_penalty)
# NEW REWARD FUNCTION
# reward range [-1, 0]
# angle_target = 0.0
# angular_velocity_target = 0.0
# motor_angle_target = 0.0
# reward = -1/2 * (np.abs(state[0] - angle_target)/np.pi + np.abs(self.motorAngle - motor_angle_target)/self.motor_angle_range[1])
# reward = - 1/2 * (np.abs(state[0] - angle_target) + np.abs(state[1] - angular_velocity_target))
# if the episode is done with enough iterations
# if self.iterCount > int(self.maxIter/2) and self.done:
# # if the average of the bar angles is less than 90 degrees
# if np.abs(np.mean(self.episode_angles)) < np.deg2rad(90):
# reward += 100.0
# if the episode is done with not enough iterations
# if self.iterCount < int(self.maxIter/10) and self.terminated:
# # if the motor angle is out of range
# if self.out_of_range:
# reward -= 2000.0
return reward
def reset_policy(self, reset_count=200):
"""
Policy to reset the environment
Args:
reset_count (int, optional): Number of iterations to wait before resetting the system. Defaults to 200.
Returns:
None
"""
if self.iterCount >= reset_count:
self.terminated = True
def send_fake_serial(self, command):
"""
Send a command to the pendulum, simulating a fake serial connection
Args:
command (list): [motor speed percentage, episode done flag]
Returns:
None
"""
motor_speed_percentage = command[0]
episode_done = command[1]
if episode_done:
self.terminated = True
self.reset_robot(mode="random")
else:
self.terminated = False
# Calculate the motor speed in steps per second
motor_speed = motor_speed_percentage * self.max_motor_speed / 100.0
# set the motor velocity
p.setJointMotorControl2(bodyUniqueId=self.robotId,
jointIndex=self.motor_joint_idx,
controlMode=p.VELOCITY_CONTROL,
targetVelocity=motor_speed,
)
# time.sleep(0.1)
def get_state(self):
"""
Read the state from the pendulum, simulating a fake serial connection
Args:
None
Returns:
state (np.array): [bar angle, bar angular velocity]
motor_angle (float): Motor angle in degrees
done (bool): Episode done flag
"""
# Get the bar angle
bar_angle = p.getJointState(self.robotId, self.bar_joint_idx)[0] + self.bar_compensation_angle
# Get bar angular velocity
bar_angular_velocity = p.getJointState(self.robotId, self.bar_joint_idx)[1]
# Get the motor angle
motor_angle = np.rad2deg(p.getJointState(self.robotId, self.motor_joint_idx)[0] + self.motor_compensation_angle)
# Map the motor angle to the correct range
if motor_angle > self.motor_angle_range[1] or motor_angle < self.motor_angle_range[0]:
self.out_of_range = True
else:
self.out_of_range = False
# Adjusting the bar angle to map correctly
bar_angle = bar_angle % (2 * np.pi) # Normalize the angle to be within 0 to 2π
if bar_angle > np.pi:
bar_angle -= 2 * np.pi # Adjust angles greater than π to be between -π to π
if bar_angle > 0:
bar_angle = np.pi - bar_angle
elif bar_angle < 0:
bar_angle = -np.pi - bar_angle
# round the states to 4 decimal places
bar_angle = round(bar_angle/np.pi, 4)
bar_angular_velocity = round(bar_angular_velocity/self.omega_max, 4)
motor_angle = round(motor_angle, 4)
return np.array([bar_angle, bar_angular_velocity]), motor_angle, self.out_of_range
def reset_robot(self, mode="random"):
"""
Reset the robot state
Args:
mode (str, optional): Mode to reset the robot. Defaults to "random".
Returns:
state (np.array): [bar angle, bar angular velocity]
"""
if mode == "random":
# Reset the robot to a random position
bar_angle = np.random.uniform(-np.pi, np.pi)
bar_angular_velocity = np.random.uniform(-self.omega_max, self.omega_max)
motor_angle = np.deg2rad(np.random.uniform(self.motor_angle_range[0], self.motor_angle_range[1]))
# Set the robot to the random position
p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=bar_angle)
# p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=motor_angle)
p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)
# set bar velocity with no force
p.setJointMotorControl2(bodyUniqueId=self.robotId,
jointIndex=self.bar_joint_idx,
controlMode=p.VELOCITY_CONTROL,
targetVelocity=bar_angular_velocity,
force=0
)
elif mode == "home":
# Reset the robot to the home position
p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=-self.bar_compensation_angle)
p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)
# set bar velocity with no force
p.setJointMotorControl2(bodyUniqueId=self.robotId,
jointIndex=self.bar_joint_idx,
controlMode=p.VELOCITY_CONTROL,
targetVelocity=0,
force=0
)
return self.get_state()[0]
def render(self, fps=240.0):
"""
Render the pendulum in PyBullet
Args:
fps (float, optional): Number of frames per second. Defaults to 240.0.
Returns:
None
"""
p.stepSimulation()
if self.render_mode == "human":
time.sleep(1./fps)
def close(self):
"""
Close the PyBullet connection
Args:
None
Returns:
None
"""
p.disconnect()
# Path: control/pid/classes/PIDController.py
class PIDController:
"""
PID Controller class
"""
def __init__(self, Kp, Ki, Kd):
"""
Initialize PID controller gains
Args:
Kp (float): Proportional gain
Ki (float): Integral gain
Kd (float): Derivative gain
"""
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
self.prev_error = 0
self.integral = 0
def compute(self, error):
"""
Compute PID control signal
Args:
error (float): Error signal
Returns:
control_input (float): PID control signal
"""
self.integral += error
derivative = error - self.prev_error
control_input = self.Kp * error + self.Ki * self.integral + self.Kd * derivative
self.prev_error = error
return control_input
def reset(self):
"""
Reset PID controller
"""
self.prev_error = 0
self.integral = 0
# Path: control/pid/src/main.py
from ...reinforcement_learning.Environments import RealPendulumEnv as real
from ...reinforcement_learning.Environments import PyBulletPendulumEnv as pybullet
from ..classes.PIDController import PIDController
import numpy as np
real_pendulum = False
# Example usage
pid = PIDController(Kp=18, Ki=0.01, Kd=0.001)
K_motor = 0.0
desired_bar_angle = 0
desired_bar_velocity = 0
desired_motor_angle = 0
if real_pendulum:
# initialize RealPendulum environment
env = real.RealPendulumEnv("COM3", 115200)
else:
# initialize PyBulletPendulum environment
env = pybullet.PyBulletPendulumEnv(render_mode='human')
env.maxIter = 1_000_000
# reset environment to home position
env.reset()
# get initial observation
observation, reward, done, _, _ = env.step(0)
while True:
# compute error and control signal
error = (desired_bar_angle - observation[0]) + (desired_bar_velocity - observation[1]) # + K_motor * (desired_motor_angle - env.motorAngle)
| control_signal = pid.compute(error) |
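As a side note on the row above: a quick numeric check of the PID law implemented by PIDController.compute. This is an illustrative sketch, assuming the PIDController class shown earlier is importable; the gains are the ones used in main.py and the error value of 0.5 is arbitrary.

pid_check = PIDController(Kp=18, Ki=0.01, Kd=0.001)
# First call: integral = 0.5, derivative = 0.5 - 0 = 0.5
# control = 18*0.5 + 0.01*0.5 + 0.001*0.5
print(pid_check.compute(0.5))  # ~9.0055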
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JayYik/GAN_implementations
# Path: models/DCGAN.py
class DCGAN(nn.Module):
def __init__(self, args):
super(DCGAN, self).__init__()
self.G=DCGAN_G(args.hw,args.z_dim,args.in_channels)
self.D=DCGAN_D(args.hw,args.in_channels)
# self.G.weight_init()
# self.D.weight_init()
self.args=args
self.batch_size=args.batch_size
self.z_dim=args.z_dim
self.bce_loss = nn.BCELoss()
self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g,betas=args.betas)
self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d,betas=args.betas)
self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.9)
self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)
# Recording program start time for log directory naming
program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())
# Logging information
self.information=f'DCGAN-{program_begin_time}'
# TensorBoard SummaryWriter for logging
self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))
def warm_up(self,epoch):
"""
Learning rate warm-up function for the Adam optimizer.
Args:
epoch (int): Current epoch number.
Returns:
float: Adjusted learning rate based on the warm-up strategy.
"""
top_epoch = int(self.args.num_epochs*0.3)
if epoch<top_epoch:
#In the first 30% of epochs, slowly increase the LR to the preset LR
return (epoch+1) / top_epoch
else:
#Drop the LR to half of the preset
return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )
def save_model(self,epoch):
save_path=f'./save/{self.information}'
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')
torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')
self.save_args(save_path)
print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')
def save_args(self,save_path):
argsDict = self.args.__dict__
with open(f'{save_path}/setting.txt', 'w') as f:
f.writelines('------------------ start ------------------' + '\n')
for eachArg, value in argsDict.items():
f.writelines(eachArg + ' : ' + str(value) + '\n')
f.writelines('------------------- end -------------------')
def train(self,train_loader,device):
"""
Training function for the DCGAN model.
Args:
train_loader (DataLoader): DataLoader for training data.
device (torch.device): The device (CPU or GPU) to perform training.
Returns:
None
"""
# Move the model and loss to the specified device
self.G.to(device)
self.D.to(device)
self.bce_loss.to(device)
generator_iter = 0
descriminator_iter = 0
# Training loop
for epoch in range(self.args.num_epochs):
self.t_begin = t.time()
pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)
for i, (images, _) in pbar:
if i == train_loader.dataset.__len__() // self.batch_size:
break
# Generate random noise and labels
z = torch.randn((self.batch_size, self.z_dim, 1, 1))
real_labels = torch.ones(self.batch_size)
fake_labels = torch.zeros(self.batch_size)
# Move data to the specified device
images=images.to(device)
z=z.to(device)
real_labels=real_labels.to(device)
fake_labels=fake_labels.to(device)
# Train Discriminator
real_output = self.D(images)
#print('real_output:',real_output)
fake_images = self.G(z)
fake_output = self.D(fake_images)
d_real_loss = self.bce_loss(real_output.flatten(), real_labels)
d_fake_loss = self.bce_loss(fake_output.flatten(), fake_labels)
#print('real_loss:',d_real_loss.item(),' fake_loss:',d_fake_loss.item())
d_loss = d_real_loss + d_fake_loss
self.D.zero_grad()
d_loss.backward()
self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)
self.optim_d.step()
descriminator_iter+=1
# Train Generator
if i % self.args.des_iter == 0:
#print("i:",i)
self.D.zero_grad()
self.G.zero_grad()
z = torch.randn((self.batch_size, self.z_dim, 1, 1))
z = z.to(device)
fake_images = self.G(z)
fake_output = self.D(fake_images)
fake_score = fake_output.squeeze().mean()
#print('fake_output:',fake_output)
g_loss = self.bce_loss(fake_output.flatten(), real_labels)
g_loss.backward()
pbar.set_postfix({'G_loss': g_loss.item(), 'D_loss': d_loss.item(), 'fake_score': fake_score.item()})
#print('g_loss:',g_loss.item())
self.optim_g.step()
self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)
generator_iter+=1
# Save generated images
if generator_iter % 500 == 0:
if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):
os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')
z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))
z=z.to(device)
samples = self.G(z)
samples = samples.mul(0.5).add(0.5)
samples = samples.data.cpu()[:25]
grid = torchvision.utils.make_grid(samples,nrow=5)
torchvision.utils.save_image(grid, './training_result_{}/img_generator_iter_{}.png'.format(self.args.dataset+'-'+self.information, str(generator_iter).zfill(3)))
# Learning rate scheduling
self.scheduler_optim_d.step()
self.scheduler_optim_g.step()
# Print and log training information
print(self.optim_d.state_dict()['param_groups'][0]['lr'])
print(self.optim_g.state_dict()['param_groups'][0]['lr'])
self.t_end = t.time()
print(
"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]"
% (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))
)
# Save the trained parameters
if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:
self.save_model(epoch)
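To make the warm-up schedule above concrete, here is a small standalone sketch of the multipliers that LambdaLR would apply to the discriminator learning rate. It is illustrative only; num_epochs=10 is an assumed value, not taken from the repository.

num_epochs = 10                       # assumed for illustration
top_epoch = int(num_epochs * 0.3)     # ramp up over the first 30% of epochs
def warm_up(epoch):
    if epoch < top_epoch:
        return (epoch + 1) / top_epoch
    return 1 - 0.5 / (num_epochs - top_epoch) * (epoch - top_epoch)
print([round(warm_up(e), 3) for e in range(num_epochs)])
# [0.333, 0.667, 1.0, 1.0, 0.929, 0.857, 0.786, 0.714, 0.643, 0.571]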
# Path: models/GAN.py
class GAN(nn.Module):
def __init__(self, args):
super(GAN, self).__init__()
self.G=GAN_G(args.hw,args.z_dim,args.in_channels)
self.D=GAN_D(args.hw,args.in_channels)
self.args=args
self.batch_size=args.batch_size
self.z_dim=args.z_dim
self.bce_loss = nn.BCELoss()
self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g,betas=args.betas)
self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d,betas=args.betas)
# Recording program start time for log directory naming
program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())
# Logging information
self.information=f'GAN-{program_begin_time}'
# TensorBoard SummaryWriter for logging
self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))
def save_model(self,epoch):
save_path=f'./save/{self.information}'
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')
torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')
self.save_args(save_path)
print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')
def save_args(self,save_path):
argsDict = self.args.__dict__
with open(f'{save_path}/setting.txt', 'w') as f:
f.writelines('------------------ start ------------------' + '\n')
for eachArg, value in argsDict.items():
f.writelines(eachArg + ' : ' + str(value) + '\n')
f.writelines('------------------- end -------------------')
def train(self,train_loader,device):
"""
Training function for the GAN model.
Args:
train_loader (DataLoader): DataLoader for training data.
device (torch.device): The device (CPU or GPU) to perform training.
Returns:
None
"""
# Move the model and loss to the specified device
self.G.to(device)
self.D.to(device)
self.bce_loss.to(device)
generator_iter = 0
descriminator_iter = 0
# Training loop
for epoch in range(self.args.num_epochs):
self.t_begin = t.time()
pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)
for i, (images, _) in pbar:
if i == train_loader.dataset.__len__() // self.batch_size:
break
# Generate random noise and labels
z = torch.randn((self.batch_size, self.z_dim))
real_labels = torch.ones(self.batch_size)
fake_labels = torch.zeros(self.batch_size)
# Move data to the specified device
images=images.to(device)
z=z.to(device)
real_labels=real_labels.to(device)
fake_labels=fake_labels.to(device)
# Train Discriminator
real_output = self.D(images)
#print('real_output:',real_output)
fake_images = self.G(z)
fake_output = self.D(fake_images)
d_real_loss = self.bce_loss(real_output.flatten(), real_labels)
d_fake_loss = self.bce_loss(fake_output.flatten(), fake_labels)
#print('real_loss:',d_real_loss.item(),' fake_loss:',d_fake_loss.item())
d_loss = d_real_loss + d_fake_loss
self.D.zero_grad()
d_loss.backward()
self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)
self.optim_d.step()
descriminator_iter+=1
# Train Generator
if i % self.args.des_iter == 0:
#print("i:",i)
self.D.zero_grad()
self.G.zero_grad()
z = torch.randn((self.batch_size, self.z_dim))
z = z.to(device)
fake_images = self.G(z)
fake_output = self.D(fake_images)
fake_score = fake_output.squeeze().mean()
#print('fake_output:',fake_output)
g_loss = self.bce_loss(fake_output.flatten(), real_labels)
g_loss.backward()
pbar.set_postfix({'G_loss': g_loss.item(), 'D_loss': d_loss.item(), 'fake_score': fake_score.item()})
#print('g_loss:',g_loss.item())
self.optim_g.step()
self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)
generator_iter+=1
# Save generated images
if generator_iter % 500 == 0:
if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):
os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')
z = torch.randn((self.batch_size,self.args.z_dim))
z=z.to(device)
samples = self.G(z)
samples = samples.mul(0.5).add(0.5)
samples = samples.data.cpu()[:25]
grid = torchvision.utils.make_grid(samples,nrow=5)
torchvision.utils.save_image(grid, './training_result_{}/img_generator_iter_{}.png'.format(self.args.dataset+'-'+self.information, str(generator_iter).zfill(3)))
# Print and log training information
print(self.optim_d.state_dict()['param_groups'][0]['lr'])
print(self.optim_g.state_dict()['param_groups'][0]['lr'])
self.t_end = t.time()
print(
"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]"
% (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))
)
# Save the trained parameters
if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:
self.save_model(epoch)
# Path: models/WGAN.py
class WGAN_CP(nn.Module):
def __init__(self, args):
super(WGAN_CP, self).__init__()
self.G=WGANCP_G(args.hw,args.z_dim,args.in_channels)
self.D=WGANCP_D(args.hw,args.in_channels)
# self.G.weight_init()
# self.D.weight_init()
self.args=args
self.batch_size=args.batch_size
self.z_dim=args.z_dim
# Attention!!! WGAN use RMSprop optimizer instead of Adam
self.optim_g = optim.RMSprop(self.G.parameters(), lr=args.lr_g)
self.optim_d = optim.RMSprop(self.D.parameters(), lr=args.lr_d)
self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.9)
self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)
# Recording program start time for log directory naming
program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())
# Logging information
self.information=f'WGAN-{program_begin_time}'
# TensorBoard SummaryWriter for logging
self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))
def warm_up(self,epoch):
"""
Learning rate warm-up function for the RMSprop optimizer.
Args:
epoch (int): Current epoch number.
Returns:
float: Adjusted learning rate based on the warm-up strategy.
"""
top_epoch = int(self.args.num_epochs*0.3)
if epoch<top_epoch:
#In the first 30% of epochs, slowly increase the LR to the preset LR
return (epoch+1) / top_epoch
else:
#Drop the LR to half of the preset
return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )
def save_model(self,epoch):
save_path=f'./save/{self.information}'
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')
torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')
self.save_args(save_path)
print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')
def save_args(self,save_path):
argsDict = self.args.__dict__
with open(f'{save_path}/setting.txt', 'w') as f:
f.writelines('------------------ start ------------------' + '\n')
for eachArg, value in argsDict.items():
f.writelines(eachArg + ' : ' + str(value) + '\n')
f.writelines('------------------- end -------------------')
def train(self,train_loader,device):
"""
Training function for the WGAN model.
Args:
train_loader (DataLoader): DataLoader for training data.
device (torch.device): The device (CPU or GPU) to perform training.
Returns:
None
"""
# Move the model and loss to the specified device
self.G.to(device)
self.D.to(device)
generator_iter = 0
descriminator_iter = 0
# Training loop
for epoch in range(self.args.num_epochs):
self.t_begin = t.time()
pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)
for i, (images, _) in pbar:
if i == train_loader.dataset.__len__() // self.batch_size:
break
# Generate random noise and labels
z = torch.randn((self.batch_size, self.z_dim, 1, 1))
real_labels = torch.ones(self.batch_size)
fake_labels = torch.zeros(self.batch_size)
# Move data to the specified device
images=images.to(device)
z=z.to(device)
real_labels=real_labels.to(device)
fake_labels=fake_labels.to(device)
# Train Discriminator
for p in self.D.parameters():
p.data.clamp_(-self.args.wc, self.args.wc)
d_loss_real = self.D(images)
d_loss_real = d_loss_real.mean(0).view(1)
fake_images = self.G(z)
d_loss_fake = self.D(fake_images)
d_loss_fake = d_loss_fake.mean(0).view(1)
d_loss = d_loss_fake - d_loss_real
Wasserstein_D = d_loss_real - d_loss_fake
self.D.zero_grad()
d_loss.backward()
self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)
self.writer.add_scalar('Wasserstein_D', Wasserstein_D.item(), descriminator_iter)
self.optim_d.step()
descriminator_iter+=1
# Train Generator
if i % self.args.des_iter == 0:
#print("i:",i)
self.D.zero_grad()
self.G.zero_grad()
z = torch.randn((self.batch_size, self.z_dim, 1, 1))
z = z.to(device)
fake_images = self.G(z)
#print('fake_output:',fake_output)
g_loss = self.D(fake_images)
g_loss = g_loss.mean(0).view(1).mul(-1)
g_loss.backward()
pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item()})
#print('g_loss:',g_loss.item())
self.optim_g.step()
self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)
generator_iter+=1
# Save generated images
if generator_iter % 500 == 0:
if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):
os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')
z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))
z=z.to(device)
samples = self.G(z)
samples = samples.mul(0.5).add(0.5)
samples = samples.data.cpu()[:25]
grid = torchvision.utils.make_grid(samples,nrow=5)
torchvision.utils.save_image(grid, './training_result_{}/img_generator_iter_{}.png'.format(self.args.dataset+'-'+self.information, str(generator_iter).zfill(3)))
# Learning rate scheduling
self.scheduler_optim_d.step()
self.scheduler_optim_g.step()
# Print and log training information
print(self.optim_d.state_dict()['param_groups'][0]['lr'])
print(self.optim_g.state_dict()['param_groups'][0]['lr'])
self.t_end = t.time()
print(
"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]"
% (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))
)
# Save the trained parameters
if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:
self.save_model(epoch)
# Path: models/WGAN_GP.py
class WGAN_GP(nn.Module):
def __init__(self, args):
super(WGAN_GP, self).__init__()
self.G=WGANGP_G(args.hw,args.z_dim,args.in_channels)
self.D=WGANGP_D(args.hw,args.in_channels)
# self.G.weight_init()
# self.D.weight_init()
self.args=args
self.batch_size=args.batch_size
self.z_dim=args.z_dim
self.gp_lambda=args.gp_lambda
self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g, betas=args.betas)
self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d, betas=args.betas)
self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.95)
self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)
# Recording program start time for log directory naming
program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())
# Logging information
self.information=f'WGAN_GP-{program_begin_time}'
# TensorBoard SummaryWriter for logging
self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))
def warm_up(self,epoch):
"""
Learning rate warm-up function for the Adam optimizer.
Args:
epoch (int): Current epoch number.
Returns:
float: Adjusted learning rate based on the warm-up strategy.
"""
top_epoch = int(self.args.num_epochs*0.3)
if epoch<top_epoch:
#In the first 30% of epochs, slowly increase the LR to the preset LR
return (epoch+1) / top_epoch
else:
#Drop the LR to half of the preset
return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )
def save_model(self,epoch):
save_path=f'./save/{self.information}'
if not os.path.exists(save_path):
os.makedirs(save_path)
torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')
torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')
self.save_args(save_path)
print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')
def save_args(self,save_path):
argsDict = self.args.__dict__
with open(f'{save_path}/setting.txt', 'w') as f:
f.writelines('------------------ start ------------------' + '\n')
for eachArg, value in argsDict.items():
f.writelines(eachArg + ' : ' + str(value) + '\n')
f.writelines('------------------- end -------------------')
def train(self,train_loader,device):
"""
Training function for the WGAN-GP model.
Args:
train_loader (DataLoader): DataLoader for training data.
device (torch.device): The device (CPU or GPU) to perform training.
Returns:
None
"""
# Move the model and loss to the specified device
self.G.to(device)
self.D.to(device)
generator_iter = 0
descriminator_iter = 0
# Training loop
for epoch in range(self.args.num_epochs):
self.t_begin = t.time()
pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)
for i, (images, _) in pbar:
if i == train_loader.dataset.__len__() // self.batch_size:
break
for p in self.D.parameters():
p.requires_grad = True
# Generate random noise and labels
z = torch.randn((self.batch_size, self.z_dim, 1, 1))
real_labels = torch.ones(self.batch_size)
fake_labels = torch.zeros(self.batch_size)
# Move data to the specified device
images=images.to(device)
z=z.to(device)
real_labels=real_labels.to(device)
fake_labels=fake_labels.to(device)
# Train Discriminator
d_loss_real = self.D(images)
d_loss_real = d_loss_real.mean(0).view(1)
fake_images = self.G(z)
d_loss_fake = self.D(fake_images)
d_loss_fake = d_loss_fake.mean(0).view(1)
gradient_penalty = self.calculate_gradient_penalty(images.data, fake_images.data,device)
d_loss = d_loss_fake - d_loss_real + gradient_penalty
Wasserstein_D = d_loss_real - d_loss_fake
self.D.zero_grad()
d_loss.backward()
self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)
self.writer.add_scalar('Wasserstein_D', Wasserstein_D.item(), descriminator_iter)
self.optim_d.step()
descriminator_iter+=1
# Train Generator
if i % self.args.des_iter == 0:
for p in self.D.parameters():
p.requires_grad = False # to avoid computation
#print("i:",i)
self.D.zero_grad()
self.G.zero_grad()
z = torch.randn((self.batch_size, self.z_dim, 1, 1))
z = z.to(device)
fake_images = self.G(z)
#print('fake_output:',fake_output)
g_loss = self.D(fake_images)
g_loss = g_loss.mean(0).view(1).mul(-1)
g_loss.backward()
pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item()})
#print('g_loss:',g_loss.item())
self.optim_g.step()
self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)
generator_iter+=1
# Save generated images
if generator_iter % 500 == 0:
if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):
os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')
z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))
z=z.to(device)
samples = self.G(z)
samples = samples.mul(0.5).add(0.5)
samples = samples.data.cpu()[:25]
grid = torchvision.utils.make_grid(samples,nrow=5)
torchvision.utils.save_image(grid, './training_result_{}/img_generator_iter_{}.png'.format(self.args.dataset+'-'+self.information, str(generator_iter).zfill(3)))
# Learning rate scheduling
self.scheduler_optim_d.step()
self.scheduler_optim_g.step()
# Print and log training information
print(self.optim_d.state_dict()['param_groups'][0]['lr'])
print(self.optim_g.state_dict()['param_groups'][0]['lr'])
self.t_end = t.time()
print(
"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]"
% (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))
)
# Save the trained parameters
if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:
self.save_model(epoch)
def calculate_gradient_penalty(self, real_images, fake_images, device):
eta = torch.FloatTensor(self.batch_size,1,1,1).uniform_(0,1)
eta = eta.expand(self.batch_size, real_images.size(1), real_images.size(2), real_images.size(3))
eta=eta.to(device)
interpolated = eta * real_images + ((1 - eta) * fake_images)
interpolated.requires_grad_(True)
# calculate probability of interpolated examples
prob_interpolated = self.D(interpolated)
grad_outputs=torch.ones(prob_interpolated.size()).to(device)
grad_outputs.requires_grad_(True)
# calculate gradients of probabilities with respect to examples
gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,
grad_outputs=grad_outputs,
create_graph=True, retain_graph=True)[0]
grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.gp_lambda
return grad_penalty
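For readers unfamiliar with the gradient-penalty term computed above, a minimal self-contained sketch follows. It is illustrative only: the tiny linear critic and the tensor shapes are invented, the penalty coefficient is omitted, and the per-sample gradient is flattened before taking the norm, as in the common WGAN-GP formulation.

import torch
from torch import nn, autograd

critic = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 1))    # toy critic
real = torch.randn(4, 3, 8, 8)
fake = torch.randn(4, 3, 8, 8)
eta = torch.rand(4, 1, 1, 1)
interpolated = (eta * real + (1 - eta) * fake).requires_grad_(True)
scores = critic(interpolated)
grads = autograd.grad(outputs=scores, inputs=interpolated,
                      grad_outputs=torch.ones_like(scores),
                      create_graph=True)[0]
gp = ((grads.view(4, -1).norm(2, dim=1) - 1) ** 2).mean()
print(gp.item())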
# Path: utils/get_model.py
import torch
from models.DCGAN import DCGAN
from models.GAN import GAN
from models.WGAN import WGAN_CP
from models.WGAN_GP import WGAN_GP
def get_model(args):
if args.model == 'DCGAN':
net=DCGAN(args)
| elif args.model == 'GAN': |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: anyquest/pyaq
# Path: aq/jobs/manager.py
class JobManager:
app_jobs: Dict[str, AppJob]
activity_jobs: Dict[str, Dict[str, List[ActivityJob]]]
def __init__(self):
self.app_jobs = {}
self.activity_jobs = {}
self._logger = logging.getLogger(self.__class__.__name__)
def create_app_job(self, app: App) -> AppJob:
app_job = AppJob(app)
self.app_jobs[app_job.id] = app_job
self.activity_jobs[app_job.id] = {}
return app_job
def create_activity_job(self, app_job: AppJob, activity_name: str) -> ActivityJob:
app = app_job.app
if activity_name not in app.activities:
raise AppJobError(f"Activity {activity_name} not found")
activity_job = ActivityJob(activity_name, app_job)
self.activity_jobs[app_job.id].setdefault(activity_name, []).append(activity_job)
return activity_job
def get_next_activities(self, activity_job: ActivityJob) -> List[str]:
app = activity_job.app_job.app
rv = []
for activity_name in app.activities:
activity = app.activities[activity_name]
for activity_input in activity.inputs or []:
if (activity_input.activity == activity_job.activity_name and
not self.is_waiting_for_jobs(activity_job.app_job, activity)):
rv.append(activity_name)
return rv
def is_waiting_for_jobs(self, app_job: AppJob, activity: Activity):
for activity_input in activity.inputs or []:
jobs = self.activity_jobs[app_job.id].get(activity_input.activity, None)
if not jobs or any(not job.finished for job in jobs):
return True
return False
def get_inputs_for_activity(self, app_job: AppJob, activity: Activity) -> List[Dict[str, Any]]:
inputs_for_activity = {}
if activity.inputs:
for activity_input in activity.inputs:
job_outputs = [job.output for job in self.activity_jobs[app_job.id].get(activity_input.activity, [])]
inputs_for_activity[activity_input.activity] = job_outputs if len(job_outputs) > 1 else job_outputs[0]
for activity_input in activity.inputs:
if activity_input.map:
try:
expr = parse(activity_input.map)
val = inputs_for_activity[activity_input.activity]
inputs = []
for match in expr.find(json.loads(val)):
if isinstance(match.value, list):
for input_value in match.value:
inputs.append({**inputs_for_activity, activity_input.activity: input_value})
return inputs
except Exception as e:
self._logger.error(f"Failed to parse a map expression {e}")
return []
return [inputs_for_activity]
def get_outputs(self, app_job: AppJob) -> Dict[str, Any]:
app = app_job.app
# Terminal activities have inputs and do not have any other activities that take their outputs
terminal_activities = [
activity_name
for activity_name, activity in app.activities.items()
if activity.inputs and not any(
activity_input.activity == activity_name
for some_other_activity in app.activities.values()
for activity_input in some_other_activity.inputs or []
)
]
# Collect outputs from terminal activities
outputs = {
activity_name: [job.output for job in self.activity_jobs[app_job.id].get(activity_name, [])]
for activity_name in terminal_activities
}
# Remove empty values and return
return {key: value for key, value in outputs.items() if value}
# Path: aq/jobs/manager.py
class AppJobError(Exception):
pass
# Path: aq/activities/read.py
class ReadActivity(BaseActivity):
def __init__(self, pdf_reader: PdfReader, file_reader: FileReader, image_reader: ImageReader):
self._logger = logging.getLogger(self.__class__.__name__)
self._handlers = {
"application/pdf": pdf_reader,
"application/json": file_reader,
"text/plain": file_reader,
"text/markdown": file_reader,
"image/jpeg": image_reader,
"image/jpg": image_reader,
"image/png": image_reader
}
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
file_path = inputs.get("file_path")
if not file_path or not os.path.exists(file_path):
raise ActivityError(f"Invalid file path: {file_path}")
content_type = mimetypes.guess_type(file_path)[0]
handler = self._handlers.get(content_type)
if handler:
content = await handler.read(file_path)
else:
raise ActivityError(f"Cannot read content of type: {content_type}")
# Add the file path to the app context
app_job = activity_job.app_job
app_job.context["file_path"] = file_path
activity_job.state = JobState.SUCCESS
activity_job.output = content
activity_job.output_type = "text/plain"
except Exception as e:
activity_job.state = JobState.ERROR
activity_job.output = str(e)
self._logger.error(f"Encountered an error {e}")
# Path: aq/activities/write.py
class WriteActivity(BaseActivity):
HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<style>
.content {
width: 80%%;
margin: auto;
line-height: 1.4rem;
font-family: Helvetica, Arial, sans-serif;
font-size: 0.9rem;
}
</style>
</head>
<body>
<div class="content">
%s
</div>
</body>
</html>"""
def __init__(self):
self._logger = logging.getLogger(self.__class__.__name__)
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
# Collate the inputs
output_format = activity.parameters.get("format", "md")
template = activity.parameters.get("template", None)
if template:
text = self.render(template, inputs)
elif output_format == "json":
text = self.merge_inputs_json(inputs, indent=2)
else:
text = self.merge_inputs(inputs)
# Compute the file prefix based on the original file name
original_file_path = activity_job.app_job.context.get("file_path", None)
if original_file_path:
base_name = os.path.basename(original_file_path)
file_prefix, _ = os.path.splitext(base_name)
else:
file_prefix = "out"
# Apply formatting
if output_format == "html":
text = self.HTML_TEMPLATE % markdown.markdown(text, tab_length=2)
# Write content to file
file_name = self.generate_temp_filename(file_prefix, output_format)
# Create the out directory
path = "./out"
if not os.path.exists(path):
os.makedirs(path)
file_path = os.path.join(path, file_name)
async with aiofiles.open(file_path, mode='w', encoding='utf-8') as file:
await file.write(text)
activity_job.state = JobState.SUCCESS
activity_job.output = file_path
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
# Path: aq/activities/generate.py
class GenerateActivity(BaseActivity):
TOOL_NAME_DELIMITER = "__"
def __init__(self, provider_manager: ProviderManager, tool_manager: ToolManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
self._tool_manager = tool_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
if len(activity.models) < 1:
raise ActivityError(f"A model is required")
model = app.models[activity.models[0]]
temperature = float(activity.parameters.get("temperature", model.parameters.get("temperature", 0.5)))
max_tokens = int(activity.parameters.get("max_words", model.parameters.get("max_words", 500))*4/3)
messages = []
profile = app.info.profile
if profile:
messages.append(ChatCompletionMessage(role="system", content=profile))
json_format = activity.parameters.get("format", None) == "json"
if json_format:
messages.append(ChatCompletionMessage(
role="system",
content="Provide your response as a JSON object."))
else:
messages.append(ChatCompletionMessage(
role="system",
content="Use the tab length of two spaces when formatting nested lists in markdown."))
tools = await self.get_tools(app, activity)
if tools:
messages.append(ChatCompletionMessage(
role="system",
content="Think step-by-step. Perform as many iterations as necessary "
"to accomplish your goal using the tools provided."))
prompt_template = activity.parameters["prompt"]
prompt = self.render_prompt(prompt_template, inputs)
messages.append(ChatCompletionMessage(role="user", content=prompt))
parts = []
start_time = time.perf_counter()
provider = self._provider_manager.get_provider(model.provider)
for x in range(self.MAX_ITERATIONS):
request = ChatCompletionRequest(
model=model.model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
tools=tools if tools else None,
tool_choice="auto" if tools else None,
response_format=ResponseFormat(type="json_object") if json_format else None
)
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
messages.append(message)
if choice.finish_reason == "tool_calls":
for tool_call in message.tool_calls:
tool_result = await self.process_tool_call(tool_call, app)
messages.append(tool_result)
else:
if message.content:
parts.append(message.content)
if choice.finish_reason:
self._logger.debug(f"Finished with reason {choice.finish_reason} "
f"in {int(time.perf_counter()-start_time)} sec.")
break
activity_job.state = JobState.SUCCESS
activity_job.output = "\n\n".join(parts)
activity_job.output_type = "text/markdown"
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
async def get_tools(self, app: App, activity: Activity) -> List[Tool]:
tools = []
if activity.tools:
for tool_name in activity.tools:
tool_def = app.tools[tool_name]
tool_obj = self._tool_manager.get_tool(tool_def.type)
metadata = await tool_obj.get_metadata(tool_def)
for tool in metadata:
func = tool.function
func.name = f"{tool_name}{self.TOOL_NAME_DELIMITER}{func.name}"
tools.append(tool)
return tools
async def process_tool_call(self, tool_call: ToolCall, app: App) -> ChatCompletionMessage:
self._logger.debug(f"Calling {tool_call.function.name}")
names = tool_call.function.name.split(self.TOOL_NAME_DELIMITER)
if len(names) < 2:
raise ActivityError(f"Invalid tool name {tool_call.function.name}")
if names[0] not in app.tools:
raise ActivityError(f"{names[0]} is not a valid tool name")
tool_def = app.tools[names[0]]
tool_obj = self._tool_manager.get_tool(tool_def.type)
arguments = json.loads(tool_call.function.arguments)
response = await tool_obj.invoke(names[1], arguments, tool_def)
return ChatCompletionMessage(
role="tool",
tool_call_id=tool_call.id,
name=tool_call.function.name,
content=response
)
# Path: aq/activities/summarize.py
class SummarizeActivity(BaseActivity):
def __init__(self, provider_manager: ProviderManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
# Get the text for summarization
text = self.merge_inputs(inputs)
if not text:
raise ActivityError(f"Text is required")
# Get the model
if len(activity.models) < 1:
raise ActivityError(f"A model is required")
model = app.models[activity.models[0]]
# Get the model parameters
sentences = int(activity.parameters.get("sentences", model.parameters.get("sentences", 10)))
temperature = float(activity.parameters.get("temperature", model.parameters.get("temperature", 0.5)))
summary = await self.summarize(app.info.profile, text, model.provider,
model.model, sentences, temperature)
activity_job.output = summary
activity_job.state = JobState.SUCCESS
activity_job.output_type = "text/markdown"
except Exception as e:
activity_job.state = JobState.ERROR
activity_job.output = str(e)
self._logger.error(f"Encountered an error {e}")
async def summarize(self, context: str, text: str,
provider_type: ModelProvider, model: str,
sentences: int, temperature: float) -> str:
messages = []
if context:
messages.append(ChatCompletionMessage(role="system", content=context))
prompt = """
Summarize a block of text provided inside triple back ticks.
Your summary must be readable.
Your summary must include about %(sentences)d sentences.
```%(text)s```
"""
prompt = prompt % {"text": text, "sentences": sentences}
messages.append(ChatCompletionMessage(role="user", content=prompt))
request = ChatCompletionRequest(
model=model,
messages=messages,
temperature=temperature
)
provider = self._provider_manager.get_provider(provider_type)
start_time = time.perf_counter()
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
self._logger.debug(f"Finished with reason {choice.finish_reason} "
f"in {int(time.perf_counter()-start_time)} sec.")
return message.content
# Path: aq/activities/extract.py
class ExtractActivity(BaseActivity):
def __init__(self, provider_manager: ProviderManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
if len(activity.models) < 1:
raise ActivityError("A model is required")
model = app.models[activity.models[0]]
schema = activity.parameters.get("schema", None)
if not schema:
raise ActivityError("A schema is required")
if not isinstance(schema, list):
raise ActivityError("A schema must be a list of types")
tools = []
for schema_def in schema:
func_def = Function(**schema_def)
tools.append(Tool(function=func_def))
messages = []
profile = app.info.profile
if profile:
messages.append(ChatCompletionMessage(role="system", content=profile))
messages.append(ChatCompletionMessage(role="system", content="""
Use the tools provided with information extracted from the user prompt.
Use the entire prompt as the source of information. Do not exclude or omit anything.
"""))
messages.append(ChatCompletionMessage(role="user", content=self.merge_inputs(inputs)))
values: Dict[str, List[Any]] = {}
provider = self._provider_manager.get_provider(model.provider)
for x in range(self.MAX_ITERATIONS):
request = ChatCompletionRequest(
model=model.model,
messages=messages,
temperature=0.0,
tools=tools,
tool_choice="auto"
)
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
messages.append(message)
if choice.finish_reason == "tool_calls":
for tool_call in message.tool_calls:
function_call = tool_call.function
values_for_type = values.get(function_call.name, None)
if not values_for_type:
values_for_type = []
values[function_call.name] = values_for_type
values_for_type.append(json.loads(function_call.arguments))
messages.append(ChatCompletionMessage(
role="tool",
tool_call_id=tool_call.id,
name=function_call.name,
content="Success"
))
elif choice.finish_reason:
break
activity_job.state = JobState.SUCCESS
activity_job.output = json.dumps(values)
activity_job.output_type = "application/json"
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
# Path: aq/activities/store.py
class StoreActivity(BaseActivity):
def __init__(self, memory_manager: MemoryManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._memory_manager = memory_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
if not app.memory:
raise ActivityError("A memory repository is required for this app")
activity = app.activities[activity_job.activity_name]
if not activity.memory:
raise ActivityError("A memory repository is required for this activity")
memory_def = app.memory[activity.memory[0]]
memory_repository = self._memory_manager.get_repository(memory_def.type)
file_id = activity_job.app_job.context.get("file_path", str(uuid.uuid4()))
chunk_size = activity.parameters.get("chunk_size", memory_def.parameters.get("chunk_size", 2000))
text = self.merge_inputs(inputs)
chunks = memory_repository.store(memory_def, app.info.id, file_id, text, chunk_size)
activity_job.state = JobState.SUCCESS
activity_job.output = str(chunks)
activity_job.output_type = "text/plain"
except Exception as e:
activity_job.state = JobState.ERROR
activity_job.output = str(e)
self._logger.error(f"Encountered an error {e}")
# Path: aq/activities/retrieve.py
class RetrieveActivity(BaseActivity):
def __init__(self, memory_manager: MemoryManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._memory_manager = memory_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
if not app.memory:
raise ActivityError("A memory repository is required for this app")
activity = app.activities[activity_job.activity_name]
if not activity.memory:
raise ActivityError("A memory repository is required for this activity")
memory_def = app.memory[activity.memory[0]]
memory_repository = self._memory_manager.get_repository(memory_def.type)
n_results = activity.parameters.get("n_results", 3)
chunks = memory_repository.retrieve(memory_def, app.info.id, self.merge_inputs(inputs), n_results)
activity_job.state = JobState.SUCCESS
activity_job.output = "\n\n".join(chunks)
activity_job.output_type = "text/plain"
except Exception as e:
activity_job.state = JobState.ERROR
activity_job.output = str(e)
self._logger.error(f"Encountered an error {e}")
# Path: aq/activities/function.py
class FunctionActivity(BaseActivity):
def __init__(self):
self._logger = logging.getLogger(self.__class__.__name__)
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
activity_job.state = JobState.SUCCESS
activity_job.output = self.merge_inputs(inputs)
activity_job.output_type = "text/plain"
# Path: aq/activities/function.py
class ReturnActivity(BaseActivity):
def __init__(self):
self._logger = logging.getLogger(self.__class__.__name__)
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
activity_job.state = JobState.SUCCESS
activity_job.output = self.merge_inputs_json(inputs)
activity_job.output_type = "application/json"
except Exception as e:
activity_job.state = JobState.ERROR
activity_job.output = str(e)
self._logger.error(f"Encountered an error {e}")
# Path: aq/types/app.py
class ActivityType(Enum):
ANY = "any"
READ = "read"
WRITE = "write"
STORE = "store"
RETRIEVE = "retrieve"
SUMMARIZE = "summarize"
EXTRACT = "extract"
GENERATE = "generate"
FUNCTION = "function"
CALL = "call"
RETURN = "return"
# Path: aq/types/job.py
class ActivityJob:
id: str
activity_name: str
app_job: AppJob
state: JobState
output: str
output_type: str = "text/plain"
def __init__(self, activity_name: str, app_job: AppJob):
self.id = str(uuid.uuid4())
self.activity_name = activity_name
self.app_job = app_job
self.state = JobState.CREATED
self.output = ""
@property
def finished(self) -> bool:
return self.state == JobState.SUCCESS or self.state == JobState.ERROR
# Path: aq/types/job.py
class JobState(Enum):
ANY = 0
CREATED = 1
RUNNING = 2
SUCCESS = 3
ERROR = 4
# Path: aq/jobs/scheduler.py
import asyncio
import logging
import time
from typing import Dict, Any
from .manager import JobManager, AppJobError
from ..activities import (
ReadActivity,
WriteActivity,
SummarizeActivity,
GenerateActivity,
ExtractActivity,
StoreActivity,
RetrieveActivity,
FunctionActivity,
ReturnActivity
)
from ..types import ActivityType, ActivityJob, JobState
class WorkItem:
job: ActivityJob
inputs: Dict[str, Any]
def __init__(self, job: ActivityJob, inputs: Dict[str, Any]) -> None:
self.job = job
self.inputs = inputs
class JobScheduler:
def __init__(self, config: Dict[str, Any], job_manager: JobManager,
read_activity: ReadActivity, write_activity: WriteActivity,
summarize_activity: SummarizeActivity, generate_activity: GenerateActivity,
extract_activity: ExtractActivity, store_activity: StoreActivity, retrieve_activity: RetrieveActivity,
function_activity: FunctionActivity, return_activity: ReturnActivity):
self._config = config
self._logger = logging.getLogger(self.__class__.__name__)
logging.getLogger('asyncio').setLevel(logging.ERROR)
self._queue = asyncio.Queue()
self._workers = []
self._job_manager = job_manager
self._activity_handlers = {
ActivityType.READ: read_activity,
ActivityType.WRITE: write_activity,
ActivityType.SUMMARIZE: summarize_activity,
ActivityType.GENERATE: generate_activity,
ActivityType.EXTRACT: extract_activity,
ActivityType.STORE: store_activity,
ActivityType.RETRIEVE: retrieve_activity,
ActivityType.FUNCTION: function_activity,
ActivityType.RETURN: return_activity
}
async def start_workers(self):
num_workers = self._config.get("workers", 3)
self._workers = [asyncio.create_task(self.consume(n)) for n in range(num_workers)]
async def schedule(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
item = WorkItem(activity_job, inputs)
await self._queue.put(item)
async def consume(self, name: int) -> None:
while True:
item = await self._queue.get()
self._logger.debug(f"Worker {name} performing {item.job.activity_name}")
start_time = time.perf_counter()
await self.perform(item)
self._logger.debug(f"Finished {item.job.activity_name} in {int(time.perf_counter()-start_time)} sec.")
self._queue.task_done()
async def perform(self, item: WorkItem) -> None:
item.job.state = JobState.RUNNING
app = item.job.app_job.app
app_job = item.job.app_job
| activity = app.activities[item.job.activity_name] |
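A standalone sketch of the asyncio queue/worker pattern that JobScheduler relies on above. Everything here (worker count, item payloads) is invented for illustration and is not part of the repository.

import asyncio

async def worker(name, queue):
    while True:
        item = await queue.get()
        print(f"worker {name} handled item {item}")
        queue.task_done()

async def main():
    queue = asyncio.Queue()
    workers = [asyncio.create_task(worker(n, queue)) for n in range(3)]
    for i in range(5):
        await queue.put(i)
    await queue.join()          # wait until every queued item is marked done
    for w in workers:
        w.cancel()

asyncio.run(main())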
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: multimodallearning/DG-TTA
# Path: dg_tta/utils.py
def disable_internal_augmentation():
os.environ["DG_TTA_INTERNAL_AUGMENTATION"] = "false"
# Path: dg_tta/gin.py
def gin_aug(input):
cfg = dict(
IN_CHANNELS=1,
N_LAYER=4,
INTERM_CHANNELS=2,
)
gin_group_conv = GINGroupConv(cfg)
input = gin_group_conv(input)
return input
# Path: dg_tta/tta/torch_utils.py
def get_batch(tensor_list, batch_idx, patch_size, fixed_patch_idx=None, device="cpu"):
assert (
fixed_patch_idx in range(8)
or fixed_patch_idx is None
or fixed_patch_idx == "center"
)
device = torch.device(device)
B = len(batch_idx)
b_img = []
b_label = []
with torch.no_grad():
for b in range(B):
# Get patches
data = tensor_list[batch_idx[b]]
if fixed_patch_idx is None:
rand_patch_d = torch.randint(
max(data.shape[1] - patch_size[0], 0), (1,)
)
rand_patch_h = torch.randint(
max(data.shape[2] - patch_size[1], 0), (1,)
)
rand_patch_w = torch.randint(
max(data.shape[3] - patch_size[2], 0), (1,)
)
elif fixed_patch_idx == "center":
rand_patch_d = max((data.shape[1] - patch_size[0]) // 2, 0)
rand_patch_h = max((data.shape[2] - patch_size[1]) // 2, 0)
rand_patch_w = max((data.shape[3] - patch_size[2]) // 2, 0)
else:
p_idxs = f"{fixed_patch_idx:03b}"
p_idxs = [int(idx) for idx in [*p_idxs]]
rand_patch_d = p_idxs[0] * patch_size[0]
rand_patch_h = p_idxs[1] * patch_size[1]
rand_patch_w = p_idxs[2] * patch_size[2]
# print(rand_patch_d, rand_patch_h, rand_patch_w)
out_shape = (
1,
1,
max(data.shape[1], patch_size[0]),
max(data.shape[2], patch_size[1]),
max(data.shape[3], patch_size[2]),
)
grid = F.affine_grid(
torch.eye(3, 4).unsqueeze(0).to(device), out_shape, align_corners=False
)
patch_grid = grid[
:,
rand_patch_d : rand_patch_d + patch_size[0],
rand_patch_h : rand_patch_h + patch_size[1],
rand_patch_w : rand_patch_w + patch_size[2],
]
# Grid sample based patching (only useful if patch is augmented here)
# b_img.append(F.grid_sample(
# data[0:1].unsqueeze(0).to(device), patch_grid, align_corners=False
# ))
# Cut based patching
b_img.append(
data[0:1].unsqueeze(0)[
:,
:,
rand_patch_d : rand_patch_d + patch_size[0],
rand_patch_h : rand_patch_h + patch_size[1],
rand_patch_w : rand_patch_w + patch_size[2]
].to(device)
)
if data[1:].numel() == 0:
# No GT label is available for this sample
b_label.append(None)
else:
# Grid sample based patching (only useful if patch is augmented here)
# b_label.append(
# get_argmaxed_segs(
# F.grid_sample(
# data[1:].to(torch.float16).unsqueeze(0).to(device),
# patch_grid.to(torch.float16),
# align_corners=False,
# mode="nearest",
# )
# )
# )
# Cut based patching
b_label.append(
get_argmaxed_segs(
data[1:].unsqueeze(0)[
:,
:,
rand_patch_d : rand_patch_d + patch_size[0],
rand_patch_h : rand_patch_h + patch_size[1],
rand_patch_w : rand_patch_w + patch_size[2]
].to(device)
)
)
return b_img, b_label
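# --- Illustrative usage sketch (not part of the repository) -----------------
# Assumes each tensor in the list stacks one image channel followed by one-hot
# label channels, which is the layout get_batch above indexes into.
def _example_get_batch_usage():
    sample = torch.cat([torch.rand(1, 64, 64, 64), torch.zeros(2, 64, 64, 64)], dim=0)
    tta_tens_list = [sample, sample.clone()]
    imgs, labels = get_batch(
        tta_tens_list, batch_idx=[0, 1], patch_size=(32, 32, 32), fixed_patch_idx="center"
    )
    assert imgs[0].shape == (1, 1, 32, 32, 32)
    return imgs, labels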
# Path: dg_tta/tta/torch_utils.py
def map_label(label, map_idxs, input_format):
assert input_format in ["logits", "argmaxed"]
if input_format == "logits":
# We have a non argmaxed map, suppose that C dimension is label dimension
mapped_label = label
# Swap B,C and subselect
mapped_label = mapped_label.transpose(0, 1)[map_idxs].transpose(0, 1)
else:
mapped_label = torch.zeros_like(label)
for lbl_idx, map_idx in enumerate(map_idxs):
mapped_label[label == map_idx] = lbl_idx
return mapped_label
# Path: dg_tta/tta/torch_utils.py
def dice_coeff(outputs, labels, max_label):
dice = torch.FloatTensor(max_label - 1).fill_(0)
for label_num in range(1, max_label):
iflat = (outputs == label_num).view(-1).float()
tflat = (labels == label_num).view(-1).float()
intersection = torch.mean(iflat * tflat)
dice[label_num - 1] = (2.0 * intersection) / (
1e-8 + torch.mean(iflat) + torch.mean(tflat)
)
return dice
# Path: dg_tta/tta/torch_utils.py
def soft_dice_loss(smp_a, smp_b):
B, _, D, H, W = smp_a.shape
d = 2
nominator = (2.0 * smp_a * smp_b).reshape(B, -1, D * H * W).mean(2)
denominator = 1 / d * ((smp_a + smp_b) ** d).reshape(B, -1, D * H * W).mean(2)
if denominator.sum() == 0.0:
dice = (nominator * 0.0) + 1.0
else:
dice = (
nominator / denominator
) # Do not add an eps here, it disturbs the consistency
return dice
# Path: dg_tta/tta/torch_utils.py
def fix_all(m):
for p in m.parameters():
p.requires_grad_(False)
# Path: dg_tta/tta/torch_utils.py
def release_all(m):
for p in m.parameters():
p.requires_grad_(True)
# Path: dg_tta/tta/torch_utils.py
def release_norms(m):
if (
"instancenorm" in m.__class__.__name__.lower()
or "batchnorm" in m.__class__.__name__.lower()
):
print("Released", m.__class__.__name__)
for p in m.parameters():
p.requires_grad_(True)
# Path: dg_tta/tta/torch_utils.py
def get_map_idxs(label_mapping: dict, optimized_labels: list, input_type):
assert input_type in ["pretrain_labels", "tta_labels"]
assert optimized_labels[0] == "background"
# Generate idxs from label_mapping dict
map_idxs_list = []
for reduced_idx, eval_label in enumerate(optimized_labels):
src_idx, target_idx = label_mapping[eval_label]
# map_idxs_list = [tts_dict[k] for k,v in amos_bcv_dict.items()]
append_idx = src_idx if input_type == "pretrain_labels" else target_idx
map_idxs_list.append(append_idx)
map_idxs = torch.as_tensor(map_idxs_list)
return map_idxs
# Path: dg_tta/tta/torch_utils.py
def get_imgs(tta_sample):
imgs = tta_sample[:, 0:1]
return imgs
# Path: dg_tta/tta/augmentation_utils.py
def get_disp_field(
batch_num, size_3d, factor=0.1, interpolation_factor=5, device="cpu"
):
field = get_rf_field(
batch_num,
size_3d,
alternating_fields=False,
num_fields=3,
interpolation_factor=interpolation_factor,
device=device,
)
STEPS = 5
disp_field, inverse_disp_field = calc_consistent_diffeomorphic_field(
field * factor, torch.zeros_like(field), STEPS, ensure_inverse_consistency=True
)
return disp_field.permute(0, 2, 3, 4, 1), inverse_disp_field.permute(0, 2, 3, 4, 1)
# Path: dg_tta/tta/augmentation_utils.py
def get_rand_affine(batch_size, strength=0.05, flip=False):
affine = torch.cat(
(
torch.randn(batch_size, 3, 4) * strength + torch.eye(3, 4).unsqueeze(0),
torch.tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(batch_size, 1, 1),
),
1,
)
if flip:
flip_affine = torch.diag(
torch.cat([(2 * (torch.rand(3) > 0.5).float() - 1), torch.tensor([1.0])])
)
affine = affine @ flip_affine
return affine[:, :3], affine.inverse()[:, :3]
# Path: dg_tta/tta/config_log_utils.py
def get_global_idx(list_of_tuple_idx_max):
# Get global index e.g. 2250 for ensemble_idx=2, epoch_idx=250 @ max_epochs<1000
global_idx = 0
next_multiplier = 1
# Smallest identifier tuple last!
for idx, max_of_idx in reversed(list_of_tuple_idx_max):
global_idx = global_idx + next_multiplier * idx
next_multiplier = next_multiplier * 10 ** len(str(int(max_of_idx)))
return global_idx
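# --- Illustrative check (not part of the repository) ------------------------
# Reproduces the example in the comment above: epoch_idx=250 with max_epochs < 1000
# and ensemble_idx=2 pack into the global index 2250 (the ensemble count, 10 here,
# does not affect the result).
assert get_global_idx([(2, 10), (250, 999)]) == 2250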
# Path: dg_tta/tta/config_log_utils.py
def wandb_run_is_available():
return (
importlib.util.find_spec("wandb") is not None
and wandb.run is not None
and not wandb.run.disabled
)
# Path: dg_tta/tta/config_log_utils.py
def plot_run_results(save_path, sample_id, ensemble_idx, tta_losses, eval_dices):
fig, ax_one = plt.subplots()
ax_two = ax_one.twinx()
cmap = get_dgtta_colormap()
c1, c2 = cmap(0.0), cmap(0.8)
ax_one.plot(tta_losses, label="loss", c=c1)
ax_one.set_yticks([tta_losses.min(), tta_losses.max()])
ax_one.set_xlim(0, len(tta_losses) - 1)
ax_one.set_ylabel("Soft-Dice Loss", c=c1)
ax_one.tick_params(axis="y", colors=c1)
ax_one.set_xlabel("TTA Epoch")
ax_one.grid(axis="y", linestyle="--", linewidth=0.5)
ax_one.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.3f"))
ax_two.plot(eval_dices * 100, label="eval_dices", c=c2)
ax_two.set_yticks([eval_dices.min() * 100, eval_dices.max() * 100])
ax_two.set_ylabel("Pseudo-Dice in %", c=c2)
ax_two.tick_params(axis="y", colors=c2)
ax_two.yaxis.set_major_formatter(matplotlib.ticker.FormatStrFormatter("%.1f"))
fig.suptitle(f"{sample_id} (ensemble_idx={ensemble_idx})")
split_sample_id = sample_id.split('/')[-1]
tta_plot_save_path = (
save_path / f"{split_sample_id}__ensemble_idx_{ensemble_idx}_tta_results.png"
)
fig.savefig(tta_plot_save_path)
fig.tight_layout()
plt.close(fig)
# Path: dg_tta/tta/config_log_utils.py
def get_parameters_save_path(save_path, sample_id, ensemble_idx):
sample_id = sample_id.split('/')[-1]
tta_parameters_save_path = (
save_path / f"{sample_id}__ensemble_idx_{ensemble_idx}_tta_parameters.pt"
)
return tta_parameters_save_path
# Path: dg_tta/tta/model_utils.py
def get_model_from_network(network, modifier_fn_module, parameters=None):
model = deepcopy(network)
if parameters is not None:
if not isinstance(model, OptimizedModule):
model.load_state_dict(parameters[0])
else:
model._orig_mod.load_state_dict(parameters[0])
# Register hook that modifies the input prior to custom augmentation
modify_tta_input_fn = modifier_fn_module.ModifierFunctions.modify_tta_input_fn
register_forward_pre_hook_at_beginning(
model, hookify(modify_tta_input_fn, "forward_pre_hook")
)
# Register hook that modifies the output of the model
modfify_tta_model_output_fn = (
modifier_fn_module.ModifierFunctions.modfify_tta_model_output_fn
)
register_forward_hook_at_beginning(
model, hookify(modfify_tta_model_output_fn, "forward_hook")
)
return model
# Path: dg_tta/tta/model_utils.py
def buffer_running_stats(m):
_id = id(m)
if (
hasattr(m, "running_mean")
and hasattr(m, "running_var")
and not _id in running_stats_buffer
):
if m.running_mean is not None and m.running_var is not None:
running_stats_buffer[_id] = [m.running_mean.data, m.running_var.data]
# Path: dg_tta/tta/model_utils.py
def apply_running_stats(m):
_id = id(m)
if (
hasattr(m, "running_mean")
and hasattr(m, "running_var")
and _id in running_stats_buffer
):
m.running_mean.data.copy_(other=running_stats_buffer[_id][0])
m.running_var.data.copy_(
other=running_stats_buffer[_id][1]
) # Copy into .data to prevent backprop errors
del running_stats_buffer[_id]
# Path: dg_tta/tta/nnunet_utils.py
def run_inference(tta_sample, model, predictor, all_tta_parameter_paths):
save_probabilities = False
tta_parameters = []
for _path in all_tta_parameter_paths:
tta_parameters.extend(torch.load(_path))
predictor.network = deepcopy(model)
predictor.list_of_parameters = tta_parameters
return predict_from_data_iterator(predictor, tta_sample, save_probabilities)
# Path: dg_tta/tta/nnunet_utils.py
def load_network(weights_file, device):
pretrained_weights_filepath = Path(*Path(weights_file).parts[:-2])
fold = Path(weights_file).parts[-2].replace("fold_", "")
use_folds = [int(fold)] if fold.isnumeric() else fold
checkpoint_name = Path(weights_file).parts[-1]
configuration = Path(weights_file).parts[-3].split("__")[-1]
perform_everything_on_gpu = True
verbose = False
predictor = nnUNetPredictor(
perform_everything_on_gpu=perform_everything_on_gpu,
device=device,
verbose_preprocessing=verbose,
)
predictor.initialize_from_trained_model_folder(
pretrained_weights_filepath, use_folds, checkpoint_name
)
parameters = predictor.list_of_parameters
plans_manager = predictor.plans_manager
network = predictor.network
patch_size = plans_manager.get_configuration(configuration).patch_size
return predictor, patch_size, network, parameters
# Path: dg_tta/tta/nnunet_utils.py
def load_tta_data(config, dataset_raw_path, predictor, tta_across_all_samples=False):
with suppress_stdout():
ts_iterator, ts_data_len = get_data_iterator(
config,
predictor,
config["tta_data_filepaths"],
dataset_raw_path,
"imagesTs",
)
tr_iterator, tr_data_len = get_data_iterator(
config,
predictor,
config["tta_data_filepaths"],
dataset_raw_path,
"imagesTr",
)
if tta_across_all_samples:
data = list(ts_iterator) + list(tr_iterator), ts_data_len + tr_data_len
else:
data = chain(ts_iterator, tr_iterator), ts_data_len + tr_data_len
return data
# Path: dg_tta/tta/tta.py
import re
import importlib
import shutil
import json
import torch
import torch.nn.functional as F
import wandb
from itertools import tee
from pathlib import Path
from contextlib import nullcontext
from tqdm import trange, tqdm
from nnunetv2.evaluation.evaluate_predictions import compute_metrics_on_folder_simple
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
from dg_tta.utils import disable_internal_augmentation
from dg_tta.gin import gin_aug
from dg_tta.tta.torch_utils import (
get_batch,
map_label,
dice_coeff,
soft_dice_loss,
fix_all,
release_all,
release_norms,
get_map_idxs,
get_imgs,
)
from dg_tta.tta.augmentation_utils import get_disp_field, get_rand_affine
from dg_tta.tta.config_log_utils import (
get_global_idx,
wandb_run_is_available,
plot_run_results,
get_parameters_save_path,
)
from dg_tta.tta.model_utils import (
get_model_from_network,
buffer_running_stats,
apply_running_stats,
)
from dg_tta.tta.nnunet_utils import (
run_inference,
load_network,
load_tta_data,
)
for epoch in tbar:
model.train()
global_idx = get_global_idx(
[
(smp_idx, num_samples),
(ensemble_idx, ensemble_count),
(epoch, num_epochs),
]
)
if wandb_run_is_available():
wandb.log({"ref_epoch_idx": epoch}, global_idx)
step_losses = []
if epoch == start_tta_at_epoch:
model.apply(fix_all)
if config["params_with_grad"] == "all":
model.apply(release_all)
elif config["params_with_grad"] == "norms":
model.apply(release_norms)
elif config["params_with_grad"] == "encoder":
model.encoder.apply(release_all)
else:
raise ValueError()
grad_params = {
id(p): p.numel() for p in model.parameters() if p.requires_grad
}
tqdm.write(
f"Released #{sum(list(grad_params.values()))/1e6:.2f} million trainable params"
)
for _ in range(patches_to_be_accumulated):
with torch.no_grad():
imgs, _ = get_batch(
tta_tens_list,
torch.randperm(len(tta_tens_list))[:B],
patch_size,
fixed_patch_idx=None,
device=device,
)
imgs = torch.cat(imgs, dim=0)
target_a = calc_branch(
"branch_a",
config,
model,
intensity_aug_func,
identity_grid,
patch_size,
B,
label_mapping,
optimized_labels,
modifier_fn_module,
imgs,
device,
)
target_b = calc_branch(
"branch_b",
config,
model,
intensity_aug_func,
identity_grid,
patch_size,
B,
label_mapping,
optimized_labels,
modifier_fn_module,
imgs,
device,
)
# Apply consistency loss
common_content_mask = (
target_a.sum(1, keepdim=True) > 0.0
).float() * (target_b.sum(1, keepdim=True) > 0.0).float()
sm_a = target_a.softmax(1) * common_content_mask
sm_b = target_b.softmax(1) * common_content_mask
loss = 1 - soft_dice_loss(sm_a, sm_b)[:, START_CLASS:].mean()
loss_accum = loss / patches_to_be_accumulated
step_losses.append(loss.detach().cpu())
if epoch >= start_tta_at_epoch:
loss_accum.backward()
if epoch >= start_tta_at_epoch:
optimizer.step()
optimizer.zero_grad()
tta_losses[epoch] = torch.stack(step_losses).mean().item()
with torch.inference_mode():
model.eval()
for _ in range(tta_eval_patches):
imgs, labels = get_batch(
tta_tens_list,
torch.randperm(len(tta_tens_list))[:B],
patch_size,
fixed_patch_idx="center", # This is just for evaluation purposes
device=device,
)
imgs = torch.cat(imgs, dim=0)
none_labels = [l is None for l in labels]
filtered_imgs = imgs[~torch.as_tensor(none_labels)]
filtered_labels = [
l for flag, l in zip(none_labels, labels) if not flag
]
if len(filtered_imgs) == 0:
eval_dices[epoch] = float("nan")
continue
else:
filtered_labels = torch.cat(filtered_labels, dim=0)
output_eval = model(filtered_imgs)
if isinstance(output_eval, tuple):
| output_eval = output_eval[0] |
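For orientation, the two-branch consistency objective optimised in the loop above reduces to a few lines on dummy tensors. The sketch below is illustrative only: it assumes the soft_dice_loss function listed earlier is importable from the installed package, and it ignores augmentation, the content mask and gradient accumulation.

import torch
from dg_tta.tta.torch_utils import soft_dice_loss  # as listed above

B, C, D, H, W = 2, 3, 8, 8, 8
logits_a = torch.randn(B, C, D, H, W, requires_grad=True)  # branch A predictions
logits_b = torch.randn(B, C, D, H, W, requires_grad=True)  # branch B predictions

sm_a = logits_a.softmax(1)
sm_b = logits_b.softmax(1)

# 1 minus the per-class soft dice agreement between the two branches, foreground classes only
loss = 1 - soft_dice_loss(sm_a, sm_b)[:, 1:].mean()
loss.backward()
print(loss.item())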
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tommy-xq/SA2VP
# Path: vpt_main/src/models/vit_prompt/vit.py
class PromptedVisionTransformer(VisionTransformer):
def __init__(
self, prompt_cfg, model_type,
img_size=224, num_classes=21843, vis=False
):
assert prompt_cfg.VIT_POOL_TYPE == "original"
super(PromptedVisionTransformer, self).__init__(
model_type, img_size, num_classes, vis)
if prompt_cfg is None:
raise ValueError("prompt_cfg cannot be None if using PromptedVisionTransformer")
self.prompt_cfg = prompt_cfg
vit_cfg = CONFIGS[model_type]
self.transformer = PromptedTransformer(
prompt_cfg, vit_cfg, img_size, vis)
def forward(self, x, vis=False):
x, attn_weights = self.transformer(x)
x = x[:, 0]
logits = self.head(x)
if not vis:
return logits
return logits, attn_weights
# Path: vpt_main/src/models/vit_prompt/swin_transformer.py
class PromptedSwinTransformer(SwinTransformer):
def __init__(
self, prompt_config, img_size=224, patch_size=4, in_chans=3,
num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4., qkv_bias=True,
qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, **kwargs
):
if prompt_config.LOCATION == "pad":
img_size += 2 * prompt_config.NUM_TOKENS
super(PromptedSwinTransformer, self).__init__(
img_size, patch_size, in_chans, num_classes, embed_dim, depths,
num_heads, window_size, mlp_ratio, qkv_bias, qk_scale, drop_rate,
attn_drop_rate, drop_path_rate, norm_layer, ape, patch_norm,
use_checkpoint, **kwargs
)
self.prompt_config = prompt_config
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
if self.prompt_config.LOCATION == "add":
num_tokens = self.embeddings.position_embeddings.shape[1]
elif self.prompt_config.LOCATION == "add-1":
num_tokens = 1
else:
num_tokens = self.prompt_config.NUM_TOKENS
self.prompt_dropout = Dropout(self.prompt_config.DROPOUT)
# if project the prompt embeddings
if self.prompt_config.PROJECT > -1:
# only for prepend / add
prompt_dim = self.prompt_config.PROJECT
self.prompt_proj = nn.Linear(
prompt_dim, embed_dim)
nn.init.kaiming_normal_(
self.prompt_proj.weight, a=0, mode='fan_out')
else:
self.prompt_proj = nn.Identity()
# build layers
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
input_resolution=(
self.patches_resolution[0] // (2 ** i_layer),
self.patches_resolution[1] // (2 ** i_layer)
),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
block_module=PromptedSwinTransformerBlock,
downsample=PromptedPatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
num_prompts=num_tokens,
prompt_location=self.prompt_config.LOCATION,
deep_prompt=self.prompt_config.DEEP
)
self.layers.append(layer)
if self.prompt_config.INITIATION == "random":
val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + embed_dim)) # noqa
if self.prompt_config.LOCATION == "below":
self.patch_embed.proj = Conv2d(
in_channels=num_tokens+3,
out_channels=embed_dim,
kernel_size=patch_size,
stride=patch_size
)
# add xavier_uniform initialization
nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
nn.init.zeros_(self.patch_embed.proj.bias)
self.prompt_embeddings = nn.Parameter(torch.zeros(
1, num_tokens, img_size[0], img_size[1]))
nn.init.uniform_(self.prompt_embeddings.data, -val, val)
elif self.prompt_config.LOCATION == "pad":
self.prompt_embeddings_tb = nn.Parameter(torch.zeros(
1, 3, 2 * num_tokens, img_size[0]
))
self.prompt_embeddings_lr = nn.Parameter(torch.zeros(
1, 3, img_size[0] - 2 * num_tokens, 2 * num_tokens
))
nn.init.uniform_(self.prompt_embeddings_tb.data, 0.0, 1.0)
nn.init.uniform_(self.prompt_embeddings_lr.data, 0.0, 1.0)
self.prompt_norm = tv.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
else:
# for "prepend"
self.prompt_embeddings = nn.Parameter(torch.zeros(
1, num_tokens, embed_dim))
nn.init.uniform_(self.prompt_embeddings.data, -val, val)
if self.prompt_config.DEEP:
# NOTE: only for 4 layers, need to be more flexible
self.deep_prompt_embeddings_0 = nn.Parameter(
torch.zeros(
depths[0] - 1, num_tokens, embed_dim
))
nn.init.uniform_(
self.deep_prompt_embeddings_0.data, -val, val)
self.deep_prompt_embeddings_1 = nn.Parameter(
torch.zeros(
depths[1], num_tokens, embed_dim * 2
))
nn.init.uniform_(
self.deep_prompt_embeddings_1.data, -val, val)
self.deep_prompt_embeddings_2 = nn.Parameter(
torch.zeros(
depths[2], num_tokens, embed_dim * 4
))
nn.init.uniform_(
self.deep_prompt_embeddings_2.data, -val, val)
self.deep_prompt_embeddings_3 = nn.Parameter(
torch.zeros(
depths[3], num_tokens, embed_dim * 8
))
nn.init.uniform_(
self.deep_prompt_embeddings_3.data, -val, val)
else:
raise ValueError("Other initiation scheme is not supported")
def incorporate_prompt(self, x):
# combine prompt embeddings with image-patch embeddings
B = x.shape[0]
if self.prompt_config.LOCATION == "prepend":
# after CLS token, all before image patches
x = self.get_patch_embeddings(x) # (batch_size, n_patches, hidden_dim)
prompt_embd = self.prompt_dropout(
self.prompt_embeddings.expand(B, -1, -1))
x = torch.cat((
prompt_embd, x
), dim=1)
# (batch_size, n_prompt + n_patches, hidden_dim)
elif self.prompt_config.LOCATION == "add":
# add to the input patches + CLS
# assert self.prompt_config.NUM_TOKENS == x.shape[1]
x = self.get_patch_embeddings(x) # (batch_size, 1 + n_patches, hidden_dim)
x = x + self.prompt_dropout(
self.prompt_embeddings.expand(B, -1, -1))
# (batch_size, n_patches, hidden_dim)
elif self.prompt_config.LOCATION == "add-1":
x = self.get_patch_embeddings(x) # (batch_size, 1 + n_patches, hidden_dim)
L = x.shape[1]
prompt_emb = self.prompt_dropout(
self.prompt_embeddings.expand(B, -1, -1))
x = x + prompt_emb.expand(-1, L, -1)
# (batch_size, cls_token + n_patches, hidden_dim)
elif self.prompt_config.LOCATION == "pad":
prompt_emb_lr = self.prompt_norm(
self.prompt_embeddings_lr).expand(B, -1, -1, -1)
prompt_emb_tb = self.prompt_norm(
self.prompt_embeddings_tb).expand(B, -1, -1, -1)
x = torch.cat((
prompt_emb_lr[:, :, :, :self.num_tokens],
x, prompt_emb_lr[:, :, :, self.num_tokens:]
), dim=-1)
x = torch.cat((
prompt_emb_tb[:, :, :self.num_tokens, :],
x, prompt_emb_tb[:, :, self.num_tokens:, :]
), dim=-2)
x = self.get_patch_embeddings(x) # (batch_size, n_patches, hidden_dim)
elif self.prompt_config.LOCATION == "below":
# (batch, 3, height, width)
x = torch.cat((
x,
self.prompt_norm(
self.prompt_embeddings).expand(B, -1, -1, -1),
), dim=1)
x = self.get_patch_embeddings(x)
# (batch_size, n_patches, hidden_dim)
else:
raise ValueError("Other prompt locations are not supported")
return x
def get_patch_embeddings(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
return x
def train(self, mode=True):
# set train status for this class: disable all but the prompt-related modules
if mode:
# training:
# first set all to eval and set the prompt to train later
for module in self.children():
module.train(False)
self.prompt_proj.train()
self.prompt_dropout.train()
else:
# eval:
for module in self.children():
module.train(mode)
def forward_features(self, x):
x = self.incorporate_prompt(x)
if self.prompt_config.LOCATION == "prepend" and self.prompt_config.DEEP:
for layer, deep_prompt_embd in zip(
self.layers, [
self.deep_prompt_embeddings_0,
self.deep_prompt_embeddings_1,
self.deep_prompt_embeddings_2,
self.deep_prompt_embeddings_3
]
):
deep_prompt_embd = self.prompt_dropout(deep_prompt_embd)
x = layer(x, deep_prompt_embd)
else:
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def load_state_dict(self, state_dict, strict):
if self.prompt_config.LOCATION == "below":
# modify state_dict first [768, 4, 16, 16]
conv_weight = state_dict["patch_embed.proj.weight"]
conv_weight = torch.cat(
(conv_weight, self.patch_embed.proj.weight[:, 3:, :, :]),
dim=1
)
state_dict["patch_embed.proj.weight"] = conv_weight
super(PromptedSwinTransformer, self).load_state_dict(state_dict, strict)
# Path: vpt_main/src/models/vit_prompt/vit_moco.py
def vit_base(prompt_cfg, **kwargs):
model = PromptedVisionTransformerMoCo(
prompt_cfg,
patch_size=16, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
# Path: vpt_main/src/models/vit_prompt/vit_mae.py
def build_model(model_type, prompt_cfg):
if "vitb" in model_type:
return vit_base_patch16(prompt_cfg)
elif "vitl" in model_type:
return vit_large_patch16(prompt_cfg)
elif "vith" in model_type:
return vit_huge_patch14(prompt_cfg)
# Path: vpt_main/src/models/vit_adapter/vit_mae.py
def build_model(model_type, adapter_cfg):
if "vitb" in model_type:
return vit_base_patch16(adapter_cfg)
elif "vitl" in model_type:
return vit_large_patch16(adapter_cfg)
elif "vith" in model_type:
return vit_huge_patch14(adapter_cfg)
# Path: vpt_main/src/models/vit_adapter/vit_moco.py
def vit_base(adapter_cfg, **kwargs):
model = ADPT_VisionTransformerMoCo(
adapter_cfg,
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
# Path: vpt_main/src/models/vit_adapter/vit.py
class ADPT_VisionTransformer(nn.Module):
def __init__(
self, model_type,
img_size=224, num_classes=21843, vis=False, adapter_cfg=None
):
super(ADPT_VisionTransformer, self).__init__()
config = CONFIGS[model_type]
self.num_classes = num_classes
self.classifier = config.classifier
self.transformer = ADPT_Transformer(config, img_size, vis, adapter_cfg)
self.head = Linear(config.hidden_size, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, vis=False):
x, attn_weights = self.transformer(x)
logits = self.head(x[:, 0])
if not vis:
return logits
return logits, attn_weights
def load_from(self, weights):
with torch.no_grad():
self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))
posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
posemb_new = self.transformer.embeddings.position_embeddings
if posemb.size() == posemb_new.size():
self.transformer.embeddings.position_embeddings.copy_(posemb)
else:
logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
ntok_new = posemb_new.size(1)
if self.classifier == "token":
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))
for bname, block in self.transformer.encoder.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=uname)
if self.transformer.embeddings.hybrid:
self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
gn_weight = np2th(weights["gn_root/scale"]).view(-1)
gn_bias = np2th(weights["gn_root/bias"]).view(-1)
self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)
for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=bname, n_unit=uname)
# Path: vpt_main/src/models/build_vit_backbone.py
import numpy as np
import torch
import os
from .vit_backbones.swin_transformer_conv import SwinTransformer
from .vit_backbones.vit import VisionTransformer
from .vit_backbones.vit_moco import vit_base
from .vit_backbones.vit_mae import build_model as mae_vit_model
from .vit_prompt.vit import PromptedVisionTransformer
from .vit_prompt.swin_transformer import PromptedSwinTransformer
from .vit_prompt.vit_moco import vit_base as prompt_vit_base
from .vit_prompt.vit_mae import build_model as prompt_mae_vit_model
from .vit_adapter.vit_mae import build_model as adapter_mae_vit_model
from .vit_adapter.vit_moco import vit_base as adapter_vit_base
from .vit_adapter.vit import ADPT_VisionTransformer
embed_dim = 96
num_layers = 4
elif model_type == "swins_imagenet":
model = SwinTransformer(
img_size=crop_size,
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
drop_path_rate=0.3,
num_classes=-1,
)
embed_dim = 96
num_layers = 4
elif model_type == "swinb_imagenet_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet_384":
model = SwinTransformer(
img_size=384,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet22k_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
            drop_path_rate=0.2,  # tried values from 0.5 down to 0; 0.1 works best on CIFAR.
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinb_imagenet22k_384":
model = SwinTransformer(
img_size=384,
embed_dim=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=12,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 128
num_layers = 4
elif model_type == "swinl_imagenet22k_224":
model = SwinTransformer(
img_size=crop_size,
embed_dim=192,
depths=[2, 2, 18, 2],
num_heads=[6, 12, 24, 48],
window_size=7,
drop_path_rate=0.5,
num_classes=-1,
)
embed_dim = 192
num_layers = 4
feat_dim = int(embed_dim * 2 ** (num_layers - 1))
# load checkpoint
model_w = os.path.join(model_root, MODEL_ZOO[model_type])
checkpoint = torch.load(model_w, map_location='cpu')
state_dict = checkpoint['model']
if crop_size == 448:
for k in list(state_dict.keys()):
if "attn_mask" not in k:
# remove prefix
state_dict[k] = state_dict[k]
# delete renamed or unused k
else:
del state_dict[k]
# rename some keys for ssl models
if model_type.endswith("ssl"):
# rename moco pre-trained keys
for k in list(state_dict.keys()):
# retain only encoder_q up to before the embedding layer
if k.startswith('encoder.'):
# remove prefix
state_dict[k[len("encoder."):]] = state_dict[k]
# delete renamed or unused k
del state_dict[k]
model.load_state_dict(state_dict, strict=False)
return model, feat_dim
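# --- Illustrative check (not part of the repository) ------------------------
# The Swin feature dimension computed above doubles at each of the
# (num_layers - 1) patch-merging stages: feat_dim = embed_dim * 2 ** (num_layers - 1),
# i.e. 96 -> 768, 128 -> 1024 and 192 -> 1536 for the num_layers = 4 variants listed above.
assert [e * 2 ** (4 - 1) for e in (96, 128, 192)] == [768, 1024, 1536]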
def build_vit_sup_models(
model_type, crop_size, prompt_cfg=None, model_root=None, adapter_cfg=None, load_pretrain=True, vis=False
):
# image size is the size of actual image
m2featdim = {
"sup_vitb16_224": 768,
"sup_vitb16": 768,
"sup_vitl16_224": 1024,
"sup_vitl16": 1024,
"sup_vitb8_imagenet21k": 768,
"sup_vitb16_imagenet21k": 768,
"sup_vitb32_imagenet21k": 768,
| "sup_vitl16_imagenet21k": 1024, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: crashdev226/freelancer-create-account-bot
# Path: FreelancerBot.py
class FreelancerBot:
# Constructor
def __init__(self):
pass
def create(self):
profile = None
with open("./profile.json", "r+") as file:
profile = json.load(file)["freelancer"]
print(profile["skills"])
pag.FAILSAFE = False
if pag.confirm("Are you ready? Please go to browser.") != "OK":
exit()
pag.hotkey("ctrl", "t")
pag.typewrite("freelancer.com\n")
time.sleep(3)
pag.click(1348, 107)
pag.hotkey("ctrl", "t")
pag.typewrite("yopmail.com\n")
time.sleep(3)
pag.click(523, 552)
time.sleep(2)
pag.click(831, 572)
time.sleep(1)
pag.click(1289, 710)
time.sleep(1)
pag.click(1073, 570)
time.sleep(0.5)
pag.hotkey("ctrl", "shift", "tab")
time.sleep(0.5)
pag.click(870, 396)
pag.hotkey("ctrl", "v")
pag.click(859, 477)
pag.typewrite("pwd1234!@#$")
time.sleep(0.5)
pag.click(807, 544)
time.sleep(0.5)
pag.click(807, 544)
time.sleep(0.5)
pag.click(959, 611)
# Page 2
time.sleep(1.5)
pag.click(912, 395)
time.sleep(1)
pag.click(939, 484)
time.sleep(1)
pag.click(963, 336)
if pag.confirm("Continue?") != "OK":
exit()
time.sleep(5)
# pag.click(557, 450)
# time.sleep(4)
# pag.click(557, 450)
# skill input for loop
for skill in profile["skills"]:
time.sleep(0.5)
pag.click(472, 295, 3)
pag.typewrite(skill)
time.sleep(1)
pag.click(853, 444)
if pag.confirm("Continue?") != "OK":
exit()
pag.click(1466, 968)
# Page 3
time.sleep(2)
pag.click(1093, 742)
# Page 4
time.sleep(2)
pag.click(784, 614)
pag.typewrite(profile["firstName"])
# time.sleep(0.5)
pag.click(775, 712)
pag.typewrite(profile["lastName"])
# time.sleep(2)
pag.click(1189, 848)
# Page 5
time.sleep(0.5)
pag.click(1187, 856)
# Page 5-2
time.sleep(0.5)
pag.click(725, 549)
pyperclip.copy(profile["heading"])
pag.hotkey("ctrl", "v")
# time.sleep(2)
pag.click(795, 677)
pyperclip.copy(profile["description"])
pag.hotkey("ctrl", "v")
time.sleep(0.5)
pag.click(1189, 848)
# Page 6
time.sleep(2.5)
pag.click(739, 667)
pag.typewrite(profile["birth"])
time.sleep(0.5)
pag.click(1185, 611)
time.sleep(0.5)
pag.click(1185, 811)
# Page 7
if pag.confirm("Continue?") != "OK":
exit()
time.sleep(0.8)
pag.click(1189, 924)
# Page 8
time.sleep(0.5)
pag.hotkey("ctrl", "tab")
time.sleep(4)
pag.click(525, 211)
time.sleep(3)
pag.click(653, 454)
# Emali Verify
time.sleep(4)
pag.hotkey("ctrl", "w")
time.sleep(0.5)
pag.hotkey("ctrl", "w")
time.sleep(0.5)
pag.click(1185, 666)
# Page 9
time.sleep(1)
pag.click(1432, 894)
# Page 10
time.sleep(1)
pag.click(1452, 1015)
# Page 11
time.sleep(5)
pag.click(816, 544)
time.sleep(5)
pag.click(1480, 358) # showcase
pag.alert("Done")
# while True:
# print(pag.position())
# Path: UpworkBot.py
class UpworkBot:
# Constructor
def __init__(self):
pass
def create(self):
profile = None
with open("./profile.json", "r+") as file:
profile = json.load(file)["upwork"]
print(profile["skills"])
pag.FAILSAFE = False
if pag.confirm("Are you ready? Please go to browser.") != "OK":
exit()
pag.hotkey("ctrl", "t")
pag.typewrite("upwork.com\n")
time.sleep(1)
pag.click(1770,53)
time.sleep(1)
pag.click(1750,260)
time.sleep(5)
pag.click(1285,739)
time.sleep(1)
pag.click(1675,110)
time.sleep(1)
pag.click(1100,380)
time.sleep(1)
pag.click(952,514)
pag.hotkey("ctrl", "t")
pag.typewrite("addy.io\n")
time.sleep(1)
pag.click(1438,122)
time.sleep(1)
pag.click(52,272)
time.sleep(1)
pag.click(1674,195)
time.sleep(1)
pag.click(891,469)
time.sleep(1)
pag.click(833,631)
time.sleep(1)
pag.click(773,724)
time.sleep(0.5)
pag.typewrite("louis")
time.sleep(0.5)
pag.click(830,760)
time.sleep(1)
pag.click(787,797)
time.sleep(1)
pag.click(648,401)
time.sleep(0.5)
pag.hotkey("ctrl", "w")
time.sleep(0.5)
pag.click(718,513)
pag.hotkey("ctrl", "v")
pag.click(708,453)
pag.typewrite("Louis")
pag.click(1085,450)
pag.typewrite("Winkler")
pag.click(830,569)
pag.typewrite("pwd1234!@#$")
time.sleep(0.5)
pag.click(661,690)
time.sleep(0.5)
pag.click(665,735)
time.sleep(0.5)
pag.click(969,808)
if pag.confirm("Verify email and click Next") != "OK":
exit()
pag.click(383,704)
time.sleep(1)
pag.click(1393,587)
time.sleep(1)
pag.click(1860,1000)
time.sleep(1)
pag.click(800,600)
time.sleep(1)
pag.click(1860,1000)
time.sleep(1)
pag.click(512,541)
time.sleep(1)
pag.click(343,745)
time.sleep(1)
pag.click(1860,1000)
time.sleep(1)
pag.click(572,545)
time.sleep(1)
pag.click(412,483)
pag.typewrite(profile["heading"])
pag.click(1860,1000)
time.sleep(1)
#Experience - 1
pag.click(570,600)
time.sleep(1)
pag.click(722,265)
pag.typewrite("web")
time.sleep(1)
pag.click(674,358)
time.sleep(0.5)
pag.click(660,362)
pag.typewrite(profile["experience"][0]["title"])
pag.click(690,457)
pag.typewrite(profile["experience"][0]["city"])
# if isinstance(profile["experience"][0]["country"], str):
pag.click(1075,458)
time.sleep(0.5)
pag.click(1080,520)
pag.typewrite(profile["experience"][0]["country"])#end if
time.sleep(1)
pag.click(1039,565)
time.sleep(0.5)
pag.click(626,497)
time.sleep(0.5)
pag.click(730,584)
time.sleep(0.5)
pag.click(680,637)
time.sleep(0.5)
pag.click(878,583)
time.sleep(0.5)
pag.click(840,690)
time.sleep(0.5)
pag.click(705,714)
pag.typewrite(profile["experience"][0]["description"])
time.sleep(0.5)
pag.click(1257,937)
#Experience - 2
time.sleep(0.5)
pag.click(337,605)
time.sleep(1)
pag.click(737,270)
time.sleep(0.5)
pag.click(715,357)
time.sleep(0.5)
pag.click(697,365)
pag.typewrite(profile["experience"][1]["title"])
pag.click(668,455)
pag.typewrite(profile["experience"][1]["city"])
time.sleep(0.5)
pag.click(684,600)
time.sleep(0.5)
pag.click(684,645)
time.sleep(0.5)
pag.click(865,603)
time.sleep(0.5)
pag.click(845,809)
time.sleep(0.5)
pag.click(1024,604)
time.sleep(0.5)
pag.click(1012,646)
time.sleep(0.5)
pag.click(1189,597)
time.sleep(0.5)
pag.click(1183,711)
time.sleep(0.5)
pag.click(822,719)
pag.typewrite(profile["experience"][1]["description"])
time.sleep(0.5)
pag.click(1264,935)
time.sleep(1)
pag.click(1778,1002)#Next Pgae
time.sleep(1)
pag.click(544,548)
time.sleep(1)
pag.click(772,293)
pyperclip.copy(profile["education"]["university"])
pag.hotkey("ctrl", "v")
time.sleep(1.5)
pag.click(711,352)
time.sleep(0.5)
pag.click(688,388)
time.sleep(0.5)
pag.typewrite(profile["education"]["degree"])
pag.click(721,423)
time.sleep(0.5)
pag.click(716,480)
time.sleep(0.5)
pag.typewrite(profile["education"]["field"])
time.sleep(1)
pag.click(752,529)
time.sleep(0.5)
pag.click(735,571)
time.sleep(0.5)
pag.typewrite(profile["education"]["start"])
time.sleep(0.5)
pag.click(685,685)
time.sleep(0.5)
pag.click(1092,576)
time.sleep(0.5)
pag.typewrite(profile["education"]["end"])
time.sleep(0.5)
pag.click(1054,691)
time.sleep(0.5)
pag.click(826,716)
pag.typewrite(profile["education"]["description"])
time.sleep(0.5)
pag.click(1263,905)
time.sleep(0.5)
pag.click(1820,999)
time.sleep(0.5)
pag.click(1039,541)
time.sleep(0.5)
pag.click(1054,712)
time.sleep(0.5)
pag.click(1818,1004)#Next Page
time.sleep(1)
pag.click(478,509)
time.sleep(0.5)
        for i in range(0, len(profile["skills"])):
pag.typewrite(profile["skills"][i])
time.sleep(0.5)
pag.press('down')
time.sleep(0.5)
pag.typewrite('\n')
time.sleep(0.5)
pag.click(1800,1003)#Next Page
time.sleep(0.5)
pag.click(534,543)
pyperclip.copy(profile["description"])
pag.hotkey("ctrl", "v")
time.sleep(0.5)
pag.click(1793,1001)
time.sleep(0.5)
pag.click(394,537)
time.sleep(0.5)
pag.click(1800,1000)#Next Page
time.sleep(0.5)
pag.click(1509,445)
time.sleep(0.5)
pag.click(1800,1000)#Next Page
time.sleep(0.5)
pag.click(682,599)
pag.typewrite(profile["address"])
time.sleep(0.5)
pag.click(633,687)
time.sleep(0.5)
pag.typewrite(profile["city"])
time.sleep(1)
pag.click(637,748)
time.sleep(0.5)
pag.click(1310,686)
pag.typewrite(profile["zip"])
pag.click(781,775)
pag.typewrite(profile["phone"])
time.sleep(0.5)
pag.click(424,507)
time.sleep(0.5)
pag.click(773,477)
time.sleep(3.5)
pag.click(897,170,2)#here
time.sleep(1.5)
pag.click(1226,830)
time.sleep(1.5)
pag.click(1800,1000)#Next Page
time.sleep(1)
pag.click(471,408)
time.sleep(1)
pag.click(900,550)
time.sleep(5)
        for j in range(len(profile["portfolio"])):
pag.click(1582,315)#blank void
time.sleep(0.5)
for i in range(25):
pag.hotkey("tab")
time.sleep(0.2)
pag.typewrite("\n")
time.sleep(1)
pag.click(867,318)
pag.typewrite(profile["portfolio"][j]["title"])
pag.click(791,516)
pag.typewrite(profile["portfolio"][j]["date"])
time.sleep(0.5)
pag.click(1296,641)
time.sleep(2.5)
pag.click(837,445)
time.sleep(0.5)
pag.click(1339,776)
time.sleep(2)
pag.click(827,982)
pag.typewrite(profile["portfolio"][j]["heading"])
pag.click(825,871)
pag.typewrite(profile["portfolio"][j]["url"])
pag.click(1290,697)
time.sleep(0.5)
pag.click(967,731)
time.sleep(0.5)
pag.click(865,766)
time.sleep(0.5)
pag.click(865,766) #repeat?
time.sleep(0.5)
pag.click(872,799)
time.sleep(0.5)
pag.click(907,331) #browse click
time.sleep(2.5)
pag.click(362,472)
pag.typewrite(profile["portfolio"][j]["file"])
time.sleep(0.5)
pag.click(791,508)
if pag.confirm("Continue?") != "OK":#7~20s
exit()
pag.hotkey("end")
time.sleep(0.5)
pag.click(1333,551)
time.sleep(2)
pag.hotkey("end")
time.sleep(0.5)
pag.click(1360,548)
time.sleep(6)
        for i in range(len(profile["certificate"])):
pag.hotkey("end")
time.sleep(0.5)
pag.hotkey("pageup")
time.sleep(0.5)
pag.click(839,486)
time.sleep(2)
pag.hotkey("f5")
time.sleep(6)
pag.click(787,489)
time.sleep(1)
pag.typewrite(profile["certificate"][i])
pag.click(788,582)
time.sleep(0.5)
pag.click(708,629)
pag.typewrite(profile["certificate"][i])
time.sleep(0.5)
pag.click(1240,728)
time.sleep(7)
pag.hotkey("end")
time.sleep(0.5)
pag.click(979,554)
time.sleep(2)
pag.click(897,409)
pag.typewrite(profile["other_exp"]["title"])
pag.click(776,524)
pag.typewrite(profile["other_exp"]["description"])
time.sleep(0.5)
pag.click(1249,823)
time.sleep(3)
pag.alert("Done")
# while True:
# print(pag.position())
# Path: bot_create.py
import argparse
from FreelancerBot import FreelancerBot
from UpworkBot import UpworkBot
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--type", help="Specify whether freelancer or upwork.")
args = parser.parse_args()
| account_type=args.type |
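The remaining lines of bot_create.py are not shown here. Purely as an illustration (this is not the repository's actual continuation), the parsed --type flag could dispatch to one of the two imported bot classes roughly like this, building on the parser defined above:

account_type = args.type
if account_type == "freelancer":
    FreelancerBot().create()
elif account_type == "upwork":
    UpworkBot().create()
else:
    parser.error("--type must be 'freelancer' or 'upwork'")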
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ChatClue/ChatClue
# Path: utils/os/helpers.py
class OSHelper:
"""
Provides utility methods for operating system level operations, particularly file management.
This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.
"""
@staticmethod
def find_closest_image(directory, target_time):
"""
Finds the closest image file in a directory based on the target time.
This function searches through all JPG files in the specified directory and
selects the one whose creation time is closest to, but not earlier than,
the target time.
Args:
directory (str): The directory path where the image files are stored.
            target_time (float): The target time (in seconds since epoch) to compare the file modification times against.
Returns:
str: The path of the closest image file. Returns None if no suitable file is found.
"""
closest_file = None
closest_time_diff = None
# Iterate over each file in the specified directory
for filename in os.listdir(directory):
if filename.lower().endswith(".jpg"): # Check if the file is a JPG image
filepath = os.path.join(directory, filename)
filetime = os.path.getmtime(filepath) # Get the modification time of the file
# Check if the file's time is later than the target time and if it's the closest so far
if filetime > target_time:
logging.info(f"File is close: {filepath} - Time: {filetime}")
time_diff = filetime - target_time
if closest_time_diff is None or time_diff < closest_time_diff:
closest_file = filepath
closest_time_diff = time_diff
return closest_file
@staticmethod
def convert_image_to_base64(filepath):
"""
Converts an image file to a Base64 encoded string.
This function reads the image file from the given filepath, encodes it in Base64,
and then decodes it to a UTF-8 string, which can be easily used for data transfer
or embedding in web pages.
Args:
filepath (str): The path of the image file to be converted.
Returns:
str: The Base64 encoded string of the image.
"""
with open(filepath, "rb") as image_file:
# Read the file and encode it in Base64
return base64.b64encode(image_file.read()).decode("utf-8")
@staticmethod
def clear_orphaned_audio_files():
"""
Removes all audio files in a specific directory.
This method is used to clear out any leftover audio files in the 'tmp/audio' directory.
It iterates through all files in the specified directory and deletes them.
"""
# Specify the directory path for audio files
directory_path = 'tmp/audio'
# Iterate through and remove each file in the directory
for filename in os.listdir(directory_path):
file_path = os.path.join(directory_path, filename)
try:
os.remove(file_path)
logging.info(f"Removed file: {file_path}")
except OSError as e:
logging.info(f"Error removing file {file_path}: {e}")
@staticmethod
def clear_orphaned_video_files():
"""
Removes all video files in a specific directory.
This method is used to clear out any leftover video files in the 'tmp/video' directory.
It iterates through all files in the specified directory and deletes them.
"""
# Specify the directory path for video files
directory_path = 'tmp/video'
# Iterate through and remove each file in the directory
for filename in os.listdir(directory_path):
file_path = os.path.join(directory_path, filename)
try:
os.remove(file_path)
logging.info(f"Removed file: {file_path}")
except OSError as e:
logging.info(f"Error removing file {file_path}: {e}")
@staticmethod
def system_file_cleanup():
"""
Performs a general cleanup of system files.
Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.
"""
# Clear orphaned audio files
OSHelper.clear_orphaned_audio_files()
OSHelper.clear_orphaned_video_files()
@staticmethod
def configure_tmp_directories():
"""
Ensures that the required directories (tmp/audio and tmp/video) exist.
Creates them if they do not exist.
"""
directories = ['tmp/audio', 'tmp/video']
for directory in directories:
os.makedirs(directory, exist_ok=True)
logging.info(f"Checked and ensured directory exists: {directory}")
# Path: celery_config.py
def get_celery_app():
return celery_app
# Path: database/setup.py
class DatabaseSetup:
"""
This class is responsible for database setup tasks, particularly
for ensuring that all defined tables in SQLAlchemy models are created in the database.
"""
@staticmethod
def initial_setup():
"""
Creates tables in the database based on the SQLAlchemy models.
This method uses the SQLAlchemy engine to connect to the database and creates
any tables that haven't been created yet as defined in the SQLAlchemy model classes.
It's intended to be run during the initial setup phase of the application.
"""
# Obtain the SQLAlchemy engine
engine = get_engine()
# Ensure vector extension is enabled.
with engine.begin() as connection:
# Create extension 'pgvector' if it is not created yet
# Remember, you may need to install pgvector on your system before this will work properly.
# https://github.com/pgvector/pgvector.git for instructions.
connection.execute(text("CREATE EXTENSION IF NOT EXISTS vector"))
# Create all tables in the database defined in the SQLAlchemy models
# This will have no effect on existing tables that match the model definitions
Base.metadata.create_all(engine)
# Path: broadcast/broadcaster.py
class Broadcaster:
def __init__(self):
def send_message(self, message):
def start(self):
def shutdown(self):
# Path: audio/audio_processor.py
class AudioProcessor:
"""
A class to handle audio processing, including capturing audio input,
processing it with Vosk for speech recognition, and responding using OpenAI's GPT model.
Attributes:
model (Vosk.Model): Vosk speech recognition model.
samplerate (int): The sample rate for audio capture.
device (str): The name of the audio input device.
blocksize (int): The block size for audio processing.
dump_filename (str): Filename to dump the audio input, if provided.
"""
def __init__(self):
self.model = Model(lang=AUDIO_SETTINGS.get('VOSK_MODEL', "en-us"))
self.samplerate = AUDIO_SETTINGS.get('SOUND_DEVICE_SAMPLERATE')
self.device = AUDIO_SETTINGS.get('SOUND_DEVICE_DEVICE')
self.blocksize = AUDIO_SETTINGS.get('SOUND_DEVICE_BLOCK_SIZE', 28000)
self.dump_filename = AUDIO_SETTINGS.get('AUDIO_IN_DUMP_FILENAME')
self.audio_queue = queue.Queue()
self.openai_client = OpenAIClient()
self.openai_conversation_builder = OpenAIConversationBuilder()
self.tool_processor = ToolProcessor()
self.broadcaster = broadcaster
self.audio_out = get_audio_out()
self.audio_out_response_buffer = ''
self.full_assistant_response = ''
self.last_wake_time = 0
self.last_response_end_time = 0
self.processing_openai_request = False
self.shutdown_event = threading.Event()
def open_dump_file(self):
"""Opens the file to dump audio input if a filename is provided."""
if self.dump_filename is not None:
self.dump_filename = open(self.dump_filename, "wb")
def close_dump_file(self):
"""Closes the audio dump file if it was opened."""
if self.dump_filename is not None:
self.dump_filename.close()
def should_process(self, result, current_time):
"""
Determines whether the robot should process the input based on wake phrases or elapsed time.
Args:
result (str): The recognized text from the audio input.
current_time (float): The current time in seconds.
Returns:
bool: True if the input should be processed, False otherwise.
"""
return (not contains_quiet_please_phrase(result) and contains_wake_phrase(result)) or \
            (not contains_quiet_please_phrase(result) and (current_time - self.last_wake_time <= 10) or (current_time - self.last_response_end_time <= 10) and not self.audio_out.is_playing)
def update_wake_time(self):
"""Updates the time when a wake phrase was last heard."""
self.last_wake_time = time.time()
self.save_system_state()
def update_response_end_time(self):
"""Updates the time when the robot's last response ended."""
self.last_response_end_time = time.time()
def callback(self, indata, frames, time, status):
"""
Callback function for audio input stream.
Args:
indata: The buffer containing the incoming sound.
frames: The number of frames.
time: Current stream time.
status: Status of the stream.
"""
if status:
logging.warning(status)
self.audio_queue.put(bytes(indata))
def process_stream(self):
"""
Processes the audio stream by recognizing speech and generating responses.
Continuously captures audio, performs speech recognition, and generates responses using OpenAI.
"""
self.open_dump_file()
try:
with sd.RawInputStream(samplerate=self.samplerate, blocksize=self.blocksize, device=self.device,
dtype="int16", channels=1, callback=self.callback):
rec = KaldiRecognizer(self.model, self.samplerate)
openai_stream_thread = None
while not self.shutdown_event.is_set():
data, current_time = self.get_audio_data()
result = self.process_recognition(data, rec)
if result:
openai_stream_thread = self.handle_speech(result, openai_stream_thread, current_time)
self.handle_partial_results(rec)
self.write_to_dump_file(data)
self.process_openai_response()
# except Exception as e:
# logging.error(f"An error occurred: {e}")
finally:
self.close_dump_file()
def get_audio_data(self):
"""
Retrieves audio data from the queue.
Returns:
tuple: A tuple containing the audio data and the current time.
"""
data = self.audio_queue.get()
current_time = time.time()
return data, current_time
def process_recognition(self, data, rec):
"""
Processes the recognition of speech from audio data.
Args:
data: The audio data to be processed.
rec (KaldiRecognizer): The Vosk recognizer instance.
Returns:
str or None: Recognized text or None if no significant speech is recognized.
"""
if rec.AcceptWaveform(data):
result = json.loads(rec.Result())["text"]
if result not in ['', 'huh']:
self.broadcaster.send_message(result)
logging.info("ROBOT HEARD: " + result)
return result
return None
def handle_speech(self, result, openai_stream_thread, current_time):
"""
Processes the recognized speech and determines the appropriate response.
Args:
result (str): Recognized speech text.
openai_stream_thread (threading.Thread): The current OpenAI stream thread.
current_time (float): Current time in seconds.
Returns:
threading.Thread: Updated or new OpenAI stream thread.
"""
try:
if self.should_process(result, current_time) and not self.processing_openai_request:
self.update_wake_time()
self.processing_openai_request = True
if not openai_stream_thread or not openai_stream_thread.is_alive():
self.openai_client.stop_signal.clear()
is_tool_request, conversation = self.determine_tool_request(result)
if is_tool_request:
self.handle_tool_request(result, conversation)
else:
self.continue_conversation(result, conversation)
else:
logging.info("ROBOT THOUGHT: Ignoring Conversation, it doesn't appear to be relevant.")
finally:
self.processing_openai_request = False
return openai_stream_thread
def determine_tool_request(self, result):
"""
Determines whether the given input text is a tool request.
Args:
result (str): The recognized text to evaluate.
Returns:
Tuple[bool, list]: A tuple containing a boolean indicating whether it's a tool request,
and the conversation array for further processing.
"""
call_type_messages = self.openai_conversation_builder.create_check_if_tool_call_messages(result)
openai_is_tool_response = self.openai_client.create_completion(call_type_messages, False, {"type": "json_object"}, openai_functions, True)
is_tool_request = False
conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)
try:
if openai_is_tool_response and openai_is_tool_response.choices:
is_tool_request = json.loads(openai_is_tool_response.choices[0].message.content).get("is_tool", False)
except (TypeError, AttributeError, json.JSONDecodeError):
print("Error parsing OpenAI response or response not in expected format.")
return is_tool_request, conversation
def handle_tool_request(self, result, conversation):
"""
Handles the processing of a tool request.
Args:
result (str): The recognized text.
conversation (list): The conversation array built up to this point.
"""
tool_response = self.openai_client.create_completion(conversation, False, None, openai_functions)
tool_response_message = tool_response.choices[0].message
tool_calls = tool_response_message.tool_calls
if tool_calls:
self.process_tool_calls(tool_calls, result, conversation, tool_response_message)
else:
self.continue_conversation(result, conversation)
def process_tool_calls(self, tool_calls, result, conversation, tool_response_message):
"""
Processes the tool calls received from OpenAI.
Args:
tool_calls (list): List of tool calls from OpenAI response.
result (str): The recognized text.
conversation (list): The conversation array.
tool_response_message (Message): The tool response message from OpenAI.
"""
tool_call = tool_calls[0]
tool_processor_response = self.tool_processor.process_tool_request(tool_call)
if tool_processor_response["success"]:
self.handle_successful_tool_response(tool_processor_response, result, conversation, tool_response_message)
else:
self.audio_out.add_to_queue(get_tool_not_found_phrase())
def handle_successful_tool_response(self, tool_processor_response, result, conversation, tool_response_message):
"""
Handles a successful tool response.
Args:
tool_processor_response (dict): The response from the tool processor.
result (str): The recognized text.
conversation (list): The conversation array.
tool_response_message (Message): The tool response message from OpenAI.
"""
if tool_processor_response["is_conversational"]:
conversation.append(tool_response_message)
tool_call_response_message = self.openai_conversation_builder.create_tool_call_response_message(tool_processor_response)
conversation.append(tool_call_response_message)
openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))
openai_stream_thread.start()
else:
self.store_conversation(speaker_type=CONVERSATIONS_CONFIG["user"], response=result)
def continue_conversation(self, result, conversation):
"""
Continues the conversation with OpenAI based on the given result.
Args:
result (str): The recognized text to continue the conversation with.
conversation (list): The existing conversation array.
"""
self.openai_client.stop_processing_request()
conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)
openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))
openai_stream_thread.start()
logging.info("ROBOT ACTION: Committing user input to memory.")
self.store_conversation(speaker_type=CONVERSATIONS_CONFIG["user"], response=result)
def handle_partial_results(self, rec):
"""
Handles partial results from speech recognition.
Args:
rec (KaldiRecognizer): The Vosk recognizer instance.
"""
partial_result_json = json.loads(rec.PartialResult())
if 'partial' in partial_result_json and contains_quiet_please_phrase(partial_result_json['partial']):
self.stop_conversation_and_audio()
def stop_conversation_and_audio(self):
"""
Stops the conversation and any ongoing audio processing.
"""
logging.info("ROBOT THOUGHT: Request to stop talking recognized. Stopping stream.")
self.stop_all_audio()
if self.full_assistant_response:
logging.info("ROBOT ACTION: Committing my partial response to memory")
self.store_full_assistant_response()
def stop_all_audio(self):
self.audio_out_response_buffer = ''
self.openai_client.stop_processing_request()
self.audio_out.stop_all_audio()
def write_to_dump_file(self, data):
"""
Writes audio data to the dump file if it's open.
Args:
data: The audio data to be written to the file.
"""
if self.dump_filename is not None:
self.dump_filename.write(data)
def process_openai_response(self):
"""
Processes responses from OpenAI's GPT model.
Retrieves and handles the responses generated by OpenAI.
"""
while not self.openai_client.response_queue.empty():
chunk = self.openai_client.response_queue.get()
if chunk.choices[0].delta.content is not None:
response_text = chunk.choices[0].delta.content
print(response_text, end='', flush=True)
self.update_response_end_time()
self.audio_out_response_buffer += response_text
if self.audio_out_response_buffer.endswith(('.', '?', '!', ';')):
self.audio_out.add_to_queue(self.audio_out_response_buffer)
self.audio_out_response_buffer = ""
self.full_assistant_response += response_text
if self.full_assistant_response and self.openai_client.streaming_complete:
logging.info("ROBOT ACTION: Committing my full response to memory")
self.store_full_assistant_response()
def store_full_assistant_response(self):
"""
Stores the full assistant response in the database.
"""
self.store_conversation(speaker_type=CONVERSATIONS_CONFIG["assistant"], response=self.full_assistant_response)
self.full_assistant_response = ''
def store_conversation(self, speaker_type, response):
"""
Stores the conversation part in the database asynchronously using a Celery task.
Args:
speaker_type (str): "user" or "assistant", indicating who is speaking.
response (str): The text of the response.
"""
get_celery_app().send_task('background.memory.tasks.store_conversation_task', args=[speaker_type, response])
logging.info("Store conversation task submitted to background")
def save_system_state(self):
"""
Saves the system state in the database asynchronously using a Celery task.
"""
get_celery_app().send_task('background.memory.tasks.update_system_state_task', args=[self.last_wake_time])
logging.info("Update system state task submitted to background")
def shutdown(self):
self.shutdown_event.set()
# Path: video/video_processor.py
class VideoProcessor:
"""
A class to handle video processing, including capturing video input and
processing it with MediaPipe for pose estimation.
"""
def __init__(self):
# MediaPipe Pose solution initialization
self.mp_pose = mp.solutions.pose
self.pose = self.mp_pose.Pose()
self.cap = None
# Video capture settings
self.frame_rate = VIDEO_SETTINGS.get('FRAME_RATE', 30)
self.device = VIDEO_SETTINGS.get('VIDEO_DEVICE', 0)
self.capture_interval = VIDEO_SETTINGS.get('CAPTURE_INTERVAL', 1)
self.frame_counter = 0
self.last_capture_time = time.time()
self.frame_queue = queue.Queue()
# Check and create tmp directory for storing frames
self.tmp_folder = 'tmp/video'
if not os.path.exists(self.tmp_folder):
os.makedirs(self.tmp_folder)
self.shutdown_event = threading.Event()
def process_stream(self):
"""
Captures and processes the video stream.
"""
if VIDEO_SETTINGS.get('CAPTURE_VIDEO', False):
self.cap = cv2.VideoCapture(self.device)
while not self.shutdown_event.is_set():
ret, frame = self.cap.read()
if not ret:
continue
# Process the frame
#self.process_frame(frame)
# Capture frames at a set interval for saving
if time.time() - self.last_capture_time > self.capture_interval:
frame_name = os.path.join(self.tmp_folder, f"frame_{self.frame_counter}.jpg")
cv2.imwrite(frame_name, frame)
logging.debug(f"Frame saved as {frame_name}")
self.frame_counter += 1
self.last_capture_time = time.time()
self.clean_up()
def clean_up(self):
"""
Releases resources and closes windows.
"""
if self.cap:
self.cap.release()
cv2.destroyAllWindows()
OSHelper.clear_orphaned_video_files()
def process_frame(self, frame):
"""
Processes a single video frame.
"""
self.frame_queue.put(frame)
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = self.pose.process(frame_rgb)
if results.pose_landmarks:
# Draw pose landmarks
mp.solutions.drawing_utils.draw_landmarks(frame, results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS)
# Additional processing can be added here
def shutdown(self):
"""
Signals the thread to terminate.
"""
self.shutdown_event.set()
# Path: audio/audio_out.py
def get_audio_out():
"""
Returns the instance of AudioOutput for use.
Returns:
AudioOutput: The instance of the AudioOutput class.
"""
return audio_out
# Path: utils/os/helpers.py
class OSHelper:
"""
Provides utility methods for operating system level operations, particularly file management.
This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.
"""
@staticmethod
def find_closest_image(directory, target_time):
"""
Finds the closest image file in a directory based on the target time.
This function searches through all JPG files in the specified directory and
selects the one whose creation time is closest to, but not earlier than,
the target time.
Args:
directory (str): The directory path where the image files are stored.
target_time (float): The target time (in seconds since epoch) to compare the file modification times against.
Returns:
str: The path of the closest image file. Returns None if no suitable file is found.
"""
closest_file = None
closest_time_diff = None
# Iterate over each file in the specified directory
for filename in os.listdir(directory):
if filename.lower().endswith(".jpg"): # Check if the file is a JPG image
filepath = os.path.join(directory, filename)
filetime = os.path.getmtime(filepath) # Get the modification time of the file
# Check if the file's time is later than the target time and if it's the closest so far
if filetime > target_time:
logging.info(f"File is close: {filepath} - Time: {filetime}")
time_diff = filetime - target_time
if closest_time_diff is None or time_diff < closest_time_diff:
closest_file = filepath
closest_time_diff = time_diff
return closest_file
@staticmethod
def convert_image_to_base64(filepath):
"""
Converts an image file to a Base64 encoded string.
This function reads the image file from the given filepath, encodes it in Base64,
and then decodes it to a UTF-8 string, which can be easily used for data transfer
or embedding in web pages.
Args:
filepath (str): The path of the image file to be converted.
Returns:
str: The Base64 encoded string of the image.
"""
with open(filepath, "rb") as image_file:
# Read the file and encode it in Base64
return base64.b64encode(image_file.read()).decode("utf-8")
@staticmethod
def clear_orphaned_audio_files():
"""
Removes all audio files in a specific directory.
This method is used to clear out any leftover audio files in the 'tmp/audio' directory.
It iterates through all files in the specified directory and deletes them.
"""
# Specify the directory path for audio files
directory_path = 'tmp/audio'
# Iterate through and remove each file in the directory
for filename in os.listdir(directory_path):
file_path = os.path.join(directory_path, filename)
try:
os.remove(file_path)
logging.info(f"Removed file: {file_path}")
except OSError as e:
logging.info(f"Error removing file {file_path}: {e}")
@staticmethod
def clear_orphaned_video_files():
"""
Removes all video files in a specific directory.
This method is used to clear out any leftover video files in the 'tmp/video' directory.
It iterates through all files in the specified directory and deletes them.
"""
# Specify the directory path for video files
directory_path = 'tmp/video'
# Iterate through and remove each file in the directory
for filename in os.listdir(directory_path):
file_path = os.path.join(directory_path, filename)
try:
os.remove(file_path)
logging.info(f"Removed file: {file_path}")
except OSError as e:
logging.info(f"Error removing file {file_path}: {e}")
@staticmethod
def system_file_cleanup():
"""
Performs a general cleanup of system files.
Currently, this method clears orphaned audio and video files but can be expanded to include other cleanup tasks.
"""
# Clear orphaned audio files
OSHelper.clear_orphaned_audio_files()
OSHelper.clear_orphaned_video_files()
@staticmethod
def configure_tmp_directories():
"""
Ensures that the required directories (tmp/audio and tmp/video) exist.
Creates them if they do not exist.
"""
directories = ['tmp/audio', 'tmp/video']
for directory in directories:
os.makedirs(directory, exist_ok=True)
logging.info(f"Checked and ensured directory exists: {directory}")
# Path: utils/text/welcome.py
def welcome_message():
print("""
ChatClue: Osiris
/\_/\
( o.o )
> ^ <
Optimized System for Integrated Real-Time Interaction and Sensing
""")
# Path: utils/logging/colors.py
class ColorFormatter(logging.Formatter):
def format(self, record):
levelname = record.levelname
message = logging.Formatter.format(self, record)
return COLORS.get(levelname, '') + message + COLORS['ENDC']
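ColorFormatter references a COLORS mapping that is not shown in this excerpt; the sketch below shows a typical ANSI-escape mapping it presumably resembles. The exact codes are an assumption, not taken from the repository.

# Assumed shape of the COLORS mapping used by ColorFormatter (illustrative ANSI codes).
COLORS = {
    'DEBUG': '\033[94m',     # blue
    'INFO': '\033[92m',      # green
    'WARNING': '\033[93m',   # yellow
    'ERROR': '\033[91m',     # red
    'CRITICAL': '\033[95m',  # magenta
    'ENDC': '\033[0m',       # reset back to the terminal default
}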
# Path: osiris.py
from config import CELERY_CONFIG, LOG_LEVEL, VIDEO_SETTINGS
from utils.os.helpers import OSHelper
from celery import Celery
from celery_config import get_celery_app
from database.setup import DatabaseSetup
from broadcast.broadcaster import broadcaster
from audio.audio_processor import AudioProcessor
from video.video_processor import VideoProcessor
from audio.audio_out import get_audio_out
from utils.os.helpers import OSHelper
from utils.text.welcome import welcome_message
from utils.logging.colors import ColorFormatter
from background.memory.tasks import *
from tools import * # Import all openai tool functions
import logging
import subprocess
import atexit
import sys
import threading
import time
import cv2
import queue
# Configure basic logging for the application
logging.basicConfig(level=LOG_LEVEL)
root_logger = logging.getLogger()
for handler in root_logger.handlers:
handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s'))
# Ensure the necessary tmp/ directories exist
OSHelper.configure_tmp_directories()
# Configure background processor / subconscious systems
celery_app = get_celery_app()
# Configure audio output
audio_out = get_audio_out()
| def start_celery_worker(): |
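The ref line above begins a helper for launching the background Celery worker. A minimal, hypothetical completion is sketched below, assuming the worker is spawned with the standard Celery CLI via the subprocess module imported above; the module name passed to -A and the CLI flags are assumptions, not taken from the repository.

def start_celery_worker():
    # Spawn a Celery worker as a child process so background memory tasks
    # (store_conversation_task, update_system_state_task) run outside the main audio/video loops.
    command = ["celery", "-A", "celery_config", "worker", "--loglevel=info"]
    return subprocess.Popen(command)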
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: GXNU-ZhongLab/ODTrack
# Path: lib/models/odtrack/odtrack.py
def build_odtrack(cfg, training=True):
current_dir = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root
pretrained_path = os.path.join(current_dir, '../../../pretrained_networks')
if cfg.MODEL.PRETRAIN_FILE and ('OSTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:
pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)
else:
pretrained = ''
if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':
backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,
add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,
attn_type=cfg.MODEL.BACKBONE.ATTN_TYPE,)
elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':
backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,
add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,
attn_type=cfg.MODEL.BACKBONE.ATTN_TYPE,
)
elif cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224_ce':
backbone = vit_base_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,
ce_loc=cfg.MODEL.BACKBONE.CE_LOC,
ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,
add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,
)
elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224_ce':
backbone = vit_large_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,
ce_loc=cfg.MODEL.BACKBONE.CE_LOC,
ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,
add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,
)
else:
raise NotImplementedError
hidden_dim = backbone.embed_dim
patch_start_index = 1
backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)
box_head = build_box_head(cfg, hidden_dim)
model = ODTrack(
backbone,
box_head,
aux_loss=False,
head_type=cfg.MODEL.HEAD.TYPE,
token_len=cfg.MODEL.BACKBONE.TOKEN_LEN,
)
return model
# Path: lib/test/tracker/basetracker.py
class BaseTracker:
"""Base class for all trackers."""
def __init__(self, params):
self.params = params
self.visdom = None
def predicts_segmentation_mask(self):
return False
def initialize(self, image, info: dict) -> dict:
"""Overload this function in your tracker. This should initialize the model."""
raise NotImplementedError
def track(self, image, info: dict = None) -> dict:
"""Overload this function in your tracker. This should track in the frame and update the model."""
raise NotImplementedError
def visdom_draw_tracking(self, image, box, segmentation=None):
if isinstance(box, OrderedDict):
box = [v for k, v in box.items()]
else:
box = (box,)
if segmentation is None:
self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')
else:
self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')
def transform_bbox_to_crop(self, box_in, resize_factor, device, box_extract=None, crop_type='template'):
# box_in: list [x1, y1, w, h], not normalized
# box_extract: same as box_in
# out bbox: Torch.tensor [1, 1, 4], x1y1wh, normalized
if crop_type == 'template':
crop_sz = torch.Tensor([self.params.template_size, self.params.template_size])
elif crop_type == 'search':
crop_sz = torch.Tensor([self.params.search_size, self.params.search_size])
else:
raise NotImplementedError
box_in = torch.tensor(box_in)
if box_extract is None:
box_extract = box_in
else:
box_extract = torch.tensor(box_extract)
template_bbox = transform_image_to_crop(box_in, box_extract, resize_factor, crop_sz, normalize=True)
template_bbox = template_bbox.view(1, 1, 4).to(device)
return template_bbox
def _init_visdom(self, visdom_info, debug):
visdom_info = {} if visdom_info is None else visdom_info
self.pause_mode = False
self.step = False
self.next_seq = False
if debug > 0 and visdom_info.get('use_visdom', True):
try:
self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},
visdom_info=visdom_info)
# # Show help
# help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \
# 'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \
# 'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \
# 'block list.'
# self.visdom.register(help_text, 'text', 1, 'Help')
except:
time.sleep(0.5)
print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\n'
'!!! Start Visdom in a separate terminal window by typing \'visdom\' !!!')
def _visdom_ui_handler(self, data):
if data['event_type'] == 'KeyPress':
if data['key'] == ' ':
self.pause_mode = not self.pause_mode
elif data['key'] == 'ArrowRight' and self.pause_mode:
self.step = True
elif data['key'] == 'n':
self.next_seq = True
# Path: lib/test/tracker/vis_utils.py
def gen_visualization(image, mask_indices, patch_size=16):
# image [224, 224, 3]
# mask_indices, list of masked token indices
# mask_indices from each stage need to be concatenated cumulatively
# mask_indices = mask_indices[::-1]
num_stages = len(mask_indices)
for i in range(1, num_stages):
mask_indices[i] = np.concatenate([mask_indices[i-1], mask_indices[i]], axis=1)
# keep_indices = get_keep_indices(decisions)
image = np.asarray(image)
H, W, C = image.shape
Hp, Wp = H // patch_size, W // patch_size
image_tokens = image.reshape(Hp, patch_size, Wp, patch_size, 3).swapaxes(1, 2).reshape(Hp * Wp, patch_size, patch_size, 3)
stages = [
recover_image(gen_masked_tokens(image_tokens, mask_indices[i]), H, W, Hp, Wp, patch_size)
for i in range(num_stages)
]
imgs = [image] + stages
imgs = [pad_img(img) for img in imgs]
viz = np.concatenate(imgs, axis=1)
return viz
# Path: lib/test/utils/hann.py
def hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:
"""2D cosine window."""
return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)
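hann2d composes two 1D windows via a hann1d helper that is not included in this excerpt. A sketch of the standard centered cosine window it presumably wraps (the non-centered branch in particular is an assumption):

import math
import torch

def hann1d(sz: int, centered: bool = True) -> torch.Tensor:
    """1D cosine (Hann) window; sketch of the helper assumed by hann2d above."""
    if centered:
        return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float()))
    # Assumed non-centered variant: peak at index 0, mirrored tail at the end.
    w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz // 2 + 1).float()))
    return torch.cat([w, w[1:sz - sz // 2].flip((0,))])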
# Path: lib/train/data/processing_utils.py
def sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):
""" Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area
args:
im - cv image
target_bb - target box [x, y, w, h]
search_area_factor - Ratio of crop size to target size
output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.
returns:
cv image - extracted crop
float - the factor by which the crop has been resized to make the crop size equal output_size
"""
if not isinstance(target_bb, list):
x, y, w, h = target_bb.tolist()
else:
x, y, w, h = target_bb
# Crop image
crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)
if crop_sz < 1:
raise Exception('Too small bounding box.')
# x1, y1, x2, y2 of crop image
x1 = round(x + 0.5 * w - crop_sz * 0.5)
x2 = x1 + crop_sz
y1 = round(y + 0.5 * h - crop_sz * 0.5)
y2 = y1 + crop_sz
x1_pad = max(0, -x1)
x2_pad = max(x2 - im.shape[1] + 1, 0)
y1_pad = max(0, -y1)
y2_pad = max(y2 - im.shape[0] + 1, 0)
# Crop target
im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]
if mask is not None:
mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]
# Pad
im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)
# deal with attention mask
H, W, _ = im_crop_padded.shape
att_mask = np.ones((H,W))
end_x, end_y = -x2_pad, -y2_pad
if y2_pad == 0:
end_y = None
if x2_pad == 0:
end_x = None
att_mask[y1_pad:end_y, x1_pad:end_x] = 0 # mask is 0 for non-padding areas (image content)
if mask is not None:
mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)
if output_sz is not None:
resize_factor = output_sz / crop_sz
im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))
att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_)
if mask is None:
return im_crop_padded, resize_factor, att_mask
mask_crop_padded = \
F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]
return im_crop_padded, resize_factor, att_mask, mask_crop_padded
else:
if mask is None:
return im_crop_padded, att_mask.astype(np.bool_), 1.0
return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded
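A short usage sketch of sample_target as the tracker below calls it (the frame array and box values are illustrative only):

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy BGR frame
init_bbox = [200, 150, 80, 60]                    # [x, y, w, h]
patch, resize_factor, att_mask = sample_target(frame, init_bbox, search_area_factor=2.0, output_sz=128)
# patch: 128x128x3 square crop around the box, resize_factor = output_sz / crop_sz,
# att_mask: boolean map that is True only where padding was added outside the image.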
# Path: lib/test/tracker/data_utils.py
class Preprocessor(object):
def __init__(self):
self.mean = torch.tensor([0.485, 0.456, 0.406]).view((1, 3, 1, 1)).cuda()
self.std = torch.tensor([0.229, 0.224, 0.225]).view((1, 3, 1, 1)).cuda()
def process(self, img_arr: np.ndarray, amask_arr: np.ndarray):
# Deal with the image patch
img_tensor = torch.tensor(img_arr).cuda().float().permute((2,0,1)).unsqueeze(dim=0)
img_tensor_norm = ((img_tensor / 255.0) - self.mean) / self.std # (1,3,H,W)
# Deal with the attention mask
amask_tensor = torch.from_numpy(amask_arr).to(torch.bool).cuda().unsqueeze(dim=0) # (1,H,W)
return NestedTensor(img_tensor_norm, amask_tensor)
# Path: lib/utils/box_ops.py
def clip_box(box: list, H, W, margin=0):
x1, y1, w, h = box
x2, y2 = x1 + w, y1 + h
x1 = min(max(0, x1), W-margin)
x2 = min(max(margin, x2), W)
y1 = min(max(0, y1), H-margin)
y2 = min(max(margin, y2), H)
w = max(margin, x2-x1)
h = max(margin, y2-y1)
return [x1, y1, w, h]
# Path: lib/utils/ce_utils.py
def generate_mask_cond(cfg, bs, device, gt_bbox):
template_size = cfg.DATA.TEMPLATE.SIZE
stride = cfg.MODEL.BACKBONE.STRIDE
template_feat_size = template_size // stride
if cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'ALL':
box_mask_z = None
elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_POINT':
if template_feat_size == 8:
index = slice(3, 4)
elif template_feat_size == 12:
index = slice(5, 6)
elif template_feat_size == 16:
index = slice(7, 8)
elif template_feat_size == 24:
index = slice(11, 12)
elif template_feat_size == 7:
index = slice(3, 4)
elif template_feat_size == 14:
index = slice(6, 7)
else:
raise NotImplementedError
box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)
box_mask_z[:, index, index] = 1
box_mask_z = box_mask_z.flatten(1).to(torch.bool)
elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_REC':
# use fixed 4x4 region, 3:5 for 8x8
# use fixed 4x4 region 5:6 for 12x12
if template_feat_size == 8:
index = slice(3, 5)
elif template_feat_size == 12:
index = slice(5, 7)
elif template_feat_size == 7:
index = slice(3, 4)
else:
raise NotImplementedError
box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)
box_mask_z[:, index, index] = 1
box_mask_z = box_mask_z.flatten(1).to(torch.bool)
elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'GT_BOX':
box_mask_z = torch.zeros([bs, template_size, template_size], device=device)
# box_mask_z_ori = data['template_seg'][0].view(-1, 1, *data['template_seg'].shape[2:]) # (batch, 1, 128, 128)
box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to(
torch.float) # (batch, 1, 128, 128)
# box_mask_z_vis = box_mask_z.cpu().numpy()
box_mask_z = F.interpolate(box_mask_z, scale_factor=1. / cfg.MODEL.BACKBONE.STRIDE, mode='bilinear',
align_corners=False)
box_mask_z = box_mask_z.flatten(1).to(torch.bool)
# box_mask_z_vis = box_mask_z[:, 0, ...].cpu().numpy()
# gaussian_maps_vis = generate_heatmap(data['template_anno'], self.cfg.DATA.TEMPLATE.SIZE, self.cfg.MODEL.STRIDE)[0].cpu().numpy()
else:
raise NotImplementedError
return box_mask_z
# Path: lib/test/tracker/odtrack.py
import math
import numpy as np
import torch
import cv2
import os
from lib.models.odtrack import build_odtrack
from lib.test.tracker.basetracker import BaseTracker
from lib.test.tracker.vis_utils import gen_visualization
from lib.test.utils.hann import hann2d
from lib.train.data.processing_utils import sample_target
from lib.test.tracker.data_utils import Preprocessor
from lib.utils.box_ops import clip_box
from lib.utils.ce_utils import generate_mask_cond
# for debug
class ODTrack(BaseTracker):
def __init__(self, params):
super(ODTrack, self).__init__(params)
network = build_odtrack(params.cfg, training=False)
network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)
self.cfg = params.cfg
self.network = network.cuda()
self.network.eval()
self.preprocessor = Preprocessor()
self.state = None
self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE
# motion constraint
self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()
# for debug
self.debug = params.debug
self.use_visdom = params.debug
self.frame_id = 0
if self.debug:
if not self.use_visdom:
self.save_dir = "debug"
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
else:
# self.add_hook()
self._init_visdom(None, 1)
# for save boxes from all queries
self.save_all_boxes = params.save_all_boxes
self.z_dict1 = {}
def initialize(self, image, info: dict):
# forward the template once
z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,
output_sz=self.params.template_size)
self.z_patch_arr = z_patch_arr
template = self.preprocessor.process(z_patch_arr, z_amask_arr)
with torch.no_grad():
# self.z_dict1 = template
self.memory_frames = [template.tensors]
self.memory_masks = []
if self.cfg.MODEL.BACKBONE.CE_LOC: # use CE module
template_bbox = self.transform_bbox_to_crop(info['init_bbox'], resize_factor,
template.tensors.device).squeeze(1)
self.memory_masks.append(generate_mask_cond(self.cfg, 1, template.tensors.device, template_bbox))
# save states
| self.state = info['init_bbox'] |
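The __init__ above builds a Hann window as a motion prior and initialize stores the first template; the track() method itself is not part of this excerpt. Purely as an illustration of how such a window is typically applied (not the repository's actual code), the raw score map is usually multiplied by the window before taking the peak:

import torch

feat_sz = 24
output_window = hann2d(torch.tensor([feat_sz, feat_sz]).long(), centered=True)  # (1, 1, 24, 24)
score_map = torch.rand(1, 1, feat_sz, feat_sz)                                  # dummy head output
response = output_window * score_map                  # favour locations near the previous target
best_idx = torch.argmax(response.flatten(1), dim=1)   # flattened index of the windowed peak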
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Tlntin/booking_simulator
# Path: modelscope_agent/llm/base.py
class LLM:
name = ''
def __init__(self, cfg):
self.cfg = cfg
self.agent_type = None
self.model = None
self.model_id = self.model
def set_agent_type(self, agent_type):
self.agent_type = agent_type
@abstractmethod
def generate(self, prompt: str, functions: list = [], **kwargs) -> str:
"""each llm should implement this function to generate response
Args:
prompt (str): prompt
functions (list): list of functions object including: name, description, parameters
Returns:
str: response
"""
raise NotImplementedError
@abstractmethod
def stream_generate(self,
prompt: str,
functions: list = [],
**kwargs) -> str:
"""stream generate response, which yields a generator of response in each step
Args:
prompt (str): prompt
functions (list): list of functions object including: name, description, parameters
Yields:
Iterator[str]: iterator of step response
"""
raise NotImplementedError
def tokenize(self, input_text: str) -> List[int]:
"""tokenize is used to calculate the length of the text to meet the model's input length requirements
Args:
input_text (str): input text
Returns:
list[int]: token_ids
"""
raise NotImplementedError
def detokenize(self, input_ids: List[int]) -> str:
"""detokenize
Args:
input_ids (list[int]): input token_ids
Returns:
str: text
"""
raise NotImplementedError
# Path: modelscope_agent/output_parser.py
class OutputParser:
"""Output parser for llm response
"""
def parse_response(self, response):
raise NotImplementedError
# used to handle the case where the action_para result is parsed incorrectly; if there is no
# valid action, raise an error
@staticmethod
def handle_fallback(action: str, action_para: str):
if action is not None and action != '':
parameters = {'fallback': action_para}
return action, parameters
else:
raise ValueError('Wrong response format for output parser')
# Path: modelscope_agent/prompt/prompt.py
class PromptGenerator:
def __init__(self,
system_template: str = '',
instruction_template: str = '',
user_template: str = '<user_input>',
exec_template: str = '',
assistant_template: str = '',
sep='\n\n',
llm=None,
length_constraint=LengthConstraint()):
"""
prompt generator
Args:
system_template (str, optional): System template, normally the role of LLM.
instruction_template (str, optional): Indicate the instruction for LLM.
user_template (str, optional): Prefix before user input. Defaults to ''.
exec_template (str, optional): A wrapper str for exec result.
assistant_template (str, optional): Prefix before assistant response.
Some LLMs need to manually concatenate this prefix before generation.
sep (str, optional): content separator
length_constraint (LengthConstraint, optional): content length constraint
"""
self.system_template = system_template
self.instruction_template = instruction_template
self.user_template = user_template
self.assistant_template = assistant_template
self.exec_template = exec_template
self.sep = sep
if isinstance(llm, LLM) and llm.model_id:
self.prompt_preprocessor = build_raw_prompt(llm.model_id)
self.prompt_max_length = length_constraint.prompt_max_length
self.reset()
def reset(self):
self.prompt = ''
self.history = []
self.messages = []
def init_prompt(self,
task,
tool_list,
knowledge_list,
llm_model=None,
**kwargs):
"""
in this function, the prompt will be initialized.
"""
prompt = self.sep.join(
[self.system_template, self.instruction_template])
prompt += '<knowledge><history>'
knowledge_str = self.get_knowledge_str(
knowledge_list, file_name=kwargs.get('file_name', ''))
# knowledge
prompt = prompt.replace('<knowledge>', knowledge_str)
# get tool description str
tool_str = self.get_tool_str(tool_list)
prompt = prompt.replace('<tool_list>', tool_str)
history_str = self.get_history_str()
prompt = prompt.replace('<history>', history_str)
self.system_prompt = copy.deepcopy(prompt)
# user input
user_input = self.user_template.replace('<user_input>', task)
prompt += f'{self.sep}{user_input}'
# assistant input
prompt += f'{self.sep}{self.assistant_template}'
# store history
self.history.append({'role': 'user', 'content': user_input})
self.history.append({
'role': 'assistant',
'content': self.assistant_template
})
self.prompt = prompt
self.function_calls = self.get_function_list(tool_list)
# TODO change the output from single prompt to artifacts including prompt, messages, function_call
def generate(self, llm_result, exec_result: Union[str, dict]):
if isinstance(exec_result, dict):
exec_result = str(exec_result['result'])
return self._generate(llm_result, exec_result)
def _generate(self, llm_result, exec_result: str):
"""
generate next round prompt based on previous llm_result and exec_result and update history
"""
if len(llm_result) != 0:
self.prompt = f'{self.prompt}{llm_result}'
self.history[-1]['content'] += f'{llm_result}'
if len(exec_result) != 0:
exec_result = self.exec_template.replace('<exec_result>',
str(exec_result))
self.prompt = f'{self.prompt}{self.sep}{exec_result}'
self.history[-1]['content'] += f'{self.sep}{exec_result}'
return self.prompt
# TODO: add Union[Text, Message] type for llm_result,
# add ExecResult = Text type for exec_result
# output would be a Union[Text, Messages]
# In this case llm_result is Message, and exec_result is Function_call
def _generate_messages(self, llm_result, exec_result: str):
"""
generate next round prompt based on previous llm_result and exec_result and update history
"""
# at the initial task there is no llm_result or exec_result to append yet
if llm_result == '' and exec_result == '':
return self.history
# make sure set content '' not null
function_call = llm_result.get('function_call', None)
if function_call is not None:
llm_result['content'] = ''
self.history.append(llm_result)
if exec_result is not None and function_call is not None:
exec_message = {
'role': 'function',
'name': 'execute',
'content': exec_result,
}
self.history.append(exec_message)
return self.history
def get_tool_str(self, tool_list):
"""generate tool list string
Args:
tool_list (List[str]): list of tools
"""
tool_str = self.sep.join(
[f'{i + 1}. {t}' for i, t in enumerate(tool_list)])
return tool_str
# TODO move parse_tools_to_function from agent to here later
def get_function_list(self, tool_list):
"""generate function call list from tools list
Args:
tool_list (List[str]): list of tools
"""
functions = [tool.get_function() for tool in tool_list]
return functions
def get_knowledge_str(self,
knowledge_list,
file_name='',
only_content=False,
**kwargs):
"""generate knowledge string
Args:
file_name (str): file name
knowledge_list (List[str]): list of knowledge entries
"""
knowledge = self.sep.join(
[f'{i + 1}. {k}' for i, k in enumerate(knowledge_list)])
knowledge_content = KNOWLEDGE_CONTENT_PROMPT.replace(
'<knowledge_content>', knowledge)
if only_content:
return knowledge_content
else:
knowledge_introduction = KNOWLEDGE_INTRODUCTION_PROMPT.replace(
'<file_name>', file_name)
knowledge_str = f'{KNOWLEDGE_PROMPT}{self.sep}{knowledge_introduction}{self.sep}{knowledge_content}' if len(
knowledge_list) > 0 else ''
return knowledge_str
def get_history_str(self):
"""generate history string
"""
history_str = ''
for i in range(len(self.history)):
history_item = self.history[len(self.history) - i - 1]
text = history_item['content']
if len(history_str) + len(text) + len(
self.prompt) > self.prompt_max_length:
break
history_str = f'{self.sep}{text.strip()}{history_str}'
return history_str
# Path: modelscope_agent/tools/tool.py
class Tool:
"""
A base class for tools.
When you inherit this class and implement a new tool, you should provide the name, description
and parameters of the tool so that they conform with the schema.
Each tool may have two call methods: _local_call (execute the tool in your local environment)
and _remote_call (construct an HTTP request to a remote server).
The corresponding preprocess and postprocess methods may need to be overridden to get the correct result.
"""
name: str = 'tool'
description: str = 'This is a tool that ...'
parameters: list = []
def __init__(self, cfg={}):
self.cfg = cfg.get(self.name, {})
self.is_remote_tool = self.cfg.get('is_remote_tool', False)
self.project_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
self.db_path = os.path.join(self.project_dir, "data", "sqlite.db")
# remote call
self.url = self.cfg.get('url', '')
self.token = self.cfg.get('token', '')
self.header = {
'Authorization': self.token or f'Bearer {MODELSCOPE_API_TOKEN}'
}
try:
all_para = {
'name': self.name,
'description': self.description,
'parameters': self.parameters
}
self.tool_schema = ToolSchema(**all_para)
except ValidationError:
raise ValueError(f'Error when parsing parameters of {self.name}')
self._str = self.tool_schema.model_dump_json()
self._function = self.parse_pydantic_model_to_openai_function(all_para)
def __call__(self, remote=False, *args, **kwargs):
if self.is_remote_tool or remote:
return self._remote_call(*args, **kwargs)
else:
return self._local_call(*args, **kwargs)
def _remote_call(self, *args, **kwargs):
if self.url == '':
raise ValueError(
f"Could not use remote call for {self.name} since this tool doesn't have a remote endpoint"
)
remote_parsed_input = json.dumps(
self._remote_parse_input(*args, **kwargs))
origin_result = None
retry_times = MAX_RETRY_TIMES
while retry_times:
retry_times -= 1
try:
response = requests.request(
'POST',
self.url,
headers=self.header,
data=remote_parsed_input)
if response.status_code != requests.codes.ok:
response.raise_for_status()
origin_result = json.loads(
response.content.decode('utf-8'))['Data']
final_result = self._parse_output(origin_result, remote=True)
return final_result
except Timeout:
continue
except RequestException as e:
raise ValueError(
f'Remote call failed with error code: {e.response.status_code},\
error message: {e.response.content.decode("utf-8")}')
raise ValueError(
'Remote call max retry times exceeded! Please try to use local call.'
)
def _local_call(self, *args, **kwargs):
return
def _remote_parse_input(self, *args, **kwargs):
return kwargs
def _local_parse_input(self, *args, **kwargs):
return args, kwargs
def _parse_output(self, origin_result, *args, **kwargs):
return {'result': origin_result}
def __str__(self):
return self._str
def get_function(self):
return self._function
def parse_pydantic_model_to_openai_function(self, all_para: dict):
'''
This method is used to convert a pydantic model to an OpenAI function schema,
e.g. converting
all_para = {
'name': get_current_weather,
'description': Get the current weather in a given location,
'parameters': [{
'name': 'image',
'description': '用户输入的图片',
'required': True
}, {
'name': 'text',
'description': '用户输入的文本',
'required': True
}]
}
to
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"image": {
"type": "string",
"description": "用户输入的图片",
},
"text": {
"type": "string",
"description": "用户输入的文本",
},
"required": ["image", "text"],
},
}
'''
function = {
'name': all_para['name'],
'description': all_para['description'],
'parameters': {
'type': 'object',
'properties': {},
'required': [],
},
}
for para in all_para['parameters']:
function['parameters']['properties'][para['name']] = {
'type': 'string',
'description': para['description']
}
if para['required']:
function['parameters']['required'].append(para['name'])
return function
# Path: tests/utils.py
from modelscope_agent.llm import LLM
from modelscope_agent.output_parser import OutputParser
from modelscope_agent.prompt import PromptGenerator
from modelscope_agent.tools import Tool
class MockLLM(LLM):
def __init__(self, responses=['mock_llm_response']):
super().__init__({})
self.responses = responses
self.idx = -1
self.model_id = 'mock_llm'
def generate(self, prompt: str, function_list=[], **kwargs) -> str:
self.idx += 1
return self.responses[self.idx] if self.idx < len(
self.responses) else 'mock llm response'
def stream_generate(self, prompt: str, function_list=[], **kwargs) -> str:
yield 'mock llm response'
class MockPromptGenerator(PromptGenerator):
def __init__(self):
super().__init__()
class MockOutParser(OutputParser):
def __init__(self, action, args, count=1):
super().__init__()
self.action = action
self.args = args
self.count = count
def parse_response(self, response: str):
if self.count > 0:
self.count -= 1
return self.action, self.args
else:
return None, None
class MockTool(Tool):
| def __init__(self, name, func, description, parameters=[]): |
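The ref line begins the mock tool used by the tests; a hypothetical completion is sketched below (the attribute assignments and the _local_call body are assumptions, not the repository's actual helper):

class MockTool(Tool):
    def __init__(self, name, func, description, parameters=[]):
        self.name = name
        self.description = description
        self.parameters = parameters
        self.func = func
        super().__init__()

    def _local_call(self, *args, **kwargs):
        # Return the wrapped callable's result in the same {'result': ...} shape Tool._parse_output uses.
        return {'result': self.func(*args, **kwargs)}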
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: finned-tech/sportsbookreview-scraper
# Path: scrapers/sportsbookreview.py
class NFLOddsScraper(OddsScraper):
def __init__(self, years):
super().__init__("nfl", years)
self.base = (
"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nfl-odds-"
)
self.schema = {
"season": [],
"date": [],
"home_team": [],
"away_team": [],
"home_1stQtr": [],
"away_1stQtr": [],
"home_2ndQtr": [],
"away_2ndQtr": [],
"home_3rdQtr": [],
"away_3rdQtr": [],
"home_4thQtr": [],
"away_4thQtr": [],
"home_final": [],
"away_final": [],
"home_close_ml": [],
"away_close_ml": [],
"home_open_spread": [],
"away_open_spread": [],
"home_close_spread": [],
"away_close_spread": [],
"home_2H_spread": [],
"away_2H_spread": [],
"2H_total": [],
"open_over_under": [],
"close_over_under": [],
}
def _reformat_data(self, df, season):
new_df = pd.DataFrame()
new_df["season"] = [season] * len(df)
new_df["date"] = df[0].apply(lambda x: self._make_datestr(x, season))
new_df["name"] = df[3]
new_df["1stQtr"] = df[4]
new_df["2ndQtr"] = df[5]
new_df["3rdQtr"] = df[6]
new_df["4thQtr"] = df[7]
new_df["final"] = df[8]
_open = df[9].apply(lambda x: 0 if x in self.blacklist else x)
new_df["open_odds"] = _open
close = df[10].apply(lambda x: 0 if x in self.blacklist else x)
new_df["close_odds"] = close
new_df["close_ml"] = df[11]
h2 = df[12].apply(lambda x: 0 if x in self.blacklist else x)
new_df["2H_odds"] = h2
return new_df
def _to_schema(self, df):
new_df = self.schema.copy()
df = df.fillna(0)
progress = df.iterrows()
for (i1, row), (i2, next_row) in self._pairwise(progress):
home_ml = int(next_row["close_ml"])
away_ml = int(row["close_ml"])
odds1 = float(row["open_odds"])
odds2 = float(next_row["open_odds"])
if odds1 < odds2:
open_spread = odds1
close_spread = float(row["close_odds"])
h2_spread = float(row["2H_odds"])
h2_total = float(next_row["2H_odds"])
open_ou = odds2
close_ou = float(next_row["close_odds"])
else:
open_spread = odds2
close_spread = float(next_row["close_odds"])
h2_spread = float(next_row["2H_odds"])
h2_total = float(row["2H_odds"])
open_ou = odds1
close_ou = float(row["close_odds"])
home_open_spread = -open_spread if home_ml < away_ml else open_spread
away_open_spread = -home_open_spread
home_close_spread = -close_spread if home_ml < away_ml else close_spread
away_close_spread = -home_close_spread
h2_home_spread = -h2_spread if home_ml < away_ml else h2_spread
h2_away_spread = -h2_home_spread
new_df["season"].append(row["season"])
new_df["date"].append(row["date"])
new_df["home_team"].append(self._translate(next_row["name"]))
new_df["away_team"].append(self._translate(row["name"]))
new_df["home_1stQtr"].append(next_row["1stQtr"])
new_df["away_1stQtr"].append(row["1stQtr"])
new_df["home_2ndQtr"].append(next_row["2ndQtr"])
new_df["away_2ndQtr"].append(row["2ndQtr"])
new_df["home_3rdQtr"].append(next_row["3rdQtr"])
new_df["away_3rdQtr"].append(row["3rdQtr"])
new_df["home_4thQtr"].append(next_row["4thQtr"])
new_df["away_4thQtr"].append(row["4thQtr"])
new_df["home_final"].append(next_row["final"])
new_df["away_final"].append(row["final"])
new_df["home_close_ml"].append(home_ml)
new_df["away_close_ml"].append(away_ml)
new_df["home_open_spread"].append(home_open_spread)
new_df["away_open_spread"].append(away_open_spread)
new_df["home_close_spread"].append(home_close_spread)
new_df["away_close_spread"].append(away_close_spread)
new_df["home_2H_spread"].append(h2_home_spread)
new_df["away_2H_spread"].append(h2_away_spread)
new_df["2H_total"].append(h2_total)
new_df["open_over_under"].append(open_ou)
new_df["close_over_under"].append(close_ou)
return pd.DataFrame(new_df)
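_to_schema consumes rows through a _pairwise helper defined elsewhere in the OddsScraper base class (not shown here). Since every game occupies two consecutive rows, away team first and home team second, the helper presumably groups the iterator into non-overlapping pairs; a sketch of that assumed behaviour:

def _pairwise(self, iterable):
    # Group an iterator into non-overlapping consecutive pairs: (row0, row1), (row2, row3), ...
    # Each pair is the away-team row followed by the home-team row of the same game.
    it = iter(iterable)
    return zip(it, it)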
# Path: scrapers/sportsbookreview.py
class NBAOddsScraper(NFLOddsScraper):
def __init__(self, years):
super().__init__(years)
self.sport = "nba"
self.base = (
"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nba-odds-"
)
self.schema = {
"season": [],
"date": [],
"home_team": [],
"away_team": [],
"home_1stQtr": [],
"away_1stQtr": [],
"home_2ndQtr": [],
"away_2ndQtr": [],
"home_3rdQtr": [],
"away_3rdQtr": [],
"home_4thQtr": [],
"away_4thQtr": [],
"home_final": [],
"away_final": [],
"home_close_ml": [],
"away_close_ml": [],
"home_open_spread": [],
"away_open_spread": [],
"home_close_spread": [],
"away_close_spread": [],
"home_2H_spread": [],
"away_2H_spread": [],
"2H_total": [],
"open_over_under": [],
"close_over_under": [],
}
# Path: scrapers/sportsbookreview.py
class NHLOddsScraper(OddsScraper):
def __init__(self, years):
super().__init__("nhl", years)
self.base = (
"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nhl-odds-"
)
self.schema = {
"season": [],
"date": [],
"home_team": [],
"away_team": [],
"home_1stPeriod": [],
"away_1stPeriod": [],
"home_2ndPeriod": [],
"away_2ndPeriod": [],
"home_3rdPeriod": [],
"away_3rdPeriod": [],
"home_final": [],
"away_final": [],
"home_open_ml": [],
"away_open_ml": [],
"home_close_ml": [],
"away_close_ml": [],
"home_close_spread": [],
"away_close_spread": [],
"home_close_spread_odds": [],
"away_close_spread_odds": [],
"open_over_under": [],
"open_over_under_odds": [],
"close_over_under": [],
"close_over_under_odds": [],
}
def _reformat_data(self, df, season, covid=False):
new_df = pd.DataFrame()
new_df["season"] = [season] * len(df)
new_df["date"] = df[0].apply(
lambda x: self._make_datestr(x, season)
if not covid
else self._make_datestr(x, season, start=1, yr_end=3)
)
new_df["name"] = df[3]
new_df["1stPeriod"] = df[4]
new_df["2ndPeriod"] = df[5]
new_df["3rdPeriod"] = df[6]
new_df["final"] = df[7]
new_df["open_ml"] = df[8]
new_df["open_ml"] = new_df["open_ml"].apply(
lambda x: 0 if x in self.blacklist else x
)
new_df["close_ml"] = df[9]
new_df["close_ml"] = new_df["close_ml"].apply(
lambda x: 0 if x in self.blacklist else x
)
new_df["close_spread"] = df[10] if season > 2013 else 0
new_df["close_spread"] = new_df["close_spread"].apply(
lambda x: 0 if x in self.blacklist else float(x)
)
new_df["close_spread_odds"] = df[11] if season > 2013 else 0
new_df["close_spread_odds"] = new_df["close_spread_odds"].apply(
lambda x: 0 if x in self.blacklist else float(x)
)
new_df["open_over_under"] = df[12] if season > 2013 else df[10]
new_df["open_over_under"] = new_df["open_over_under"].apply(
lambda x: 0 if x in self.blacklist else float(x)
)
new_df["open_over_under_odds"] = df[13] if season > 2013 else df[11]
new_df["open_over_under_odds"] = new_df["open_over_under_odds"].apply(
lambda x: 0 if x in self.blacklist else float(x)
)
new_df["close_over_under"] = df[14] if season > 2013 else df[12]
new_df["close_over_under"] = new_df["close_over_under"].apply(
lambda x: 0 if x in self.blacklist else float(x)
)
new_df["close_over_under_odds"] = df[15] if season > 2013 else df[13]
new_df["close_over_under_odds"] = new_df["close_over_under_odds"].apply(
lambda x: 0 if x in self.blacklist else float(x)
)
return new_df
def _to_schema(self, df):
new_df = self.schema.copy()
df = df.fillna(0)
progress = df.iterrows()
for (i1, row), (i2, next_row) in self._pairwise(progress):
new_df["season"].append(row["season"])
new_df["date"].append(row["date"])
new_df["home_team"].append(self._translate(next_row["name"]))
new_df["away_team"].append(self._translate(row["name"]))
new_df["home_1stPeriod"].append(next_row["1stPeriod"])
new_df["away_1stPeriod"].append(row["1stPeriod"])
new_df["home_2ndPeriod"].append(next_row["2ndPeriod"])
new_df["away_2ndPeriod"].append(row["2ndPeriod"])
new_df["home_3rdPeriod"].append(next_row["3rdPeriod"])
new_df["away_3rdPeriod"].append(row["3rdPeriod"])
new_df["home_final"].append(next_row["final"])
new_df["away_final"].append(row["final"])
new_df["home_open_ml"].append(int(next_row["open_ml"]))
new_df["away_open_ml"].append(int(row["open_ml"]))
new_df["home_close_ml"].append(int(next_row["close_ml"]))
new_df["away_close_ml"].append(int(row["close_ml"]))
new_df["home_close_spread"].append(next_row["close_spread"])
new_df["away_close_spread"].append(row["close_spread"])
new_df["home_close_spread_odds"].append(next_row["close_spread_odds"])
new_df["away_close_spread_odds"].append(row["close_spread_odds"])
new_df["open_over_under"].append(next_row["open_over_under"])
new_df["open_over_under_odds"].append(next_row["open_over_under_odds"])
new_df["close_over_under"].append(next_row["close_over_under"])
new_df["close_over_under_odds"].append(next_row["close_over_under_odds"])
return pd.DataFrame(new_df)
def driver(self):
dfs = pd.DataFrame()
for season in self.seasons:
# compensate for the COVID shortened season in 2021
season_str = self._make_season(season) if season != 2020 else "2021"
is_cov = True if season == 2020 else False
url = self.base + season_str
# Sportsbookreview has scraper protection, so we need to set a user agent
# to get around this.
headers = {"User-Agent": "Mozilla/5.0"}
r = requests.get(url, headers=headers)
dfs = pd.concat(
[dfs, self._reformat_data(pd.read_html(r.text)[0][1:], season, is_cov)],
axis=0,
)
return self._to_schema(dfs)
# Path: scrapers/sportsbookreview.py
class MLBOddsScraper(OddsScraper):
def __init__(self, years):
super().__init__("mlb", years)
self.base = "https://www.sportsbookreviewsonline.com/wp-content/uploads/sportsbookreviewsonline_com_737/mlb-odds-"
self.ext = ".xlsx"
self.schema = {
"season": [],
"date": [],
"home_team": [],
"away_team": [],
"home_1stInn": [],
"away_1stInn": [],
"home_2ndInn": [],
"away_2ndInn": [],
"home_3rdInn": [],
"away_3rdInn": [],
"home_4thInn": [],
"away_4thInn": [],
"home_5thInn": [],
"away_5thInn": [],
"home_6thInn": [],
"away_6thInn": [],
"home_7thInn": [],
"away_7thInn": [],
"home_8thInn": [],
"away_8thInn": [],
"home_9thInn": [],
"away_9thInn": [],
"home_final": [],
"away_final": [],
"home_open_ml": [],
"away_open_ml": [],
"home_close_ml": [],
"away_close_ml": [],
"home_close_spread": [],
"away_close_spread": [],
"home_close_spread_odds": [],
"away_close_spread_odds": [],
"open_over_under": [],
"open_over_under_odds": [],
"close_over_under": [],
"close_over_under_odds": [],
}
def _reformat_data(self, df, season):
new_df = pd.DataFrame()
new_df["season"] = [season] * len(df)
new_df["date"] = df[0].apply(
lambda x: self._make_datestr(x, season, start=3, yr_end=10)
)
new_df["name"] = df[3]
new_df["1stInn"] = df[5]
new_df["2ndInn"] = df[6]
new_df["3rdInn"] = df[7]
new_df["4thInn"] = df[8]
new_df["5thInn"] = df[9]
new_df["6thInn"] = df[10]
new_df["7thInn"] = df[11]
new_df["8thInn"] = df[12]
new_df["9thInn"] = df[13]
new_df["final"] = df[14]
new_df["open_ml"] = df[15]
new_df["close_ml"] = df[16]
new_df["close_spread"] = df[17] if season > 2013 else 0
new_df["close_spread_odds"] = df[18] if season > 2013 else 0
new_df["open_over_under"] = df[19] if season > 2013 else df[17]
new_df["open_over_under_odds"] = df[20] if season > 2013 else df[18]
new_df["close_over_under"] = df[21] if season > 2013 else df[19]
new_df["close_over_under_odds"] = df[22] if season > 2013 else df[20]
return new_df
def _to_schema(self, df):
new_df = self.schema.copy()
progress = df.iterrows()
for (i1, row), (i2, next_row) in self._pairwise(progress):
new_df["season"].append(row["season"])
new_df["date"].append(row["date"])
new_df["home_team"].append(self._translate(next_row["name"]))
new_df["away_team"].append(self._translate(row["name"]))
new_df["home_1stInn"].append(next_row["1stInn"])
new_df["away_1stInn"].append(row["1stInn"])
new_df["home_2ndInn"].append(next_row["2ndInn"])
new_df["away_2ndInn"].append(row["2ndInn"])
new_df["home_3rdInn"].append(next_row["3rdInn"])
new_df["away_3rdInn"].append(row["3rdInn"])
new_df["home_4thInn"].append(next_row["4thInn"])
new_df["away_4thInn"].append(row["4thInn"])
new_df["home_5thInn"].append(next_row["5thInn"])
new_df["away_5thInn"].append(row["5thInn"])
new_df["home_6thInn"].append(next_row["6thInn"])
new_df["away_6thInn"].append(row["6thInn"])
new_df["home_7thInn"].append(next_row["7thInn"])
new_df["away_7thInn"].append(row["7thInn"])
new_df["home_8thInn"].append(next_row["8thInn"])
new_df["away_8thInn"].append(row["8thInn"])
new_df["home_9thInn"].append(next_row["9thInn"])
new_df["away_9thInn"].append(row["9thInn"])
new_df["home_final"].append(next_row["final"])
new_df["away_final"].append(row["final"])
new_df["home_open_ml"].append(next_row["open_ml"])
new_df["away_open_ml"].append(row["open_ml"])
new_df["home_close_ml"].append(next_row["close_ml"])
new_df["away_close_ml"].append(row["close_ml"])
new_df["home_close_spread"].append(next_row["close_spread"])
new_df["away_close_spread"].append(row["close_spread"])
new_df["home_close_spread_odds"].append(next_row["close_spread_odds"])
new_df["away_close_spread_odds"].append(row["close_spread_odds"])
new_df["open_over_under"].append(next_row["open_over_under"])
new_df["open_over_under_odds"].append(next_row["open_over_under_odds"])
new_df["close_over_under"].append(next_row["close_over_under"])
new_df["close_over_under_odds"].append(next_row["close_over_under_odds"])
return pd.DataFrame(new_df)
def driver(self):
dfs = pd.DataFrame()
for season in self.seasons:
url = self.base + str(season) + self.ext
headers = {"User-Agent": "Mozilla/5.0"}
r = requests.get(url, headers=headers)
with io.BytesIO(r.content) as fh:
df = pd.read_excel(fh, header=None, sheet_name=None)
dfs = pd.concat(
[dfs, self._reformat_data(df["Sheet1"][1:], season)], axis=0
)
return self._to_schema(dfs)
# Path: cli.py
import argparse
import config
from scrapers.sportsbookreview import (
NFLOddsScraper,
NBAOddsScraper,
NHLOddsScraper,
MLBOddsScraper,
)
parser = argparse.ArgumentParser()
parser.add_argument("--sport", type=str, required=True)
# start and end years
parser.add_argument("--start", type=int, required=True)
parser.add_argument("--end", type=int, required=True)
# filename for output
parser.add_argument("--filename", type=str, required=True)
# output format (csv or json), default is json
parser.add_argument("--format", type=str, default="json")
args = parser.parse_args()
| if __name__ == "__main__": |
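The ref line opens the CLI entry point; a hedged sketch of how the dispatch could continue, consistent with the imports and arguments above (the exact scraper selection and output writing are assumptions, not the repository's code):

if __name__ == "__main__":
    years = list(range(args.start, args.end + 1))
    scrapers = {
        "nfl": NFLOddsScraper,
        "nba": NBAOddsScraper,
        "nhl": NHLOddsScraper,
        "mlb": MLBOddsScraper,
    }
    df = scrapers[args.sport](years).driver()
    if args.format == "csv":
        df.to_csv(args.filename, index=False)
    else:
        df.to_json(args.filename, orient="records")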
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chenchenygu/watermark-learnability
# Path: aar_watermark.py
class AarWatermark:
def __init__(
self,
vocab_size: int,
k: int,
seed: int = DEFAULT_SEED,
eps: float = 1e-20,
device: Optional[str] = None,
):
if not device:
device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator() # generator is always cpu for reproducibility
generator.manual_seed(seed)
# clamp to avoid NaNs
uniform = torch.clamp(torch.rand((vocab_size * k, vocab_size), generator=generator, dtype=torch.float32), min=eps)
self.gumbel = (-torch.log(torch.clamp(-torch.log(uniform), min=eps))).to(device)
self.k = k
self.vocab_size = vocab_size
self.seed = seed
self.eps = eps
self.device = device
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
if input_ids.shape[-1] < self.k:
return scores
prev_token = torch.sum(input_ids[:, -self.k:], dim=-1) # (batch_size,)
gumbel = self.gumbel[prev_token] # (batch_size, vocab_size)
return scores[..., :gumbel.shape[-1]] + gumbel
def watermark_logits(
self,
input_ids: torch.LongTensor, # (batch, seq_len)
logits: torch.FloatTensor, # (batch, seq_len, vocab_size)
) -> torch.FloatTensor:
"""Returns watermarked logits to be used as distillation target."""
hashes = torch.sum(input_ids.unfold(-1, self.k, 1), dim=-1) # (batch, seq_len - k + 1)
gumbel = self.gumbel[hashes] # (batch, seq_len - k + 1, vocab_size)
# tokenizer vocab size and model outputs vocab size may be different
logits[..., self.k - 1:, :gumbel.shape[-1]] += gumbel
return logits
def watermark_logits_argmax(
self,
input_ids: torch.LongTensor, # (batch, seq_len)
logits: torch.FloatTensor, # (batch, seq_len, vocab_size)
) -> torch.LongTensor:
"""Finds argmax token for watermark, returns token indexes to be used for cross-entropy loss.
Returns tensor of shape (batch, seq_len), where each element is a token index.
"""
hashes = torch.sum(input_ids.unfold(-1, self.k, 1), dim=-1) # (batch, seq_len - k + 1)
gumbel = self.gumbel[hashes] # (batch, seq_len - k + 1, vocab_size)
# tokenizer vocab size and model outputs vocab size may be different
logits[..., self.k - 1:, :gumbel.shape[-1]] += gumbel # (batch, seq_len, vocab_size)
tokens = torch.argmax(logits, dim=-1) # (batch, seq_len)
return tokens
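A tiny, self-contained sanity check of the Gumbel-max behaviour AarWatermark relies on: adding fixed, key-derived Gumbel noise to the logits and taking the argmax picks a pseudorandom but fully reproducible token for every (previous k tokens, logits) pair. The values below are illustrative only:

import torch

wm = AarWatermark(vocab_size=10, k=2, seed=42, device="cpu")
input_ids = torch.tensor([[1, 3, 5]])   # batch of one short prefix
scores = torch.zeros((1, 10))           # uniform logits
first = torch.argmax(wm(input_ids, scores), dim=-1)
second = torch.argmax(wm(input_ids, scores), dim=-1)
assert torch.equal(first, second)       # same prefix and key => same watermarked choice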
# Path: kgw_watermarking/watermark_reliability_release/watermark_processor.py
class WatermarkLogitsProcessor(WatermarkBase, LogitsProcessor):
"""LogitsProcessor modifying model output scores in a pipe. Can be used in any HF pipeline to modify scores to fit the watermark,
but can also be used as a standalone tool inserted for any model producing scores inbetween model outputs and next token sampler.
"""
def __init__(self, *args, store_spike_ents: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self.store_spike_ents = store_spike_ents
self.spike_entropies = None
if self.store_spike_ents:
self._init_spike_entropies()
def _init_spike_entropies(self):
alpha = torch.exp(torch.tensor(self.delta)).item()
gamma = self.gamma
self.z_value = ((1 - gamma) * (alpha - 1)) / (1 - gamma + (alpha * gamma))
self.expected_gl_coef = (gamma * alpha) / (1 - gamma + (alpha * gamma))
# catch for overflow when bias is "infinite"
if alpha == torch.inf:
self.z_value = 1.0
self.expected_gl_coef = 1.0
def _get_spike_entropies(self):
spike_ents = [[] for _ in range(len(self.spike_entropies))]
for b_idx, ent_tensor_list in enumerate(self.spike_entropies):
for ent_tensor in ent_tensor_list:
spike_ents[b_idx].append(ent_tensor.item())
return spike_ents
def _get_and_clear_stored_spike_ents(self):
spike_ents = self._get_spike_entropies()
self.spike_entropies = None
return spike_ents
def _compute_spike_entropy(self, scores):
# precomputed z value in init
probs = scores.softmax(dim=-1)
denoms = 1 + (self.z_value * probs)
renormed_probs = probs / denoms
sum_renormed_probs = renormed_probs.sum()
return sum_renormed_probs
def _calc_greenlist_mask(
self, scores: torch.FloatTensor, greenlist_token_ids
) -> torch.BoolTensor:
# Cannot lose loop, greenlists might have different lengths
green_tokens_mask = torch.zeros_like(scores, dtype=torch.bool)
for b_idx, greenlist in enumerate(greenlist_token_ids):
if len(greenlist) > 0:
green_tokens_mask[b_idx][greenlist] = True
return green_tokens_mask
def _bias_greenlist_logits(
self, scores: torch.Tensor, greenlist_mask: torch.Tensor, greenlist_bias: float
) -> torch.Tensor:
scores[greenlist_mask] = scores[greenlist_mask] + greenlist_bias
return scores
def _score_rejection_sampling(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor, tail_rule="fixed_compute"
) -> list[int]:
"""Generate greenlist based on current candidate next token. Reject and move on if necessary. Method not batched.
This is only a partial version of Alg.3 "Robust Private Watermarking", as it always assumes greedy sampling. It will still (kinda)
work for all types of sampling, but less effectively.
To work efficiently, this function can switch between a number of rules for handling the distribution tail.
These are not exposed by default.
"""
sorted_scores, greedy_predictions = scores.sort(dim=-1, descending=True)
final_greenlist = []
for idx, prediction_candidate in enumerate(greedy_predictions):
greenlist_ids = self._get_greenlist_ids(
torch.cat([input_ids, prediction_candidate[None]], dim=0)
) # add candidate to prefix
if prediction_candidate in greenlist_ids: # test for consistency
final_greenlist.append(prediction_candidate)
# What follows below are optional early-stopping rules for efficiency
if tail_rule == "fixed_score":
if sorted_scores[0] - sorted_scores[idx + 1] > self.delta:
break
elif tail_rule == "fixed_list_length":
if len(final_greenlist) == 10:
break
elif tail_rule == "fixed_compute":
if idx == 40:
break
else:
pass # do not break early
return torch.as_tensor(final_greenlist, device=input_ids.device)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
"""Call with previous context as input_ids, and scores for next token."""
# this is lazy to allow us to co-locate on the watermarked model's device
self.rng = torch.Generator(device=input_ids.device) if self.rng is None else self.rng
# NOTE, it would be nice to get rid of this batch loop, but currently,
# the seed and partition operations are not tensor/vectorized, thus
# each sequence in the batch needs to be treated separately.
list_of_greenlist_ids = [None for _ in input_ids] # Greenlists could differ in length
for b_idx, input_seq in enumerate(input_ids):
if self.self_salt:
greenlist_ids = self._score_rejection_sampling(input_seq, scores[b_idx])
else:
greenlist_ids = self._get_greenlist_ids(input_seq)
list_of_greenlist_ids[b_idx] = greenlist_ids
# logic for computing and storing spike entropies for analysis
if self.store_spike_ents:
if self.spike_entropies is None:
self.spike_entropies = [[] for _ in range(input_ids.shape[0])]
self.spike_entropies[b_idx].append(self._compute_spike_entropy(scores[b_idx]))
green_tokens_mask = self._calc_greenlist_mask(
scores=scores, greenlist_token_ids=list_of_greenlist_ids
)
scores = self._bias_greenlist_logits(
scores=scores, greenlist_mask=green_tokens_mask, greenlist_bias=self.delta
)
return scores
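# --- Illustrative sketch (not from the original repository) ---
# A minimal, self-contained demo of the greenlist mask/bias mechanics used by
# WatermarkLogitsProcessor above: tokens on a per-sequence greenlist receive a fixed
# logit bonus (delta), which shifts probability mass toward them after softmax.
# The vocabulary size, delta and greenlists below are arbitrary illustrative values.
import torch
vocab_size, delta = 8, 2.0
scores = torch.zeros(2, vocab_size)                            # two sequences, uniform logits
greenlists = [torch.tensor([1, 3]), torch.tensor([0, 5, 6])]   # greenlists may differ in length
mask = torch.zeros_like(scores, dtype=torch.bool)
for b, green in enumerate(greenlists):
    mask[b][green] = True                                      # same loop as _calc_greenlist_mask
scores[mask] += delta                                          # same update as _bias_greenlist_logits
probs = scores.softmax(dim=-1)                                 # green tokens now ~exp(delta) times more likely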
# Path: kth_watermark.py
class KTHWatermark:
def __init__(
self,
vocab_size: int,
key_len: int,
seed: int = DEFAULT_SEED,
device: Optional[str] = None,
eps: float = 1e-20,
random_shift: bool = False,
num_shifts: Optional[int] = None,
):
if not device:
device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator() # generator is always cpu for reproducibility
generator.manual_seed(seed)
uniform = torch.clamp(torch.rand((key_len, vocab_size), generator=generator, dtype=torch.float32), min=eps)
self.gumbel = (-torch.log(torch.clamp(-torch.log(uniform), min=eps))).to(device)
if random_shift:
if num_shifts is not None:
self.possible_shifts = [i * (key_len // num_shifts) for i in range(num_shifts)]
else:
self.possible_shifts = list(range(key_len))
self.random = random.Random(seed) # for random shift
self.seed = seed
self.eps = eps
self.vocab_size = vocab_size
self.device = device
self.key_len = key_len
self.cur_shift = 0
self.random_shift = random_shift
self.num_shifts = num_shifts
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
index = (input_ids.shape[1] + self.cur_shift) % self.key_len
gumbel = self.gumbel[index] # (batch_size, vocab_size)
return scores[..., :gumbel.shape[-1]] + gumbel
def watermark_logits(
self,
input_ids: torch.LongTensor, # (batch, seq_len)
logits: torch.FloatTensor, # (batch, seq_len, vocab_size)
) -> torch.FloatTensor:
"""Returns watermarked logits to be used as distillation target."""
index = torch.arange(input_ids.shape[1], device=input_ids.device) % self.key_len # (seq_len,)
gumbel = self.gumbel[index] # (seq_len, vocab_size)
# tokenizer vocab size and model outputs vocab size may be different
logits[..., :gumbel.shape[-1]] += gumbel # (batch, seq_len, vocab_size)
return logits
def watermark_logits_argmax(
self,
input_ids: torch.LongTensor, # (batch, seq_len)
logits: torch.FloatTensor, # (batch, seq_len, vocab_size)
random_shift: bool = False,
) -> torch.LongTensor:
"""Finds argmax token for watermark, returns token indexes to be used for cross-entropy loss.
Returns tensor of shape (batch, seq_len), where each element is a token index.
"""
shift = 0
if self.random_shift:
shift = self.random.choice(self.possible_shifts)
index = (torch.arange(input_ids.shape[1], device=input_ids.device) + shift) % self.key_len # (seq_len,)
gumbel = self.gumbel[index] # (seq_len, vocab_size)
# tokenizer vocab size and model outputs vocab size may be different
logits[..., :gumbel.shape[-1]] += gumbel # (batch, seq_len, vocab_size)
tokens = torch.argmax(logits, dim=-1) # (batch, seq_len)
return tokens
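# --- Illustrative sketch (not from the original repository) ---
# Minimal demo of the fixed-key Gumbel trick used by KTHWatermark above: a
# (key_len, vocab_size) table of Gumbel(0, 1) noise is generated once from a seed,
# and the row indexed by (position % key_len) is added to the logits before the
# argmax/sampling step. Sizes and the seed below are arbitrary illustrative values.
import torch
key_len, vocab_size, eps = 4, 10, 1e-20
gen = torch.Generator().manual_seed(0)                      # CPU generator for reproducibility
uniform = torch.clamp(torch.rand(key_len, vocab_size, generator=gen), min=eps)
gumbel = -torch.log(torch.clamp(-torch.log(uniform), min=eps))
position = 7                                                # current sequence length
logits = torch.zeros(vocab_size)                            # stand-in for model logits
token = torch.argmax(logits + gumbel[position % key_len])   # deterministic given seed + position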
# Path: experiments/generate_samples_decoding_watermark.py
import argparse
import os
import random
import json
import torch
from typing import Dict
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, LogitsProcessorList
from aar_watermark import AarWatermark
from kgw_watermarking.watermark_reliability_release.watermark_processor import WatermarkLogitsProcessor
from kth_watermark import KTHWatermark
DEFAULT_PAD_TOKEN = "[PAD]"
DEFAULT_EOS_TOKEN = "</s>"
DEFAULT_BOS_TOKEN = "<s>"
DEFAULT_UNK_TOKEN = "<unk>"
device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser()
parser.add_argument("--model_names", type=str, nargs="+", required=True)
parser.add_argument("--watermark_config_filename", type=str, default="watermark_config.json")
parser.add_argument("--dataset_name", type=str, required=True)
parser.add_argument("--tokenizer_name", type=str, default=None)
parser.add_argument("--dataset_config_name", type=str, default=None)
parser.add_argument("--dataset_split", type=str, default="test")
parser.add_argument("--dataset_num_skip", type=int, default=0)
parser.add_argument("--data_field", type=str, default="text")
parser.add_argument("--num_samples", type=int, default=1000)
parser.add_argument("--min_new_tokens", type=int, default=None)
parser.add_argument("--max_new_tokens", type=int, default=None)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--prompt_length", type=int, default=10)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--streaming", action="store_true", default=True)
parser.add_argument("--output_file", type=str, required=True)
parser.add_argument("--overwrite_output_file", action="store_true", default=False)
parser.add_argument("--fp16", action="store_true", default=False)
parser.add_argument("--watermark_configs_file", type=str, required=True)
args = parser.parse_args()
def get_prompts(args) -> Dict:
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_names[0])
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
dataset = load_dataset(args.dataset_name, args.dataset_config_name, split=args.dataset_split, streaming=args.streaming)
max_length = args.prompt_length + args.max_new_tokens
min_length = args.prompt_length + args.min_new_tokens
def filter_length(example):
return len(tokenizer(example[args.data_field], truncation=True, max_length=max_length)["input_ids"]) >= min_length
def encode(examples):
trunc_tokens = tokenizer(
examples[args.data_field],
truncation=True,
padding=True,
max_length=max_length,
return_tensors="pt"
).to(device)
examples["text"] = tokenizer.batch_decode(trunc_tokens["input_ids"], skip_special_tokens=True)
prompt = tokenizer(
examples["text"], truncation=True, padding=True, max_length=args.prompt_length, return_tensors="pt",
).to(device)
examples["prompt_text"] = tokenizer.batch_decode(prompt["input_ids"], skip_special_tokens=True)
examples["input_ids"] = prompt["input_ids"]
examples["attention_mask"] = prompt["attention_mask"]
examples["text_completion"] = tokenizer.batch_decode(
trunc_tokens["input_ids"][:, args.prompt_length:], skip_special_tokens=True
)
return examples
dataset = dataset.filter(filter_length)
dataset = dataset.skip(args.dataset_num_skip)
dataset = dataset.map(encode, batched=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size)
prompts = []
human_text = []
prompt_text = []
full_human_text = []
for batch in dataloader:
if len(human_text) >= args.num_samples:
break
prompts.append(batch)
human_text.extend(batch["text_completion"])
prompt_text.extend(batch["prompt_text"])
full_human_text.extend(batch["text"])
return {
"prompts": prompts,
"human_text": human_text,
"prompt_text": prompt_text,
"full_human_text": full_human_text,
}
def generate_samples(model, tokenizer, args, prompts, watermark, do_sample=True) -> Dict:
model_text = []
full_model_text = []
for batch in tqdm(prompts):
if len(model_text) >= args.num_samples:
break
with torch.no_grad():
| outputs = model.generate( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: skyoux/SemAIM
# Path: datasets/datasets.py
class ImageListFolder(datasets.ImageFolder):
def __init__(self, root, transform=None, target_transform=None,
ann_file=None, loader=default_loader):
self.root = root
self.transform = transform
self.loader = loader
self.target_transform = target_transform
self.nb_classes = 1000
assert ann_file is not None
print('load info from', ann_file)
self.samples = []
ann = open(ann_file)
for elem in ann.readlines():
cut = elem.split(' ')
path_current = os.path.join(root, cut[0])
target_current = int(cut[1])
self.samples.append((path_current, target_current))
ann.close()
print('load finish')
# Path: datasets/datasets.py
def build_transform(is_train, args):
mean = IMAGENET_DEFAULT_MEAN
std = IMAGENET_DEFAULT_STD
# train transform
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation='bicubic',
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
return transform
# eval transform
t = []
if args.input_size <= 224:
crop_pct = 224 / 256
else:
crop_pct = 1.0
size = int(args.input_size / crop_pct)
t.append(
transforms.Resize(size, interpolation=torchvision.transforms.InterpolationMode.BICUBIC), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
# Path: util/pos_embed.py
def interpolate_pos_embed(model, checkpoint_model):
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
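# --- Illustrative sketch (not from the original repository) ---
# The shape bookkeeping performed by interpolate_pos_embed above, shown for a
# hypothetical ViT-B/16 checkpoint: 224px pretraining stores 14*14 patch tokens plus
# one class token, while a 256px model needs 16*16 patch tokens, so only the 196
# patch-position rows are resized bicubically and the class token is kept as-is.
import torch
import torch.nn.functional as F
embed_dim, num_extra_tokens = 768, 1
old_pos = torch.randn(1, 14 * 14 + num_extra_tokens, embed_dim)        # (1, 197, 768)
extra, patch = old_pos[:, :num_extra_tokens], old_pos[:, num_extra_tokens:]
patch = patch.reshape(1, 14, 14, embed_dim).permute(0, 3, 1, 2)        # (1, 768, 14, 14)
patch = F.interpolate(patch, size=(16, 16), mode='bicubic', align_corners=False)
patch = patch.permute(0, 2, 3, 1).flatten(1, 2)                        # (1, 256, 768)
new_pos = torch.cat((extra, patch), dim=1)                             # (1, 257, 768)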
# Path: util/misc.py
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
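# --- Illustrative usage sketch (not from the original repository) ---
# The intended call pattern for NativeScalerWithGradNormCount with gradient
# accumulation, mirroring engine_finetune.train_one_epoch below. `model`,
# `optimizer`, `criterion`, `loader` and `accum_iter` are assumed to exist in the
# surrounding training script, so the pattern is shown as comments only.
#
#   loss_scaler = NativeScalerWithGradNormCount()
#   for step, (x, y) in enumerate(loader):
#       with torch.cuda.amp.autocast():
#           loss = criterion(model(x), y) / accum_iter
#       loss_scaler(loss, optimizer, clip_grad=1.0, parameters=model.parameters(),
#                   update_grad=(step + 1) % accum_iter == 0)
#       if (step + 1) % accum_iter == 0:
#           optimizer.zero_grad()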
# Path: models/models_vit.py
class VisionTransformer(timm.models.vision_transformer.VisionTransformer):
def __init__(self, global_pool=False, **kwargs):
def forward_features(self, x):
def forward_head(self, x):
def vit_small_patch16(**kwargs):
def vit_base_patch16(**kwargs):
def vit_large_patch16(**kwargs):
def vit_huge_patch14(**kwargs):
B = x.shape[0]
# Path: engines/engine_finetune.py
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
mixup_fn: Optional[Mixup] = None, log_writer=None,
args=None):
model.train(True)
metric_logger = misc.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)
print_freq = 20
accum_iter = args.accum_iter
optimizer.zero_grad()
if log_writer is not None:
print('log_dir: {}'.format(log_writer.log_dir))
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# we use a per iteration (instead of per epoch) lr scheduler
if data_iter_step % accum_iter == 0:
lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
with torch.cuda.amp.autocast():
outputs = model(samples)
loss = criterion(outputs, targets)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
loss /= accum_iter
loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=False,
update_grad=(data_iter_step + 1) % accum_iter == 0)
if (data_iter_step + 1) % accum_iter == 0:
optimizer.zero_grad()
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
loss_value_reduce = misc.all_reduce_mean(loss_value)
if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:
""" We use epoch_1000x as the x-axis in tensorboard.
This calibrates different curves when batch size changes.
"""
epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)
log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
log_writer.add_scalar('lr', max_lr, epoch_1000x)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
# Path: engines/engine_finetune.py
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = misc.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
# Path: main_finetune.py
import argparse
import datetime
import json
import numpy as np
import os
import time
import builtins
import torch
import torch.backends.cudnn as cudnn
import timm
import util.lr_decay as lrd
import util.misc as misc
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets.datasets import ImageListFolder, build_transform
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from models import models_vit
from engines.engine_finetune import train_one_epoch, evaluate
print(dataset_train)
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
dataset_val = ImageListFolder(os.path.join(args.data_path, 'train'), transform=transform_val,
ann_file=os.path.join(args.data_path, 'train.txt'))
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False
)
print("Sampler_val = %s" % str(sampler_val))
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
shuffle=False,
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = models_vit.__dict__[args.model](
num_classes=args.nb_classes,
drop_path_rate=args.drop_path,
global_pool=args.global_pool,
)
if args.finetune and not args.eval:
# load pretrained model
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load pre-trained checkpoint from: %s" % args.finetune)
if 'state_dict' in checkpoint:
checkpoint_model = checkpoint['state_dict']
else:
checkpoint_model = checkpoint['model']
state_dict = model.state_dict()
checkpoint_model = {k.replace("module.", ""): v for k, v in checkpoint_model.items()}
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
msg = model.load_state_dict(checkpoint_model, strict=False)
print(msg)
print("global_pool = ", args.global_pool)
if args.global_pool:
assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
else:
assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# manually initialize fc layer
trunc_normal_(model.head.weight, std=2e-5)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
# print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
# build optimizer with layer-wise lr decay (lrd)
param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay,
no_weight_decay_list=model_without_ddp.no_weight_decay(),
layer_decay=args.layer_decay
)
optimizer = torch.optim.AdamW(param_groups, lr=args.lr)
loss_scaler = NativeScaler()
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
| else: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: boweniac/autogan
# Path: autogan/oai/config_utils.py
class LLMConfig:
"""LLM config object
"""
def __init__(
self,
api_key_list: ConfigList,
max_messages_tokens: str,
request_interval_time: int,
request_timeout: int,
max_retries: int
):
self._api_key_list = api_key_list
self._max_messages_tokens = max_messages_tokens
self._request_interval_time = request_interval_time
self._request_timeout = request_timeout
self._max_retries = max_retries
def api_key(self, index):
"""Get the one configuration in the api_key_list.
"""
return self._api_key_list.get_config(index)
@property
def next_api_key(self):
"""Get the next configuration in the api_key_list.
"""
return self._api_key_list.get_next_config
@property
def len_of_api_key_list(self) -> int:
"""Get the first configuration in the api_key_list list.
"""
return self._api_key_list.len
@property
def model(self):
"""Get the model of the first configuration in the api_key_list list.
"""
return self._api_key_list.get_first_config["model"]
@property
def max_messages_tokens(self):
"""Limit the maximum tokens of the context in each dialogue.
"""
return self._max_messages_tokens
@property
def request_interval_time(self):
return self._request_interval_time
@property
def request_timeout(self):
return self._request_timeout
@property
def max_retries(self):
return self._max_retries
# Path: autogan/oai/count_tokens_utils.py
def count_text_tokens(text: str, model: Optional[str] = "gpt-3.5-turbo") -> int:
"""Calculate the tokens of the text.
:param text: The text to be tokenized
:param model: Calculate tokens for a specific model. If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.
:return: tokens
"""
if not text:
return 0
model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']
if model not in model_list:
model = "gpt-3.5-turbo"
try:
encoding = tiktoken.encoding_for_model(model)
num_tokens = len(encoding.encode(text))
except Exception as e:
print(e)
num_tokens = 0
return num_tokens
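# --- Illustrative usage sketch (not from the original repository) ---
# count_text_tokens above falls back to the gpt-3.5-turbo tokenizer for model names
# outside its small allow-list, and returns 0 for empty input or on tokenizer errors.
assert count_text_tokens("") == 0
n_default = count_text_tokens("hello world")                           # gpt-3.5-turbo encoding
n_fallback = count_text_tokens("hello world", model="unknown-model")   # same fallback encoding
assert n_default == n_fallback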
# Path: autogan/utils/environment_utils.py
def environment_info() -> str:
"""Current environment information
:return: --current_time: Y.m.d H:M:S week:%w
"""
info = f'current time: {get_time()}'
return info
# Path: autogan/oai/generate_utils.py
def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\
-> tuple[Optional[str], Optional[int]]:
"""Call the LLM interface
Currently, only the chatgpt model of openai (including azure) is adapted.
:param llm_config: LLM configuration.
:param messages:
:param agent_name:
:param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries
- main: agent replies
- idea: deep thoughts
- messages_summary: context compression
- text_summary: general summaries
- clue_summary: clue summaries
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
"""
# When a certain configuration in the configuration list fails to request,
# continue to try the next configuration until all configurations in the list are attempted.
loop = llm_config.len_of_api_key_list
for i in range(loop):
time.sleep(llm_config.request_interval_time)
api_key = llm_config.next_api_key
try:
completion_content = ""
completion_tokens = 0
index = 1
for message in chat_completions(messages, api_key, llm_config.request_timeout,
llm_config.max_retries, stream_mode):
content = ""
if stream_mode:
if (message and "choices" in message and "delta" in message["choices"][0]
and "content" in message["choices"][0]["delta"]
and message["choices"][0]["delta"]["content"]):
content = message["choices"][0]["delta"]["content"]
completion_content += content
else:
if (message and "choices" in message and "message" in message["choices"][0]
and "content" in message["choices"][0]["message"]
and message["choices"][0]["message"]["content"]):
content = message["choices"][0]["message"]["content"]
completion_content = content
if message and "usage" in message and "completion_tokens" in message["usage"]:
completion_tokens = message["usage"]["completion_tokens"]
response_func(agent_name, gen, api_key["model"], stream_mode, index, content, completion_tokens, message)
if content:
index += 1
if completion_content:
if completion_tokens == 0:
completion_tokens = count_text_tokens(completion_content, api_key['model'])
return completion_content, completion_tokens
else:
raise ValueError("The return value is empty.")
except Exception as e:
if i == loop - 1:
print(f"generate_chat_completion Exception: {e}")
return None, None
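# --- Illustrative sketch (not from the original repository) ---
# The failover pattern used by generate_chat_completion above, reduced to its core:
# every configured endpoint is tried in turn, and exceptions are swallowed until the
# last attempt also fails. `configs` and `call_endpoint` are hypothetical stand-ins,
# not autogan APIs.
import time
def call_with_failover(configs, call_endpoint, interval_seconds=0.0):
    for i, cfg in enumerate(configs):
        time.sleep(interval_seconds)            # mirrors llm_config.request_interval_time
        try:
            return call_endpoint(cfg)           # first successful response wins
        except Exception as e:
            if i == len(configs) - 1:           # only the final failure is reported
                print(f"call_with_failover: all endpoints failed: {e}")
                return None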
# Path: autogan/utils/response.py
def colored(x, *args, **kwargs):
def default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,
content: Optional[str], tokens: Optional[int], response: any):
def obj_to_dict(obj):
# Path: autogan/utils/compressed_text_utils.py
import math
import re
from typing import Optional, List
from autogan.oai.config_utils import LLMConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.utils.environment_utils import environment_info
from autogan.oai.generate_utils import generate_chat_completion
from autogan.utils.response import ResponseFuncType
待压缩的文本。
:param safe_size: The target size of the text after compression.
文本压缩后的目标尺寸。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
compressed_text = ""
total_tokens = 0
split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model)
# Calculate the approximate size of the text slices proportionally
split_safe_size = int(safe_size / len(split_texts))
for st in split_texts:
content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode,
split_safe_size)
if content:
compressed_text += content + "\n"
total_tokens += tokens
if compressed_text:
return compressed_text, total_tokens
else:
return None, None
def generate_text_summary(text: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType,
stream_mode: Optional[bool] = None, safe_size: Optional[int] = None) \
-> tuple[str, int]:
"""Generate a general summary of the text
生成文本普通摘要
:param text: Text to be compressed.
待压缩的文本。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: The target size of the text after compression, if not provided there is no limit.
文本压缩后的目标尺寸,如果为空则不做限制。
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
if safe_size:
system_prompt = """I hope you are an article filter and refiner, filtering and refining the articles sent by users. Please ensure that your summary does not exceed the limit of max_tokens.
When the content of the article is not enough to refine, please omit other polite language and only output one word: None.
If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article.
Please note that the description perspective and chapter structure of the extracted content should be as consistent as possible with the original text, and try to retain details for subsequent reasoning. Please omit other polite language and only output the refined content."""
chat_prompt = f"max_tokens: {safe_size}\n\nArticle content:\n{text}"
# system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章,请确保您的总结不超过 max_tokens 的限制.
# 当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。
# 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容.
# 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理,请省略其他客套用语,仅输出提炼好的内容。"""
# chat_prompt = f"max_tokens: {safe_size}\n\n文章内容:\n\n{text}"
else:
system_prompt = """I hope you can serve as an article filter and refiner, filtering and refining the articles sent by users. If the content of the article is insufficient for refinement, please omit other polite phrases and output only one word: None.
If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article.
Please note that the perspective and chapter structure of the extracted content should be as consistent with the original as possible, and retain as many details as possible for subsequent reasoning. Please omit other polite phrases and only output the refined content."""
chat_prompt = f"Article content:\n{text}"
# system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章。当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。
# 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容。
# 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理。请省略其他客套用语,仅输出提炼好的内容。"""
# chat_prompt = f"文章内容:\n{text}"
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': chat_prompt}]
return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func,
stream_mode)
def generate_text_clues(text: str, focus: str, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate a clue summary of the text
生成文本线索摘要
:param text: Text to be compressed.
待压缩的文本。
:param focus: The focus direction when compressing text.
压缩文本时的专注方向。
:param summary_model_config: LLM configuration used for text compression.
用于压缩文本的 LLM 配置。
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--compressed_text: The text after compression.
压缩后的文本。
--total_tokens: Total tokens after compression.
压缩后的整体tokens。
"""
info = environment_info()
system_prompt = """I hope you are an agent who is good at discovering the truth in real-time, capable of finding content that helps infer the answer to the question from the information sent by users.
Please note that if the content of the information has no extractable value, please omit other polite expressions and output only one word: None. Also, please help me filter out sensitive content related to politics, geopolitics, violence, and sex in the information."""
# system_prompt = """我希望你是一个善于发现实时真相的探员, 能从用户发送的资料中帮我找到有助于推断出问题答案的内容。
# 需要注意的是,如果资料内容没有可提取的价值,请省略其他客套用语,仅输出一个单词:None。另外还请帮我过滤掉资料中与政治、地缘政治、暴力、性等有关的敏感内容。"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user',
| 'content': f'The current question is:{focus}\n\nEnvironmental information:\n{info}\n\nMaterial content:\n\n{text}'}] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation
# Path: utils/metric_util.py
class AverageMeter():
""" Computes and stores the average and current value """
def __init__(self):
self.reset()
def reset(self):
""" Reset all statistics """
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
""" Update statistics """
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# Path: utils/tensor_op.py
def save_img_tensor(restored,result_dir,ippath):
'''
:param restored: (1,C,H,W)
:param result_dir:
:param ippath:
:return:
'''
restored = torch.clamp(restored, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0).numpy()
util.save_img(img_as_ubyte(restored),util.Generate_rp(result_dir,ippath))
# Path: utils/tensor_op.py
def save_image_tensor(image_tensor, output_path="output/"):
image_np = torch_to_np(image_tensor)
p = np_to_pil(image_np)
p.save(output_path)
# Path: utils/util.py
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
# Path: utils/util.py
def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):
'''
util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,
screen=True, tofile=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
'''
lg = logging.getLogger(logger_name)
fmt = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s'
color_fmt = colored('%(asctime)s.%(msecs)03d','green') + '- %(levelname)s: %(message)s'
formatter = logging.Formatter(fmt=color_fmt,
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
lg.propagate = False
if tofile:
log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
# Path: utils/data_util.py
def crop_HWC_img(image, base=64):
"""
裁切到multiple of base的size上
:param image: H,W,C
:param base: (int)
:return:
"""
h = image.shape[0]
w = image.shape[1]
crop_h = h % base
crop_w = w % base
return image[crop_h // 2:h - crop_h + crop_h // 2, crop_w // 2:w - crop_w + crop_w // 2, :]
# Path: utils/data_util.py
def random_augmentation(*args):
out = []
flag_aug = random.randint(0,7)
for data in args:
out.append(data_augmentation(data, flag_aug).copy())
return out
# Path: utils/data_util.py
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
"""Convert torch Tensors into image numpy arrays.
After clamping to [min, max], values will be normalized to [0, 1].
Args:
tensor (Tensor or list[Tensor]): Accept shapes:
1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
2) 3D Tensor of shape (3/1 x H x W);
3) 2D Tensor of shape (H x W).
Tensor channel should be in RGB order.
rgb2bgr (bool): Whether to change rgb to bgr.
out_type (numpy type): output types. If ``np.uint8``, transform outputs
to uint8 type with range [0, 255]; otherwise, float type with
range [0, 1]. Default: ``np.uint8``.
min_max (tuple[int]): min and max values for clamp.
Returns:
(Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
shape (H x W). The channel order is BGR.
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list)
and all(torch.is_tensor(t) for t in tensor))):
raise TypeError(
f'tensor or list of tensors expected, got {type(tensor)}')
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
_tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
img_np = img_np.transpose(1, 2, 0)
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = img_np.transpose(1, 2, 0)
if img_np.shape[2] == 1: # gray image
img_np = np.squeeze(img_np, axis=2)
else:
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise TypeError('Only support 4D, 3D or 2D tensor. '
f'But received with dimension: {n_dim}')
if out_type == np.uint8:
# Unlike MATLAB, numpy.unit8() WILL NOT round by default.
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
if len(result) == 1:
result = result[0]
return result
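# --- Illustrative usage sketch (not from the original repository) ---
# tensor2img above maps a (B, C, H, W) float tensor in [0, 1] to an (H, W, C)
# uint8 array in BGR channel order (rgb2bgr=True is the default).
import torch
dummy = torch.rand(1, 3, 8, 8)       # one RGB image with values in [0, 1]
img = tensor2img(dummy)              # -> np.uint8 array of shape (8, 8, 3), BGR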
# Path: metrics/psnr_ssim.py
def compute_psnr_ssim(recoverd, clean):
"""
    Takes model.output (the raw restored tensor) and the clean target as input.
"""
assert recoverd.shape == clean.shape
recoverd = np.clip(recoverd.detach().cpu().numpy(), 0, 1)
clean = np.clip(clean.detach().cpu().numpy(), 0, 1)
recoverd = recoverd.transpose(0, 2, 3, 1)
clean = clean.transpose(0, 2, 3, 1)
psnr = 0
ssim = 0
for i in range(recoverd.shape[0]):
# psnr_val += compare_psnr(clean[i], recoverd[i])
# ssim += compare_ssim(clean[i], recoverd[i], multichannel=True)
psnr += peak_signal_noise_ratio(clean[i], recoverd[i], data_range=1)
ssim += structural_similarity(clean[i], recoverd[i], data_range=1, multichannel=True)
return psnr / recoverd.shape[0], ssim / recoverd.shape[0], recoverd.shape[0]
# Path: metrics/psnr_ssim.py
def calculate_psnr(img1, img2, crop_border=0, test_y_channel=False):
"""img1 and img2 have range [0, 255] np.uint8
    Inputs are expected after conversion with tensor2img.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: psnr result.
"""
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
if test_y_channel:
img1 = to_y_channel(img1)
img2 = to_y_channel(img2)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
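# --- Illustrative sketch (not from the original repository) ---
# A quick numeric check of calculate_psnr above: identical uint8 images give inf,
# and an MSE close to 1 gives roughly 20 * log10(255) ≈ 48.1 dB.
import numpy as np
a = np.zeros((4, 4, 3), dtype=np.uint8)
b = a.copy()
b[0, 0, 0] = 7                       # one differing pixel -> MSE = 49 / 48 ≈ 1.02
print(calculate_psnr(a, a))          # inf, since MSE == 0
print(calculate_psnr(a, b))          # ≈ 48.0 dB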
# Path: metrics/psnr_ssim.py
def calculate_ssim(img1, img2):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
                ssims.append(ssim(img1[..., i], img2[..., i]))  # compare channel i of each image
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
# Path: models/archs/IDR_restormer_arch.py
class IDR_restormer(nn.Module):
def __init__(self,
inp_channels=3,
out_channels=3,
dim=48,
num_blocks=[4, 6, 6, 8],
num_refinement_blocks=4,
heads=[1, 2, 4, 8],
ffn_expansion_factor=2.66,
bias=False,
LayerNorm_type='WithBias', ## Other option 'BiasFree'
num_degra_queries = 24,
keep_degra = 48,
degra_type = 5,
sam = True,
ops_type = 5,
pred = True
):
super(IDR_restormer, self).__init__()
self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}
self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)
self.encoder_level1 = nn.Sequential(*[
MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,
LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
self.down1_2 = Downsample(dim) ## From Level 1 to Level 2
self.encoder_level2 = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])
self.down2_3 = Downsample(int(dim * 2 ** 1)) ## From Level 2 to Level 3
self.encoder_level3 = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])
self.down3_4 = Downsample(int(dim * 2 ** 2)) ## From Level 3 to Level 4
self.latent = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 3), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])])
self.up4_3 = Upsample(int(dim * 2 ** 3)) ## From Level 4 to Level 3
self.reduce_chan_level3 = nn.Conv2d(int(dim * 2 ** 3), int(dim * 2 ** 2), kernel_size=1, bias=bias)
self.decoder_level3 = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])
self.up3_2 = Upsample(int(dim * 2 ** 2)) ## From Level 3 to Level 2
self.reduce_chan_level2 = nn.Conv2d(int(dim * 2 ** 2), int(dim * 2 ** 1), kernel_size=1, bias=bias)
self.decoder_level2 = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])
self.up2_1 = Upsample(int(dim * 2 ** 1)) ## From Level 2 to Level 1 (NO 1x1 conv to reduce channels)
self.decoder_level1 = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
self.refinement = nn.Sequential(*[
MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,
bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_refinement_blocks)])
self.output = nn.Conv2d(int(dim * 2 ** 1), out_channels, kernel_size=3, stride=1, padding=1, bias=bias)
self.degra_key = nn.Parameter(torch.randn(degra_type, num_degra_queries, int(dim * 2 ** 3)), requires_grad=True)
self.dmixer = PI_MLP_Mixer(dim=int(dim * 2 ** 3),num_degra=num_degra_queries*degra_type,keep_degra=keep_degra,init='pca')
self.kdp_level1 = Key_TransformerBlock(dim=dim, dimkey=int(dim * 2 ** 3), num_heads=heads[0], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)
self.kdp_level2 = Key_TransformerBlock(dim=int(dim * 2 ** 1), dimkey=int(dim * 2 ** 3), num_heads=heads[1], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)
self.kdp_level3 = Key_TransformerBlock(dim=int(dim * 2 ** 2), dimkey=int(dim * 2 ** 3), num_heads=heads[2], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)
self.cri_pix = nn.L1Loss().cuda()
def forward(self, inp_img, degra_type=None, gt=None, epoch=None):
"""
only input_image is required during inference
"""
flag=0
batch_size,c,h,w = inp_img.shape
if epoch and epoch <= 550:
# stage 1 training - Task-oriented knowledge collection
de_type = degra_type[0]
degra_id = self.de_dict[de_type]
degra_key = self.degra_key[degra_id,:,:].unsqueeze(0).expand(batch_size,-1,-1)
else:
            # stage 2 training - Ingredients-oriented knowledge integration
if flag==0:
U,S,V = process_USV(self.degra_key.detach())
flag=1
U,V = self.dmixer(U,V,batch_size)
degra_key = [U,S,V]
de_type = None
inp_enc_level1 = self.patch_embed(inp_img)
out_enc_level1 = self.encoder_level1(inp_enc_level1)
torch_resize1 = Resize([out_enc_level1.shape[2],out_enc_level1.shape[3]])
inp_img1 = torch_resize1(inp_img)
out_enc_level1,output_img1,pred1 = self.kdp_level1(out_enc_level1,degra_key,inp_img1,degra_type=de_type)
inp_enc_level2 = self.down1_2(out_enc_level1)
out_enc_level2 = self.encoder_level2(inp_enc_level2)
torch_resize2 = Resize([out_enc_level2.shape[2],out_enc_level2.shape[3]])
inp_img2 = torch_resize2(inp_img)
out_enc_level2,output_img2,pred2 = self.kdp_level2(out_enc_level2,degra_key,inp_img2,degra_type=de_type)
inp_enc_level3 = self.down2_3(out_enc_level2)
out_enc_level3 = self.encoder_level3(inp_enc_level3)
torch_resize3 = Resize([out_enc_level3.shape[2],out_enc_level3.shape[3]])
inp_img3 = torch_resize3(inp_img)
out_enc_level3,output_img3,pred3 = self.kdp_level3(out_enc_level3,degra_key,inp_img3,degra_type=de_type)
inp_enc_level4 = self.down3_4(out_enc_level3)
latent = self.latent(inp_enc_level4)
inp_dec_level3 = self.up4_3(latent)
inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)
inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)
out_dec_level3 = self.decoder_level3(inp_dec_level3)
inp_dec_level2 = self.up3_2(out_dec_level3)
inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)
inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)
out_dec_level2 = self.decoder_level2(inp_dec_level2)
inp_dec_level1 = self.up2_1(out_dec_level2)
inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)
out_dec_level1 = self.decoder_level1(inp_dec_level1)
out_dec_level1 = self.refinement(out_dec_level1)
out_dec_level1 = self.output(out_dec_level1) + inp_img
if gt is not None:
gt_img1 = torch_resize1(gt)
gt_img2 = torch_resize2(gt)
gt_img3 = torch_resize3(gt)
output_img = [output_img1,output_img2,output_img3]
gt_img = [gt_img1,gt_img2,gt_img3]
loss = np.sum([self.cri_pix(output_img[j],gt_img[j]) for j in range(len(output_img))])
return [out_dec_level1,loss,pred1,pred2,pred3]
else:
return out_dec_level1
# Path: inference.py
import argparse
import subprocess
import numpy as np
import os
import torch
import torch.nn as nn
import logging
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.metric_util import AverageMeter
from utils.tensor_op import save_img_tensor, save_image_tensor
from utils.util import mkdir, setup_logger
from utils.data_util import crop_HWC_img, random_augmentation, tensor2img
from metrics.psnr_ssim import compute_psnr_ssim, calculate_psnr, calculate_ssim
from models.archs.IDR_restormer_arch import IDR_restormer
class DenoiseTestDataset(Dataset):
def __init__(self, args, dataset="CBSD68"):
super(DenoiseTestDataset, self).__init__()
self.args = args
self.clean_ids = []
self.sigma = 15
self.dataset_dict = {'CBSD68': 0, 'urban100': 1, 'Kodak24':2}
self.set_dataset(dataset)
self.toTensor = ToTensor()
def _init_clean_ids(self):
if self.task_idx == 0:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_CBSD68_path)
self.clean_ids += [self.args.denoise_CBSD68_path + id_ for id_ in name_list]
elif self.task_idx == 1:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_urban100_path)
self.clean_ids += [self.args.denoise_urban100_path + id_ for id_ in name_list]
elif self.task_idx == 2:
self.clean_ids = []
name_list = os.listdir(self.args.denoise_Kodak24_path)
self.clean_ids += [self.args.denoise_Kodak24_path + id_ for id_ in name_list]
self.num_clean = len(self.clean_ids)
def set_dataset(self, dataset):
self.task_idx = self.dataset_dict[dataset]
self._init_clean_ids()
def _add_gaussian_noise(self, clean_patch):
noise = np.random.randn(*clean_patch.shape)
noisy_patch = np.clip(clean_patch + noise * self.sigma, 0, 255).astype(np.uint8)
return noisy_patch, clean_patch
def _edgeComputation(self,x):
x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:])
x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:])
y = np.zeros_like(x)
y[:,1:,:] += x_diffx
y[:,:-1,:] += x_diffx
y[1:,:,:] += x_diffy
y[:-1,:,:] += x_diffy
y = np.sum(y,2)/3
y /= 4
| return y[:,:,None].astype(np.float32) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: TACJu/Compositor
# Path: Compositor_Mask2Former/mask2former/modeling/transformer_decoder/position_encoding.py
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask=None):
if mask is None:
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
def __repr__(self, _repr_indent=4):
head = "Positional encoding " + self.__class__.__name__
body = [
"num_pos_feats: {}".format(self.num_pos_feats),
"temperature: {}".format(self.temperature),
"normalize: {}".format(self.normalize),
"scale: {}".format(self.scale),
]
# _repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
# Path: Compositor_Mask2Former/mask2former/modeling/transformer_decoder/transformer.py
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# Path: Compositor_Mask2Former/mask2former/modeling/transformer_decoder/transformer.py
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
# Path: Compositor_Mask2Former/mask2former/modeling/pixel_decoder/ops/modules/ms_deform_attn.py
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 128
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
try:
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
except:
# CPU
output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
# # For FLOPs calculation only
# output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
# Path: Compositor_Mask2Former/mask2former/modeling/pixel_decoder/msdeformattn.py
import logging
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from ..transformer_decoder.transformer import _get_clones, _get_activation_fn
from .ops.modules import MSDeformAttn
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def forward(self, srcs, pos_embeds):
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs]
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
return memory, spatial_shapes, level_start_index
class MSDeformAttnTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class MSDeformAttnTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
output = src
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
for _, layer in enumerate(self.layers):
output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
return output
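# Illustrative check (not part of this repository): the reference points built above are just
# the normalized pixel centers of each level, scaled by the valid ratios. With the all-zero
# masks used in this decoder the ratios are 1, so for a single hypothetical 4x4 level:
import torch
shapes = torch.as_tensor([[4, 4]], dtype=torch.long)
valid_ratios = torch.ones(1, 1, 2)  # (batch, n_levels, 2)
ref = MSDeformAttnTransformerEncoder.get_reference_points(shapes, valid_ratios, device='cpu')
print(ref.shape)     # torch.Size([1, 16, 1, 2])
print(ref[0, 0, 0])  # tensor([0.1250, 0.1250]) -- pixel (0, 0) center, i.e. (0.5/4, 0.5/4)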
@SEM_SEG_HEADS_REGISTRY.register()
class MSDeformAttnPixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
conv_dim: int,
mask_dim: int,
| norm: Optional[Union[str, Callable]] = None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: happyapplehorse/ai-care
# Path: src/ai_care/abilities.py
class Ability:
def __init__(self, ai_care: AICare):
self.ai_care = ai_care
self.abilities: dict = {}
self._register_abilities()
def _register_abilities(self) -> None:
for name, method in inspect.getmembers(self, predicate=inspect.ismethod):
if getattr(method, '_ability_', False):
self.abilities[name] = method
@_ability(
description="Remain silent.",
)
def stay_silent(self) -> None:
return
@_ability(
description="Speak to the user right now.",
)
@_ability_parameter(
name="content",
description="The content you want to say.",
)
def speak_now(self, content: str | Generator[str, None, None]) -> None:
self.ai_care.to_user_method(content)
@_ability(
description=(
"Set a message to be delivered to the user after a certain period of time. "
"This choice means you decide to observe for a duration, and if the user does "
"not speak to you during this time, you will then convey the message content you have set. "
"However, if the user speaks to you within this time frame, the operation will automatically "
"be cancelled. This option is recommended."
)
)
@_ability_parameter(
name="delay",
description="Set how long until your message is sent. Unit in seconds.",
)
@_ability_parameter(
name="message",
description="Set what you want to say to the user after a certain period of time.",
)
def speak_after(self, delay: float | int, message: str) -> None:
if self.ai_care._stream_mode is True:
message_wrap = (string for string in [message])
else:
message_wrap = message
self.ai_care.set_timer(interval=delay, function=self.ai_care.to_user_method, args=(message_wrap,))
@_ability(
description=(
"Detect environmental conditions. This choice means that you decide to first obtain the results "
"from the sensors, observe the environmental situation, and then decide what to choose based on "
"this information. You can only choose which sensors to use from the list of available sensors."
),
)
@_ability_parameter(
name="delay",
description="Set how long to wait before using sensors to obtain readings. Unit in seconds.",
)
@_ability_parameter(
name="sensors",
description="The list of names of the sensors to be used.",
param_type="list[str]",
)
@_auto_depth(depth_param_name="_depth_left")
def detect_env(self, delay: float | int, sensors: list[str], _depth_left: int) -> None:
def detect_env_callback(sensors_list: list[str]):
sensor_data = {}
for sensor in sensors_list:
data = self.ai_care.get_sensor_data(sensor)
sensor_data[sensor] = data
self.ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": f"The results of the sensor are as follows: {str(sensor_data)}.",
},
],
depth_left = _depth_left - 1,
)
not_existed_sensors_set = set(sensors) - set(self.ai_care.sensors)
if not_existed_sensors_set:
self.ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": f"There are no {str(not_existed_sensors_set)} sensor. Please use the correct sensor name.",
}
],
depth_left = _depth_left - 1,
)
return
self.ai_care.set_timer(
interval=delay,
function=detect_env_callback,
args=(sensors,),
)
@_ability(
description=(
"Release detectors. This option means you forego making an active choice and instead release "
"some detectors, which will automatically report back to you and ask for your decision when "
"specific conditions are met. You can only choose which detectors to use from the list of "
"available detectors."
),
)
@_ability_parameter(
name="delay",
description="Set the time in seconds before releasing the detectors.",
)
@_ability_parameter(
name="detectors",
description="The list of names of the detectors to be released.",
)
@_auto_depth(depth_param_name="_depth_left")
def release_detector(self, delay: int | float, detectors: list[str], _depth_left: int) -> None:
def release_detector_callback(detectors_list: list[str]):
self.ai_care.release_detector(detectors_list)
not_existed_detectors_set = set(detectors) - set(self.ai_care.detectors)
if not_existed_detectors_set:
self.ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": f"There are no {str(not_existed_detectors_set)} detector. Please use the correct detector name.",
}
],
depth_left = _depth_left - 1,
)
return
self.ai_care.set_timer(interval=delay, function=release_detector_callback, args=(detectors,))
@_ability(
description=(
"Ask again after a while. This choice means that you do not want to make a decision right now, "
"and you would like Aicarey to ask you again later."
),
)
@_ability_parameter(
name="delay",
description="Set how long to wait before asking you again, in seconds.",
)
@_auto_depth(depth_param_name="_depth_left")
def ask_later(self, delay: int | float, _depth_left: int) -> None:
if self.ai_care._ask_later_count_left <= 0:
return
self.ai_care._ask_later_count_left -= 1
self.ai_care.set_timer(
interval=delay,
function=self.ai_care.ask,
kwargs={
"messages_list": [
{
"role": "ai_care",
"content": (
f"{delay} senconds ago, you asked me to inquire later, "
f"and now {delay} seconds have passed. Please make a choice."
)
}
],
"depth_left": _depth_left - 1,
},
)
@_ability(
description=(
"Cycle release of detectors. This option means that you forgo making an active choice "
"and instead continuously release some detectors, which will automatically report back "
"to you and ask for your decision when specific conditions are met. The detectors you "
"select will be released periodically at set time intervals until the next conversation "
"is initiated. All chosen detectors will be released in each cycle."
),
)
@_ability_parameter(
name="interval",
description="Set the time interval between each release of the detectors. Unit in seconds",
)
@_ability_parameter(
name="detectors",
description="The list of names of the detectors to be released.",
)
@_auto_depth(depth_param_name="_depth_left")
def cyclic_detection(self, interval: int | float, detectors: list[str], _depth_left) -> None:
not_existed_detectors_set = set(detectors) - set(self.ai_care.detectors)
if not_existed_detectors_set:
self.ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": f"There are no {str(not_existed_detectors_set)} detector. Please use the correct detector name.",
}
],
depth_left = _depth_left - 1,
)
return
def repeat_interval(interval: float):
while True:
yield interval
self.ai_care.set_cyclic_detection(
detectors=detectors,
interval_gen=repeat_interval(float(interval)),
cancel_after_trigger_ask=True
)
# Path: src/ai_care/choice_execute.py
def choice_execute(ai_care: AICare, choice_code: str, content: str | Generator[str, None, None], depth_left: int) -> None:
try:
choice = Choice(choice_code)
except ValueError as e:
logger.warning(f"Invalid choice {choice_code}.")
ai_care.ask(
messages_list=[
{
"role": "assistant",
"content": f"AA00{choice_code}{choice_code}:{content}",
},
{
"role": "ai_care",
"content": f"Your choice code {choice_code} is not correct. Please make a correct choice again.",
},
],
depth_left = depth_left - 1,
)
return
if choice == Choice.ERROR:
ai_care.ask(messages_list=[], depth_left = depth_left - 1)
return
logger.info(f"Choice: {choice.name}")
if isinstance(content, str):
ai_care._ask_context.append(
{
"role": "assistant",
"content": f"AA00{choice_code}{choice_code}:{content}",
}
)
elif isinstance(content, Generator):
# This case has been handled in parse_response.
pass
else:
assert False
ability_method = ai_care.ability.abilities[choice.name.lower()]
if choice == Choice.STAY_SILENT:
ability_method()
return
if choice == Choice.SPEAK_NOW:
params = {"content": content}
else:
assert isinstance(content, str)
try:
params = json.loads(content)
except json.JSONDecodeError as e:
logger.warning(f"Failed to correctly parse the parameter. Parameter json string: {content}. Error: {e}.")
ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": (
"Failed to correctly parse the parameter. "
"Please send the correct parameters in JSON format, "
"or make a choice again."
),
},
],
depth_left = depth_left - 1,
)
return
if not isinstance(params, dict):
ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": (
"The parameters should be a dictionary in JSON format."
"Please send the correct parameters in JSON format, or make a choice again."
),
},
],
depth_left = depth_left - 1,
)
return
ability_params = {}
for param in ability_method._ability_parameters_:
default_value = param["default_value"]
if param["required"] is False:
ability_params[param["name"]] = default_value
ability_params.update(params)
if getattr(ability_method, "_auto_depth_", False):
ability_params[ability_method._depth_param_name_] = depth_left
needed_params_set = {param["name"] for param in ability_method._ability_parameters_}
missing_params_set = needed_params_set - set(ability_params.keys())
if missing_params_set:
ai_care.ask(
messages_list=[
{
"role": "ai_care",
"content": (
f"You did not provide the following parameters: {str(missing_params_set)} ."
"Please send the correct parameters in JSON format, or make a choice again."
),
},
],
depth_left = depth_left - 1
)
return
logger.info(f"Choice parameters: {str(ability_params)}")
ability_method(**ability_params)
# Path: src/ai_care/parse_response.py
def parse_response(
ai_care: AICare,
response: str | Generator[str, None, None],
) -> tuple[str, str | Generator[str, None, None]]:
choice_code = ""
content = ""
if isinstance(response, str):
ai_care._stream_mode = False
prefix, content = _extract_info(response)
if not all(prefix):
return '00', ''
choice_code = prefix[2]
check_valid = prefix[3]
if choice_code == check_valid:
ai_care._valid_msg_count += 1
else:
ai_care._invalid_msg_count += 1
assert isinstance(choice_code, str)
return choice_code, content or ""
elif isinstance(response, Generator):
ai_care._stream_mode = True
buffer = ""
first_item = ""
chunk_list = []
found_choice = False
prefix = (None, None, None, None)
choice = Choice.ERROR
for chunk in response:
buffer += chunk
if not found_choice:
prefix, content = _extract_info(buffer)
if all(prefix) and len(buffer) >= 9 and not found_choice:
choice_code = prefix[2]
check_valid = prefix[3]
if choice_code == check_valid:
ai_care._valid_msg_count += 1
else:
ai_care._invalid_msg_count += 1
try:
choice = Choice(choice_code)
except ValueError as e:
logger.warning(f"Invalid choice {choice_code}. Error: {e}")
assert isinstance(choice_code, str)
return choice_code, ''
found_choice = True
if choice == Choice.SPEAK_NOW:
first_item = buffer[9:]
break
chunk_list.append(buffer[9:])
continue
if found_choice:
chunk_list.append(chunk)
if found_choice is False:
return '00', ''
prefix = cast(tuple[str, str, str, str], prefix)
if choice == Choice.SPEAK_NOW:
def response_content_gen():
gen_content_record = []
yield first_item
gen_content_record.append(first_item)
for item in response:
yield item
gen_content_record.append(item)
ai_care._ask_context.append(
{
"role": "ai_care",
"content": ''.join(gen_content_record),
}
)
return choice.value, response_content_gen()
else:
return choice.value, ''.join(chunk_list)
else:
assert False, "The response must be str or a generator."
# Path: src/ai_care/render_prompt.py
def render_basic_prompt(
ai_care: AICare,
inactive_abilities_list: list[Choice] | None = None,
inactive_sensors_list: list[str] | None = None,
inactive_detectors_list: list[str] | None = None,
) -> str:
inactive_abilities_set = set() if inactive_abilities_list is None else set(inactive_abilities_list)
inactive_sensors_set = set() if inactive_sensors_list is None else set(inactive_sensors_list)
inactive_detectors_set = set() if inactive_detectors_list is None else set(inactive_detectors_list)
if ai_care._ask_later_count_left <= 0:
inactive_abilities_set.add(Choice.ASK_LATER)
abilities_dict = ai_care.ability.abilities
intervals_info = (
f"""The intervals of the last {len(ai_care._chat_intervals)} times the user conversed with you are recorded in the following list (unit in seconds):
{str(ai_care._chat_intervals)}
""" if ai_care._chat_intervals else ""
) + (
f"""It has been {time.monotonic() - ai_care._last_chat_time} seconds since the last time the user spoke with you."""
if ai_care._last_chat_time is not None else ""
)
sorted_abilities = sorted(abilities_dict.values(), key=lambda x: Choice[x.__name__.upper()].value)
abilities_info = ''.join(_render_ability_description(ability_method).lstrip()
for ability_method in sorted_abilities
if ability_method not in inactive_abilities_set
).replace('\n', '\n ')
prompt = textwrap.dedent(
f"""
I am a program, and my name is Aicarey.
You can see your conversation history with the user in the previous messages.
This message is sent by me. Please continue to focus on the user and do not attempt to converse with me.
You should seriously judge which choice you should make based on the content of your conversation with the user.
For example, if you are in a question-and-answer mode with the user,
then you may not need to speak when the user doesn't ask a question.
However, if you are chatting with the user like a friend,
then you may need to proactively continue the conversation like a friend when the user doesn't speak.
When replying to this message, you must follow the rules below:
========================RESPONSE RULES=======================
1. Start with eight characters followed by an English colon.
The first two characters of these eight must be 'AA', the third and fourth must be '00',
the fifth and sixth are the code of your choice.
The seventh and eighth characters should repeat the fifth and sixth characters.
2. After the colon is the content corresponding to the chosen option.
If it involves a function call, this part of the content must be in the format of a JSON string.
=============================================================
Here are the choices you can make:
===========================CHOICES===========================
{abilities_info}
=============================================================
You must choose one of the options provided above as your reply.
Response Examples:
If you want to remain silent: AA000101:
If you want to say to the user: AA000202: [Your content here]
If you decide to ask the user what they are doing if they haven't spoken to you in a minute: AA000303: {{"delay":60, "message":"What are you doing?"}}
===========================SENSORS===========================
Sensors list:
{
str(
[
{"sensor_name": sensor["name"], "sensor_description": sensor["annotation"]}
for sensor in ai_care.sensors.values()
if sensor["name"] not in inactive_sensors_set
]
)
}
=============================================================
==========================DETECTORS==========================
Detectors list:
{
str(
[
{"detector_name": detector.name, "detector_description": detector.annotation}
for detector in ai_care.detectors.values()
if detector.name not in inactive_detectors_set
]
)
}
=============================================================
============================FACTS============================
{intervals_info}
{ai_care.guide}
=============================================================
"""
)
return prompt
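# Illustrative sketch (not part of this repository): the RESPONSE RULES above ask the LLM for
# replies of the form "AA00<code><code>:<content>". _extract_info is not shown in this excerpt,
# so the helper below is a hypothetical stand-in that splits such a reply the same way.
import re
def extract_prefix(reply: str):
    m = re.match(r"^(AA)(00)(\d{2})(\d{2}):(.*)$", reply, flags=re.S)
    if m is None:
        return (None, None, None, None), None
    return (m.group(1), m.group(2), m.group(3), m.group(4)), m.group(5)
prefix, content = extract_prefix('AA000303:{"delay": 60, "message": "What are you doing?"}')
print(prefix)   # ('AA', '00', '03', '03')
print(content)  # '{"delay": 60, "message": "What are you doing?"}'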
# Path: src/ai_care/ai_care.py
import itertools
import logging
import time
import threading
from abc import ABCMeta, abstractmethod
from typing import Callable, Any, Generator, TypedDict, Literal, cast
from .abilities import Ability
from .choice_execute import choice_execute
from .parse_response import parse_response
from .render_prompt import render_basic_prompt
from __future__ import annotations
logger = logging.getLogger("ai_care")
ChatContext = Any
ConfigKey = Literal["delay", "ask_later_count_limit", "ask_depth", "n_chat_intervals"]
class AICare:
def __init__(self) -> None:
self.timers: dict[int, AICareTimer] = {}
self.detectors: dict[str, Detector] = {}
self.sensors: dict[str, dict] = {}
self.ability: Ability = Ability(self)
self.chat_context: Any = None
| self._to_llm_method: ( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: neu-spiral/multi-label-emg
# Path: multi_label_emg/data.py
def load_data_dict():
"""
Loads features and labels from subject folders into a single dictionary as described below.
    NOTE - preprocessing should have been done first to extract features from raw data (see README).
data_dict = {
Subj0: {
Calibration_features: ...,
Calibration_dir_labels: ...,
Calibration_mod_labels: ...,
Calibration_visual_dir_labels: ...,
Calibration_visual_mod_labels: ...,
SimultaneousPulse1_NoFeedback_features: ...,
...
},
...
}
"""
blocks = ["Calibration"]
for i in [1, 2, 3]:
for feedback in ["NoFeedBack", "WithFeedBack"]:
blocks.append(f"SimultaneousPulse{i}_{feedback}")
blocks.append(f"HoldPulse{i}_{feedback}")
results = {}
for i in trange(11, desc="Load Subjects", leave=True):
results[f"Subj{i}"] = {}
for block in tqdm(blocks, leave=False, position=1):
path = DATASET_DIR / "python" / f"Subj{i}" / block
# NOTE - features.npy is created during preprocessing script
results[f"Subj{i}"][f"{block}_features"] = np.load(path / "features.npy")
results[f"Subj{i}"][f"{block}_dir_labels"] = np.load(path / "joystick_direction_labels.npy")
results[f"Subj{i}"][f"{block}_mod_labels"] = np.load(path / "joystick_modifier_labels.npy")
results[f"Subj{i}"][f"{block}_visual_dir_labels"] = np.load(path / "visual_direction_labels.npy")
results[f"Subj{i}"][f"{block}_visual_mod_labels"] = np.load(path / "visual_modifier_labels.npy")
return results
# Path: multi_label_emg/models.py
class AvgPairs:
"""Create fake doubles by averaging pairs of singles. New items have hard labels including both classes"""
def __init__(self, n_per_class: int):
self.n_per_class = n_per_class
def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):
"""
Args:
x_single: (n_samples_in, n_features) - data/features from single gestures
y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures
y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures
Returns:
x_prime: (n_samples_aug, n_features) - augmented data
y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels
y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels
"""
x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)
x_aug, y_dir_aug, y_mod_aug = [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
x_aug.append((x1 + x2) / 2)
y_dir_aug.append(y1)
y_mod_aug.append(y2)
x_aug = np.stack(x_aug)
y_dir_aug = np.stack(y_dir_aug)
y_mod_aug = np.stack(y_mod_aug)
if self.n_per_class > 0:
# For each combination class, truncate to self.n_per_class
res_x, res_y_dir, res_y_mod = [], [], []
for d in np.unique(y_dir_aug, axis=0):
for m in np.unique(y_mod_aug, axis=0):
idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]
perm = np.random.permutation(len(idx))
res_x.append(x_aug[idx[perm[: self.n_per_class]]])
res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])
res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])
x_aug = np.concatenate(res_x)
y_dir_aug = np.concatenate(res_y_dir)
y_mod_aug = np.concatenate(res_y_mod)
return x_aug, y_dir_aug, y_mod_aug
def __repr__(self):
return f"{type(self).__name__}(n_per_class={self.n_per_class})"
# Path: multi_label_emg/models.py
class ElementwiseMaxPairs:
"""Create fake doubles by taking elementwise max of each feature.
New items have hard labels including both classes"""
def __init__(self, n_per_class: int):
self.n_per_class = n_per_class
def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):
"""
Args:
x_single: (n_samples_in, n_features) - data/features from single gestures
y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures
y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures
Returns:
x_prime: (n_samples_aug, n_features) - augmented data
y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels
y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels
"""
x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)
x_aug, y_dir_aug, y_mod_aug = [], [], []
for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):
x_aug.append(np.maximum(x1, x2))
y_dir_aug.append(y1)
y_mod_aug.append(y2)
x_aug = np.stack(x_aug)
y_dir_aug = np.stack(y_dir_aug)
y_mod_aug = np.stack(y_mod_aug)
if self.n_per_class > 0:
# For each combination class, truncate to self.n_per_class
res_x, res_y_dir, res_y_mod = [], [], []
for d in np.unique(y_dir_aug, axis=0):
for m in np.unique(y_mod_aug, axis=0):
idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]
perm = np.random.permutation(len(idx))
res_x.append(x_aug[idx[perm[: self.n_per_class]]])
res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])
res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])
x_aug = np.concatenate(res_x)
y_dir_aug = np.concatenate(res_y_dir)
y_mod_aug = np.concatenate(res_y_mod)
return x_aug, y_dir_aug, y_mod_aug
def __repr__(self):
return f"{type(self).__name__}(n_per_class={self.n_per_class})"
# Path: multi_label_emg/models.py
class ParallelA(BaseParallelModel):
DEFAULT_SAVE_NAME = "ParallelA.pkl"
def __init__(
self,
dir_clf,
mod_clf,
use_augmentation: bool,
n_aug_per_class: int = -1,
include_rest_data_for_clf: bool = False,
):
self.dir_clf = dir_clf
self.mod_clf = mod_clf
self.use_augmentation = use_augmentation
self.n_aug_per_class = n_aug_per_class
self._n_aug_created = None
self.include_rest_data_for_clf = include_rest_data_for_clf
def get_params(self, deep=True):
return {
"dir_clf": self.dir_clf,
"mod_clf": self.mod_clf,
"use_augmentation": self.use_augmentation,
"n_aug_per_class": self.n_aug_per_class,
"include_rest_data_for_clf": self.include_rest_data_for_clf,
}
def fit(self, features, y_dir, y_mod):
if self.use_augmentation:
aug = AvgPairs(self.n_aug_per_class)
aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)
features = np.concatenate([features, aug_features])
y_dir = np.concatenate([y_dir, aug_dir_labels])
y_mod = np.concatenate([y_mod, aug_mod_labels])
self._n_aug_created = len(aug_features)
if y_dir.ndim == 2:
y_dir = y_dir.argmax(-1)
if y_mod.ndim == 2:
y_mod = y_mod.argmax(-1)
if self.include_rest_data_for_clf:
# In this case, the label (NoDir, NoMod) could mean "active and doesn't fit our classes" or "resting"
self.dir_clf.fit(features, y_dir)
self.mod_clf.fit(features, y_mod)
else:
# In this case, the label (NoDir, NoMod) means "active and doesn't fit classes".
# "Rest" data is out-of-domain
active_idx = np.logical_or(y_dir != NO_DIR_IDX, y_mod != NO_MOD_IDX)
active_features = features[active_idx]
active_y_dir = y_dir[active_idx]
active_y_mod = y_mod[active_idx]
self.dir_clf.fit(active_features, active_y_dir)
self.mod_clf.fit(active_features, active_y_mod)
return self
def predict_proba(self, features):
"""Only for gestures"""
dir_probs = self.dir_clf.predict_proba(features)
mod_probs = self.mod_clf.predict_proba(features)
return dir_probs, mod_probs
def predict(self, features):
"""features.shape == (n_channels, n_samples) or (n_trials, n_channels, n_samples)"""
dir_probs = self.dir_clf.predict_proba(features)
mod_probs = self.mod_clf.predict_proba(features)
return dir_probs.argmax(-1), mod_probs.argmax(-1)
def save(self, save_dir: Path) -> Path:
assert save_dir.exists() and save_dir.is_dir()
file_path = save_dir / self.DEFAULT_SAVE_NAME
with open(file_path, "wb") as f:
pickle.dump(self, f)
return file_path
@classmethod
def load(cls, file_path: Path) -> "ParallelA":
with open(file_path, "rb") as f:
return pickle.load(f)
def __repr__(self):
return (
f"{type(self).__name__}(dir_clf={self.dir_clf}, "
f"use_augmentation={self.use_augmentation}, "
f"n_aug_per_class={self.n_aug_per_class}, "
+ f"mod_clf={self.mod_clf}, "
+ f"include_rest_data_for_clf={self.include_rest_data_for_clf})"
)
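# Hypothetical construction of a ParallelA model (mirroring the sklearn imports used by
# train.py below); the feature/label arrays in the commented lines are assumed to exist.
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
model = ParallelA(
    dir_clf=make_pipeline(RobustScaler(), LogisticRegression(max_iter=1000)),
    mod_clf=make_pipeline(RobustScaler(), LogisticRegression(max_iter=1000)),
    use_augmentation=True,
    n_aug_per_class=100,
    include_rest_data_for_clf=False,
)
# model.fit(features, y_dir_onehot, y_mod_onehot)
# dir_pred, mod_pred = model.predict(features)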
# Path: multi_label_emg/models.py
class ParallelB(BaseParallelModel):
DEFAULT_SAVE_NAME = "ParallelB.pkl"
def __init__(
self,
dir_clf,
mod_clf,
has_dir_clf,
has_mod_clf,
use_augmentation: bool,
n_aug_per_class: int = -1,
):
self.has_dir_clf = has_dir_clf
self.has_mod_clf = has_mod_clf
self.dir_clf = dir_clf
self.mod_clf = mod_clf
self.use_augmentation = use_augmentation
self.n_aug_per_class = n_aug_per_class
self._n_aug_created = None
def get_params(self, deep=True):
return {
"dir_clf": self.dir_clf,
"mod_clf": self.mod_clf,
"has_dir_clf": self.dir_clf,
"has_mod_clf": self.mod_clf,
"use_augmentation": self.use_augmentation,
"n_aug_per_class": self.n_aug_per_class,
}
def fit(self, features, y_dir, y_mod):
if self.use_augmentation:
aug = AvgPairs(self.n_aug_per_class)
aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)
features = np.concatenate([features, aug_features])
y_dir = np.concatenate([y_dir, aug_dir_labels])
y_mod = np.concatenate([y_mod, aug_mod_labels])
self._n_aug_created = len(aug_features)
if y_dir.ndim == 2:
y_dir = y_dir.argmax(-1)
if y_mod.ndim == 2:
y_mod = y_mod.argmax(-1)
has_direction = y_dir != NO_DIR_IDX
has_modifier = y_mod != NO_MOD_IDX
# Event check
self.has_dir_clf.fit(features, has_direction.astype(int))
self.has_mod_clf.fit(features, has_modifier.astype(int))
# Direction and modifier
self.dir_clf.fit(features[has_direction], y_dir[has_direction])
self.mod_clf.fit(features[has_modifier], y_mod[has_modifier])
return self
def predict_proba(self, features):
p_has_direction = self.has_dir_clf.predict_proba(features)
p_has_modifier = self.has_mod_clf.predict_proba(features)
p_dir_probs = self.dir_clf.predict_proba(features)
p_mod_probs = self.mod_clf.predict_proba(features)
# Check probs
dir_probs = np.zeros((features.shape[0], 5))
mod_probs = np.zeros((features.shape[0], 3))
dir_probs[:, NO_DIR_IDX] = p_has_direction[:, 0] # p(no_direction | x)
mod_probs[:, NO_MOD_IDX] = p_has_modifier[:, 0] # p(no_modifier | x)
dir_probs[:, :NO_DIR_IDX] = np.multiply(
p_dir_probs, p_has_direction[:, 1][..., None]
) # p(direction | has_direction)
mod_probs[:, :NO_MOD_IDX] = np.multiply(
p_mod_probs, p_has_modifier[:, 1][..., None]
) # p(modifier | has_modifier)
assert np.allclose(dir_probs.sum(-1), 1) and np.allclose(mod_probs.sum(-1), 1), "Probabilities should sum to 1"
# return probs
"""Only for gestures"""
return dir_probs, mod_probs
def predict(self, features):
dir_probs, mod_probs = self.predict_proba(features)
return dir_probs.argmax(-1), mod_probs.argmax(-1)
def save(self, save_dir: Path) -> Path:
assert save_dir.exists() and save_dir.is_dir()
file_path = save_dir / self.DEFAULT_SAVE_NAME
with open(file_path, "wb") as f:
pickle.dump(self, f)
return file_path
@classmethod
def load(cls, file_path: Path) -> "ParallelB":
with open(file_path, "rb") as f:
return pickle.load(f)
def __repr__(self):
return (
f"{type(self).__name__}(has_dir_clf={self.has_dir_clf}, "
f"dir_clf={self.dir_clf}, "
f"use_augmentation={self.use_augmentation}, "
f"n_aug_per_class={self.n_aug_per_class}, "
f"has_mod_clf={self.has_mod_clf}),"
f"mod_clf={self.mod_clf})"
)
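# Numeric sketch (values made up) of ParallelB's factorization for one sample:
# p(dir=k | x) = p(has_dir | x) * p(dir=k | has_dir, x), with the final "NoDir" slot taking
# p(no_dir | x), so the 5 direction probabilities still sum to 1.
import numpy as np
p_has_direction = np.array([0.2, 0.8])            # [p(no_dir | x), p(has_dir | x)]
p_dir_given_has = np.array([0.5, 0.3, 0.1, 0.1])  # p(dir=k | has_dir, x)
dir_probs = np.concatenate([p_dir_given_has * p_has_direction[1], [p_has_direction[0]]])
print(dir_probs, dir_probs.sum())  # [0.4  0.24 0.08 0.08 0.2 ] 1.0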
# Path: multi_label_emg/utils.py
NO_DIR_IDX = len(DIRECTION_GESTURES) # When predicting direction, we have an extra class representing "None"
# Path: multi_label_emg/utils.py
NO_MOD_IDX = len(MODIFIER_GESTURES)
# Path: multi_label_emg/utils.py
RESULTS_DIR = PROJECT_ROOT.parent / "results" # For experiment outputs and figures
# Path: multi_label_emg/utils.py
def canonical_coords():
"""NOTE - order does not matter: (Up, Pinch) and (Pinch, Up) are both labeled as (Up, Pinch)
    Build a lookup table so we can convert:
from integer labels such as (0, 1),
to an index in confusion matrix and a string label"""
result_int = []
result_str = []
# Add (<DIR>, NoMod) items
for i, d in enumerate(DIRECTION_GESTURES):
result_int.append((i, NO_MOD_IDX))
result_str.append(f"({d}, NoMod)")
# Add (NoDir, <MOD>) items
for i, m in enumerate(MODIFIER_GESTURES):
result_int.append((NO_DIR_IDX, i))
result_str.append(f"(NoDir, {m})")
# Add (<DIR>, <MOD>) items
for i, d in enumerate(DIRECTION_GESTURES):
for j, m in enumerate(MODIFIER_GESTURES):
result_int.append((i, j))
result_str.append(f"({d}, {m})")
# Add the (NoDir, NoMod) item
result_int.append((NO_DIR_IDX, NO_MOD_IDX))
result_str.append("(NoDir, NoMod)")
return result_int, result_str
# Path: multi_label_emg/utils.py
def confusion_matrix(y_true_2d, y_pred_2d, normalize_rows=True):
"""
Number of classes = 4 direction + 2 modifier + 4*2 combinations + (NoDir, NoMod) = 15
Create a confusion matrix of shape (15, 15), arranged according to the canonical
coordinates above
NOTE - result may contain nans - use nanmean later
"""
coords, coords_str = canonical_coords()
cm = np.zeros((len(coords), len(coords)), dtype=int)
for yt, yp in zip(y_true_2d, y_pred_2d):
cm[coords.index(tuple(yt)), coords.index(tuple(yp))] += 1
if normalize_rows:
cm = cm.astype(float)
with np.errstate(all="ignore"): # Ignore division by zero for empty rows
cm /= cm.sum(axis=-1, keepdims=True)
return cm
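# Illustrative use of confusion_matrix (assuming the 4-direction / 2-modifier setup described
# in its docstring, i.e. NO_DIR_IDX == 4 and NO_MOD_IDX == 2); each label row is
# (direction_idx, modifier_idx) and the values below are made up.
import numpy as np
from multi_label_emg.utils import canonical_coords, confusion_matrix
y_true = np.array([[0, 2], [4, 1], [0, 1]])  # (dir 0, NoMod), (NoDir, mod 1), (dir 0, mod 1)
y_pred = np.array([[0, 2], [4, 1], [0, 2]])  # the combined gesture is predicted as direction-only
cm = confusion_matrix(y_true, y_pred, normalize_rows=True)
coords, coord_names = canonical_coords()
print(cm.shape)                           # (15, 15); rows with no true examples are all-NaN
print(coord_names[coords.index((0, 2))])  # canonical name of the first "direction-only" class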
# Path: multi_label_emg/utils.py
def str2bool(s):
if s.lower() in ("yes", "true", "t", "y", "1"):
return True
elif s.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ValueError("Boolean value expected.")
# Path: multi_label_emg/train.py
import sys
import numpy as np
import plotly.graph_objects as go
import argparse
from loguru import logger
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity, KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.svm import SVC
from multi_label_emg.data import load_data_dict
from multi_label_emg.models import AvgPairs, ElementwiseMaxPairs, ParallelA, ParallelB
from multi_label_emg.utils import (
NO_DIR_IDX,
NO_MOD_IDX,
RESULTS_DIR,
canonical_coords,
confusion_matrix,
str2bool,
)
def get_name(
subject: str,
seed: int,
parallel_model_type: str,
clf_name: str,
doubles_method: str,
fraction_doubles_per_class: float,
singles_method: str,
rel_fraction_singles_per_class: float,
include_doubles_in_train: bool,
feature_combine_type: str,
):
return "__".join(
[
f"subj={subject}",
f"seed={seed}",
f"par={parallel_model_type}",
f"clf={clf_name}",
f"doubles={doubles_method}",
f"frac_doubles={fraction_doubles_per_class}",
f"singles={singles_method}",
f"frac_singles={rel_fraction_singles_per_class}",
f"incl_doubles={include_doubles_in_train}",
f"feat_type={feature_combine_type}",
]
)
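# Example of the run name produced by get_name(); every argument value here is illustrative.
print(get_name(
    subject="Subj0", seed=0, parallel_model_type="ParallelA", clf_name="logr",
    doubles_method="avg", fraction_doubles_per_class=1.0,
    singles_method="none", rel_fraction_singles_per_class=1.0,
    include_doubles_in_train=False, feature_combine_type="avg",
))
# subj=Subj0__seed=0__par=ParallelA__clf=logr__doubles=avg__frac_doubles=1.0__singles=none__frac_singles=1.0__incl_doubles=False__feat_type=avg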
def plot_confusion_matrix(data: np.ndarray):
def make_text(cm):
text = []
for v in cm.flatten():
| text.append(f"{round(v, 2)}") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ebb-earl-co/tidal-wave
# Path: tidal_wave/hls.py
def playlister(
session: Session, vesrj: Optional[VideosEndpointStreamResponseJSON]
) -> m3u8.M3U8:
"""Attempts to parse a VideosEndpointStreamResponseJSON object into an
m3u8.M3U8 object. Requires fetching HTTP(s) resources, so takes a
    requests.Session object as an argument. If an error occurs, raises
TidalM3U8Exception"""
em_three_you_ate: Optional[m3u8.M3U8] = None
if vesrj.manifest_mime_type == "application/vnd.tidal.emu":
try:
manifest: Dict[str, str] = json.loads(vesrj.manifest_bytes)
except json.decoder.JSONDecodeError:
raise TidalM3U8Exception(
f"Expected an HLS spec. in JSON format for video {vesrj.video_id}"
)
else:
mt: Optional[str] = manifest.get("mimeType")
url: Optional[str] = manifest.get("urls", [None])[0]
if (
(mt is None)
or (mt != "application/vnd.apple.mpegurl")
or (url is None)
or (".m3u8" not in url)
):
raise TidalM3U8Exception(
f"Manifest for video {vesrj.video_id}, video mode "
f"{vesrj.video_quality} does not make available an "
"M3U8 file"
)
em_three_you_ate: m3u8.M3U8 = m3u8.load(
url, http_client=RequestsClient(session=session)
)
return em_three_you_ate
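# Sketch (not part of this repository) of the kind of manifest playlister() expects once the
# base64 manifest is decoded; the URL below is a placeholder, not a real TIDAL endpoint.
import base64
import json
manifest = {
    "mimeType": "application/vnd.apple.mpegurl",
    "urls": ["https://example.com/video/master.m3u8"],
}
manifest_b64 = base64.b64encode(json.dumps(manifest).encode()).decode()
decoded = json.loads(base64.b64decode(manifest_b64))
assert decoded["mimeType"] == "application/vnd.apple.mpegurl"
assert ".m3u8" in decoded["urls"][0]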
# Path: tidal_wave/hls.py
def variant_streams(
m3u8: m3u8.M3U8, return_urls: bool = False
) -> Optional[Union[m3u8.Playlist, List[str]]]:
"""By default, return the highest-bandwidth option of m3u8.playlists
as an m3u8.Playlist object. If return_urls, then returns the object's
.files attribute, which is a list of strings. N.b. if m3u8.is_variant
is False, then return None as there are no variant streams."""
if not m3u8.is_variant:
return
playlist: m3u8.Playlist = max(m3u8.playlists, key=lambda p: p.stream_info.bandwidth)
if return_urls:
_m3u8: m3u8.M3U8 = m3u8.load(playlist)
return _m3u8.files
else:
return playlist
# Path: tidal_wave/hls.py
class TidalM3U8Exception(Exception):
pass
# Path: tidal_wave/media.py
TAG_MAPPING: Dict[str, Dict[str, str]] = {
"album": {"flac": "ALBUM", "m4a": "\xa9alb"},
"album_artist": {"flac": "ALBUMARTIST", "m4a": "aART"},
"artist": {"flac": "ARTIST", "m4a": "\xa9ART"},
"artists": {"flac": "ARTISTS", "m4a": "----:com.apple.iTunes:ARTISTS"},
"barcode": {"flac": "BARCODE", "m4a": "----:com.apple.iTunes:BARCODE"},
"comment": {"flac": "COMMENT", "m4a": "\xa9cmt"},
"composer": {"flac": "COMPOSER", "m4a": "\xa9wrt"},
"copyright": {"flac": "COPYRIGHT", "m4a": "cprt"},
"date": {"flac": "DATE", "m4a": "\xa9day"},
"director": {"flac": None, "m4a": "\xa9dir"},
"engineer": {"flac": "ENGINEER", "m4a": "----:com.apple.iTunes:ENGINEER"},
"isrc": {"flac": "ISRC", "m4a": "----:com.apple.iTunes:ISRC"},
"lyrics": {"flac": "LYRICS", "m4a": "\xa9lyr"},
"lyricist": {"flac": "LYRICIST", "m4a": "----:com.apple.iTunes:LYRICIST"},
"mixer": {"flac": "MIXER", "m4a": "----:com.apple.iTunes:MIXER"},
"producer": {"flac": "PRODUCER", "m4a": "----:com.apple.iTunes:PRODUCER"},
"remixer": {"flac": "REMIXER", "m4a": "----:com.apple.iTunes:REMIXER"},
"album_peak_amplitude": {
"flac": "REPLAYGAIN_ALBUM_PEAK",
"m4a": "----:com.apple.iTunes:REPLAYGAIN_ALBUM_PEAK",
},
"album_replay_gain": {
"flac": "REPLAYGAIN_ALBUM_GAIN",
"m4a": "----:com.apple.iTunes:REPLAYGAIN_ALBUM_GAIN",
},
"track_peak_amplitude": {
"flac": "REPLAYGAIN_TRACK_PEAK",
"m4a": "----:com.apple.iTunes:REPLAYGAIN_TRACK_PEAK",
},
"track_replay_gain": {
"flac": "REPLAYGAIN_TRACK_GAIN",
"m4a": "----:com.apple.iTunes:REPLAYGAIN_TRACK_GAIN",
},
"title": {"flac": "TITLE", "m4a": "\xa9nam"},
}
# Path: tidal_wave/media.py
class VideoFormat(str, Enum):
high = "HIGH"
medium = "MEDIUM"
low = "LOW"
audio_only = "AUDIO_ONLY"
# Path: tidal_wave/models.py
class VideosContributorsResponseJSON(dataclass_wizard.JSONWizard):
"""The response from the TIDAL API endpoint /videos/<ID>/contributors
is modeled by this class."""
limit: int
offset: int
total_number_of_items: int
items: List["VideoContributor"]
def get_role(self, role: str) -> Optional[Tuple["VideoContributor"]]:
"""Given a contributor role (e.g. Composer, Film Director), go through
`self.items` object, returning the `VideoContributor` object(s)
for the given contributor type if there are any"""
role_contributors = tuple(vc for vc in self.items if vc.role == role)
try:
role_contributors[0]
except IndexError:
logger.debug(f"There are no credits of type '{role}' for this video")
return
else:
return role_contributors
def get_contributors(self, role: str) -> Optional[Tuple[str]]:
"""Given a contributor role (e.g. Lyricist, Composer),
return a tuple of all the names of the contributors
"""
vcs: Optional[Tuple["VideoContributor"]] = self.get_role(role)
if vcs is not None:
return tuple(vc.name for vc in vcs)
else:
return
def __post_init__(self):
"""Try to parse the various Contributors to top-level
attributes of this class"""
self.composer: Optional[Tuple[str]] = self.get_contributors("Composer")
self.director: Optional[Tuple[str]] = self.get_contributors("Director")
self.film_director: Optional[Tuple[str]] = self.get_contributors(
"Film Director"
)
self.film_producer: Optional[Tuple[str]] = self.get_contributors(
"Film Producer"
)
self.lyricist: Optional[Tuple[str]] = self.get_contributors("Lyricist")
self.mastering_engineer: Optional[Tuple[str]] = self.get_contributors(
"Mastering Engineer"
)
self.producer: Optional[Tuple[str]] = self.get_contributors("Producer")
self.video_producer: Optional[Tuple[str]] = self.get_contributors(
"Video Producer"
)
# Path: tidal_wave/models.py
class VideosEndpointResponseJSON(dataclass_wizard.JSONWizard):
"""Response from the TIDAL API, videos/<VIDEOID> endpoint.If the params and
headers are correctly specified, the API returns metadata of the available
version of the (music) video, including video quality, video title, date,
video artists, duration, etc."""
id: int = field(repr=False)
title: str
volume_number: int
track_number: int
release_date: Annotated[
datetime, dataclass_wizard.Pattern("%Y-%m-%dT%H:%M:%S.%f%z")
]
duration: int # seconds
quality: str
explicit: bool
type: str
artist: "Artist"
artists: List["Artist"]
def __post_init__(self):
self.name: str = (
self.title.replace("/", "_")
.replace("|", "_")
.replace(":", " -")
.replace('"', "")
)
# Path: tidal_wave/models.py
class VideosEndpointStreamResponseJSON(dataclass_wizard.JSONWizard):
"""Response from the TIDAL API's videos/<VIDEO_ID> stream
endpoint. The params and headers, if correctly specified, return the
manifest of the video to be streamed. The manifest is a base64-encoded
JSON object containing a .m3u8 URL"""
video_id: int
stream_type: str # ON_DEMAND
video_quality: VideoQualityType
manifest: str = field(repr=False)
manifest_mime_type: str = field(repr=False)
def __post_init__(self):
self.manifest_bytes: bytes = base64.b64decode(self.manifest)
# Path: tidal_wave/requesting.py
def request_videos(
session: Session, identifier: int
) -> Optional[VideosEndpointResponseJSON]:
return requester_maker(
session=session,
endpoint="videos",
identifier=identifier,
headers={"Accept": "application/json"},
subclass=VideosEndpointResponseJSON,
)
# Path: tidal_wave/requesting.py
def request_video_contributors(
session: Session, identifier: int
) -> Optional[VideosContributorsResponseJSON]:
return requester_maker(
session=session,
endpoint="videos",
identifier=identifier,
headers={"Accept": "application/json"},
parameters={"limit": 100},
url_end="/contributors",
subclass=VideosContributorsResponseJSON,
)
# Path: tidal_wave/requesting.py
def request_video_stream(
session: Session, video_id: int, video_quality: str
) -> Optional[VideosEndpointStreamResponseJSON]:
func = partial(
requester_maker,
session=session,
identifier=video_id,
endpoint="videos",
headers={"Accept": "application/json"},
parameters={
"videoquality": video_quality,
"playbackmode": "STREAM",
"assetpresentation": "FULL",
},
url_end="/playbackinfopostpaywall",
subclass=VideosEndpointStreamResponseJSON,
)
return func()
# Path: tidal_wave/utils.py
@contextmanager
def temporary_file(suffix: str = ".mka"):
"""This context-managed function is a stand-in for
tempfile.NamedTemporaryFile as that stdlib object experiences
errors on Windows."""
file_name: str = os.path.join(
tempfile.gettempdir(), f"{os.urandom(24).hex()}{suffix}"
)
if not os.path.exists(file_name):
open(file=file_name, mode="x").close()
tf = open(file=file_name, mode="wb")
try:
yield tf
finally:
tf.close()
os.unlink(tf.name)
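# Minimal usage sketch of temporary_file(): the context manager hands back an open binary
# file object and deletes the file when the block exits.
with temporary_file(suffix=".bin") as tf:
    tf.write(b"\x00\x01\x02")
    tf.flush()
    print(tf.name)  # e.g. /tmp/<random hex>.bin on Linux
# the file at tf.name has been removed once the with-block exits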
# Path: tidal_wave/video.py
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
from .hls import playlister, variant_streams, TidalM3U8Exception
from .media import TAG_MAPPING, VideoFormat
from .models import (
VideosContributorsResponseJSON,
VideosEndpointResponseJSON,
VideosEndpointStreamResponseJSON,
)
from .requesting import request_videos, request_video_contributors, request_video_stream
from .utils import temporary_file
from requests import Session
import json
import logging
import sys
import ffmpeg
import mutagen
import m3u8
def set_urls(self):
"""This method uses self.m3u8, an m3u8.M3U8 object that is variant:
(https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)
It retrieves the highest-quality .m3u8 in its .playlists attribute,
and sets self.urls as the list of strings from that m3u8.Playlist"""
# for now, just get the highest-bandwidth playlist
playlist: m3u8.Playlist = variant_streams(self.m3u8)
self.M3U8 = m3u8.load(playlist.uri)
if self.M3U8 is None or len(self.M3U8.files) == 0:
raise TidalM3U8Exception(
f"HLS media segments are not available for video {self.video_id}"
)
self.urls: List[str] = self.M3U8.files
def set_artist_dir(self, out_dir: Path):
"""Set self.artist_dir, which is the subdirectory of `out_dir`
with name `self.metadata.artist.name`"""
self.artist_dir: Path = out_dir / self.metadata.artist.name
self.artist_dir.mkdir(parents=True, exist_ok=True)
def set_filename(self, out_dir: Path):
"""Set self.filename, which is constructed from self.metadata.name
and self.stream.video_quality"""
self.filename: str = (
f"{self.metadata.name} [{self.stream.video_quality}].{self.codec}"
)
def set_outfile(self):
"""Uses self.artist_dir and self.metadata and self.filename
to craft the pathlib.Path object, self.outfile, that is a
reference to where the track will be written on disk."""
self.outfile: Path = self.artist_dir / self.filename
if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):
logger.info(
f"Video {str(self.outfile.absolute())} already exists "
"and therefore will not be overwritten"
)
return
else:
return self.outfile
def download(self, session: Session, out_dir: Path) -> Optional[Path]:
"""Requests the HLS video files that constitute self.video_id.
Writes HLS bytes to a temporary file, then uses FFmpeg to write the
video data to self.outfile"""
if session.session_id is not None:
download_headers: Dict[str, str] = {"sessionId": session.session_id}
else:
download_headers: dict = dict()
download_params: Dict[str, None] = {k: None for k in session.params}
# self.outfile should already have been set by self.set_outfile()
logger.info(
f"Writing video {self.video_id} to '{str(self.outfile.absolute())}'"
)
with temporary_file() as ntf:
for u in self.urls:
with session.get(
url=u, headers=download_headers, params=download_params
) as download_response:
if not download_response.ok:
logger.warning(f"Could not download {self}")
else:
ntf.write(download_response.content)
else:
ntf.seek(0)
# will always be .mp4 because HLS
ffmpeg.input(ntf.name, hide_banner=None, y=None).output(
str(self.outfile.absolute()),
vcodec="copy",
acodec="copy",
loglevel="quiet",
).run()
logger.info(
f"Video {self.video_id} written to '{str(self.outfile.absolute())}'"
)
return self.outfile
def craft_tags(self):
"""Using the TAG_MAPPING dictionary, write the correct values of
various metadata tags to the file. Videos are .mp4"""
tags = dict()
tag_map = {k: v["m4a"] for k, v in TAG_MAPPING.items()}
tags[tag_map["artist"]] = ";".join((a.name for a in self.metadata.artists))
tags[tag_map["artists"]] = [a.name for a in self.metadata.artists]
tags[tag_map["comment"]] = f"https://tidal.com/browse/video/{self.video_id}"
tags[tag_map["date"]] = str(self.metadata.release_date.date())
tags[tag_map["title"]] = self.metadata.title
for tag in {"composer", "director", "lyricist", "producer"}:
try:
_credits_tag = ";".join(getattr(self.contributors, tag))
except (TypeError, AttributeError): # NoneType problems
continue
else:
tags[tag_map[tag]] = _credits_tag
# Have to convert to bytes the values of the tags starting with '----'
for k, v in tags.copy().items():
if k.startswith("----"):
if isinstance(v, str):
tags[k]: bytes = v.encode("UTF-8")
elif isinstance(v, list):
tags[k]: List[bytes] = [s.encode("UTF-8") for s in v]
self.tags: dict = {k: v for k, v in tags.items() if v is not None}
def set_tags(self):
"""Instantiate a mutagen.File instance, add self.tags to it, and
save it to disk"""
self.mutagen = mutagen.File(self.outfile)
self.mutagen.clear()
self.mutagen.update(**self.tags)
self.mutagen.save()
def get(
| self, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lbcb-sci/GNNome
# Path: graph_dataset.py
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx)
graph = add_positional_encoding(graph)
print(f'DGL graph idx={idx} info:\n',graph)
self.graph_list.append((idx, graph))
self.graph_list.sort(key=lambda x: x[0])
def has_cache(self):
"""Check if the raw data is already processed and stored."""
raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}
prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}
return len(raw_files - prc_files) == 0 # set difference
def __len__(self):
return len(os.listdir(self.save_dir))
def __getitem__(self, idx):
i, graph = self.graph_list[idx]
return i, graph
def process(self):
pass
# Path: hyperparameters.py
def get_hyperparameters():
return {
# Setup
'data_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/hifi/train',
'temp_path': '/home/vrcekl/scratch/gnnome_assembly/train',
'eval_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/hifi/evaluate',
'asms_path': '/home/vrcekl/scratch/gnnome_assembly/evaluate',
'refs_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/references',
'checkpoints_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/checkpoints',
'models_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/models',
'data_path_ont': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/ont/train',
'eval_path_ont': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/ont/evaluate',
'asms_path_ont': '/home/vrcekl/scratch/gnnome_assembly/evaluate_ont',
'raven_path': '',
'hifiasm_path': '',
'pbsim3_dir': '',
'sample_profile_id': '',
'sample_file': '',
'assembler': 'hifiasm',
'dataset': 'chm13', # Not used at the moment
'initials': 'LV',
'device': 'cuda:0' if torch.cuda.is_available() else 'cpu',
'seed': 1,
'wandb_mode': 'disabled', # switch between 'online' and 'disabled'
# 'wandb_project': 'GeNNome-hifiasm',
'wandb_project': 'hifiasm-runs',
# 'wandb_project': 'Sep-23_ablations',
'chr_overfit': 0,
'plot_nga50_during_training': False,
'eval_frequency': 20,
# Data
'use_similarities': True,
# 'pos_to_neg_ratio': 16.5, # Not used, but could be a hyperparam for loss weight
# Model
'dim_latent': 64,
'num_gnn_layers': 8,
'node_features': 2,
'edge_features': 2, # Put 2 if you use similarities, 1 otherwise
'hidden_edge_features': 16,
'hidden_edge_scores': 64,
'nb_pos_enc': 0,
'type_pos_enc': 'PR',
'batch_norm': True,
# 'dropout': 0.08,
# Training
'num_epochs': 200,
'lr': 1e-4,
'use_symmetry_loss': True,
'alpha': 0.1,
'num_parts_metis_train': 200,
'num_parts_metis_eval': 200,
'num_nodes_per_cluster': 10000, # 2000 = max 10GB GPU memory for d=128, L=8
'npc_lower_bound': 1, # 0.8
'npc_upper_bound': 1, # 1.2
'k_extra_hops': 1,
'patience': 2,
'decay': 0.95,
'masking': True,
'mask_frac_low': 80, # ~ 25x
'mask_frac_high': 100, # ~ 60x
# Decoding
'strategy': 'greedy',
'num_decoding_paths': 100,
'decode_with_labels': False,
'load_checkpoint': True,
'num_threads': 32,
'B': 1,
'len_threshold': 10,
}
# Path: config.py
def get_config():
return {
'checkpoints_path': 'checkpoints',
'models_path': 'models',
'tool_dir': 'vendor',
'raven_dir': 'vendor/raven-1.8.1',
'hifiasm_dir': 'vendor/hifiasm-0.18.8',
'pbsim3_dir': 'vendor/pbsim3',
'sample_profile_id': '',
'sample_file': '',
'sequencing_depth': 60,
}
# Path: inference.py
def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None):
"""Using a pretrained model, get walks and contigs on new data."""
hyperparameters = get_hyperparameters()
seed = hyperparameters['seed']
num_gnn_layers = hyperparameters['num_gnn_layers']
hidden_features = hyperparameters['dim_latent']
nb_pos_enc = hyperparameters['nb_pos_enc']
batch_norm = hyperparameters['batch_norm']
node_features = hyperparameters['node_features']
edge_features = hyperparameters['edge_features']
hidden_edge_features = hyperparameters['hidden_edge_features']
hidden_edge_scores = hyperparameters['hidden_edge_scores']
strategy = hyperparameters['strategy']
B = hyperparameters['B']
nb_paths = hyperparameters['num_decoding_paths']
len_threshold = hyperparameters['len_threshold']
use_labels = hyperparameters['decode_with_labels']
load_checkpoint = hyperparameters['load_checkpoint']
threads = hyperparameters['num_threads']
# assembly_path = hyperparameters['asms_path']
device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph
utils.set_seed(seed)
time_start = datetime.now()
ds = AssemblyGraphDataset(data_path, assembler)
inference_dir = os.path.join(savedir, 'decode')
if not os.path.isdir(inference_dir):
os.makedirs(inference_dir)
checkpoint_dir = os.path.join(savedir, 'checkpoint')
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
walks_per_graph = []
contigs_per_graph = []
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'\nelapsed time (loading network and data): {elapsed}\n')
for idx, g in ds:
# Get scores
print(f'==== Processing graph {idx} ====')
with torch.no_grad():
time_start_get_scores = datetime.now()
g = g.to(device)
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1) # No PageRank
if use_labels: # Debugging
print('Decoding with labels...')
g.edata['score'] = g.edata['y'].clone()
else:
print('Decoding with model scores...')
predicts_path = os.path.join(inference_dir, f'{idx}_predicts.pt')
if os.path.isfile(predicts_path):
print(f'Loading the scores from:\n{predicts_path}\n')
g.edata['score'] = torch.load(predicts_path)
else:
print(f'Loading model parameters from: {model_path}')
model = models.SymGatedGCNModel(node_features, edge_features, hidden_features, hidden_edge_features, num_gnn_layers, hidden_edge_scores, batch_norm, nb_pos_enc, dropout=dropout)
model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))
model.eval()
model.to(device)
print(f'Computing the scores with the model...\n')
edge_predictions = model(g, x, e, pe)
g.edata['score'] = edge_predictions.squeeze()
torch.save(g.edata['score'], os.path.join(inference_dir, f'{idx}_predicts.pt'))
elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_scores)
print(f'elapsed time (get_scores): {elapsed}')
# Load info data
print(f'Loading successors...')
with open(f'{data_path}/{assembler}/info/{idx}_succ.pkl', 'rb') as f_succs:
succs = pickle.load(f_succs)
print(f'Loading predecessors...')
with open(f'{data_path}/{assembler}/info/{idx}_pred.pkl', 'rb') as f_preds:
preds = pickle.load(f_preds)
print(f'Loading edges...')
with open(f'{data_path}/{assembler}/info/{idx}_edges.pkl', 'rb') as f_edges:
edges = pickle.load(f_edges)
print(f'Done loading the auxiliary graph data!')
# Get walks
time_start_get_walks = datetime.now()
# Some prefixes can be <0 and that messes up the assemblies
g.edata['prefix_length'] = g.edata['prefix_length'].masked_fill(g.edata['prefix_length']<0, 0)
if strategy == 'greedy':
walks = get_contigs_greedy(g, succs, preds, edges, nb_paths, len_threshold, use_labels, checkpoint_dir, load_checkpoint, device='cpu', threads=threads)
else:
print('Invalid decoding strategy')
raise Exception
elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_walks)
print(f'elapsed time (get_walks): {elapsed}')
inference_path = os.path.join(inference_dir, f'{idx}_walks.pkl')
pickle.dump(walks, open(f'{inference_path}', 'wb'))
print(f'Loading reads...')
with open(f'{data_path}/{assembler}/info/{idx}_reads.pkl', 'rb') as f_reads:
reads = pickle.load(f_reads)
print(f'Done!')
time_start_get_contigs = datetime.now()
contigs = evaluate.walk_to_sequence(walks, g, reads, edges)
elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_contigs)
print(f'elapsed time (get_contigs): {elapsed}')
assembly_dir = os.path.join(savedir, f'assembly')
if not os.path.isdir(assembly_dir):
os.makedirs(assembly_dir)
evaluate.save_assembly(contigs, assembly_dir, idx)
walks_per_graph.append(walks)
contigs_per_graph.append(contigs)
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'elapsed time (total): {elapsed}')
if DEBUG:
exit(0)
print(f'Found contigs for {data_path}!')
print(f'Model used: {model_path}')
print(f'Assembly saved in: {savedir}')
# Path: train.py
import argparse
import copy
import os
import pickle
import random
import re
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import dgl
import wandb
import evaluate
import models
import utils
from datetime import datetime
from tqdm import tqdm
from torch.nn.functional import kl_div
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.profiler import profile, record_function, ProfilerActivity
from dgl.dataloading import GraphDataLoader
from graph_dataset import AssemblyGraphDataset
from hyperparameters import get_hyperparameters
from config import get_config
from inference import inference
loss_per_epoch_valid.append(min_loss_valid)
elapsed = utils.timedelta_to_str(datetime.now() - time_start)
print(f'Loading data done. Elapsed time: {elapsed}')
try:
with wandb.init(project=wandb_project, config=hyperparameters, mode=wandb_mode, name=out):
wandb.watch(model, criterion, log='all', log_freq=1000)
for epoch in range(start_epoch, num_epochs):
train_loss_all_graphs, train_fp_rate_all_graphs, train_fn_rate_all_graphs = [], [], []
train_acc_all_graphs, train_precision_all_graphs, train_recall_all_graphs, train_f1_all_graphs = [], [], [], []
train_loss_epoch, train_fp_rate_epoch, train_fn_rate_epoch = [], [], []
train_acc_epoch, train_precision_epoch, train_recall_epoch, train_f1_epoch = [], [], [], []
train_acc_inv_epoch, train_precision_inv_epoch, train_recall_inv_epoch, train_f1_inv_epoch = [], [], [], []
train_aps_epoch, train_aps_inv_epoch = [], []
print('\n===> TRAINING\n')
random.shuffle(ds_train.graph_list)
for data in ds_train:
model.train()
idx, g = data
print(f'\n(TRAIN: Epoch = {epoch:3}) NEW GRAPH: index = {idx}')
if masking:
fraction = random.randint(mask_frac_low, mask_frac_high) / 100 # Fraction of nodes to be left in the graph (.85 -> ~30x, 1.0 -> 60x)
g = mask_graph_strandwise(g, fraction, device)
# Number of clusters dependent on graph size!
num_nodes_per_cluster_min = int(num_nodes_per_cluster * npc_lower_bound)
num_nodes_per_cluster_max = int(num_nodes_per_cluster * npc_upper_bound) + 1
num_nodes_for_g = torch.LongTensor(1).random_(num_nodes_per_cluster_min, num_nodes_per_cluster_max).item()
num_clusters = g.num_nodes() // num_nodes_for_g + 1
if num_nodes_for_g >= g.num_nodes(): # train with full graph
print(f'\nUse METIS: False')
print(f'Use full graph')
g = g.to(device)
if use_symmetry_loss:
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
# pe = g.ndata['pe'].to(device)
# pe = (pe - pe.mean()) / pe.std()
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
# pe = torch.cat((pe_in, pe_out, pe), dim=1)
pe = torch.cat((pe_in, pe_out), dim=1)
org_scores = model(g, x, e, pe).squeeze(-1)
edge_predictions = org_scores
edge_labels = g.edata['y'].to(device)
g = dgl.reverse(g, True, True)
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
# pe = g.ndata['pe'].to(device)
# pe = (pe - pe.mean()) / pe.std()
pe_out = g.ndata['in_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe_in = g.ndata['out_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
# pe = torch.cat((pe_in, pe_out, pe), dim=1)
pe = torch.cat((pe_in, pe_out), dim=1)
rev_scores = model(g, x, e, pe).squeeze(-1)
loss = symmetry_loss(org_scores, rev_scores, edge_labels, pos_weight, alpha=alpha)
else:
x = g.ndata['x'].to(device)
e = g.edata['e'].to(device)
# pe = g.ndata['pe'].to(device)
# pe = (pe - pe.mean()) / pe.std()
pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)
pe_in = (pe_in - pe_in.mean()) / pe_in.std()
pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
# pe = torch.cat((pe_in, pe_out, pe), dim=1)
pe = torch.cat((pe_in, pe_out), dim=1)
edge_predictions = model(g, x, e, pe)
edge_predictions = edge_predictions.squeeze(-1)
edge_labels = g.edata['y'].to(device)
loss = criterion(edge_predictions, edge_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss = loss.item()
TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)
acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)
try:
fp_rate = FP / (FP + TN)
except ZeroDivisionError:
fp_rate = 0.0
try:
fn_rate = FN / (FN + TP)
except ZeroDivisionError:
fn_rate = 0.0
train_fp_rate = fp_rate
train_fn_rate = fn_rate
train_acc = acc
train_precision = precision
train_recall = recall
train_f1 = f1
train_loss_epoch.append(loss.item())
train_fp_rate_epoch.append(fp_rate)
train_fn_rate_epoch.append(fn_rate)
# elapsed = utils.timedelta_to_str(datetime.now() - time_start)
# print(f'\nTRAINING (one training graph): Epoch = {epoch}, Graph = {idx}')
# print(f'Loss: {train_loss:.4f}, fp_rate(GT=0): {train_fp_rate:.4f}, fn_rate(GT=1): {train_fn_rate:.4f}')
# print(f'elapsed time: {elapsed}\n\n')
else: # train with mini-batch
print(f'\nUse METIS: True')
print(f'Number of clusters:', num_clusters)
g = g.long()
| d = dgl.metis_partition(g, num_clusters, extra_cached_hops=k_extra_hops) |
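The decoding and training loops above repeatedly build the same degree-based positional encoding before calling the GNN. A minimal, self-contained sketch of that pattern, assuming only that dgl and torch are installed (the toy graph and its sizes are illustrative, not taken from the repository):

import dgl
import torch

g = dgl.rand_graph(10, 30)                       # toy directed graph
pe_in = g.in_degrees().float().unsqueeze(1)      # (N, 1) in-degrees
pe_in = (pe_in - pe_in.mean()) / pe_in.std()     # z-score normalisation
pe_out = g.out_degrees().float().unsqueeze(1)    # (N, 1) out-degrees
pe_out = (pe_out - pe_out.mean()) / pe_out.std()
pe = torch.cat((pe_in, pe_out), dim=1)           # (N, 2) positional encoding fed to the model
print(pe.shape)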
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SusheelThapa/C-DOTS
# Path: features/documenter.py
class CodeDocumenter(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
self.typing_timer = QTimer(self)
self.typing_timer.timeout.connect(self.type_next_character)
self.current_typing_position = 0
def init_ui(self):
self.setWindowTitle("Article Generator")
self.setGeometry(100, 100, 1500, 900)
self.setStyleSheet("background-color: #FFFFFF; color: #000000;")
main_layout = QHBoxLayout()
main_layout.setContentsMargins(20, 20, 20, 20)
left_layout = QVBoxLayout()
left_layout.setContentsMargins(10, 20, 10, 20)
left_layout.setSpacing(20)
right_layout = QVBoxLayout()
right_layout.setContentsMargins(10, 10, 10, 10)
splitter = QSplitter()
language_label = QLabel("Select Programming Language:")
language_label.setFont(QFont("Arial", 16))
self.language_selection = QComboBox()
self.language_selection.setFont(QFont("Arial", 16))
self.language_selection.setStyleSheet(
"QComboBox { padding: 8px; background-color: #E0E0E0 ; color: #000000; }"
)
self.language_selection.addItems(["Python", "Java", "C++", "JavaScript", "C"])
code_label = QLabel("Code to add Documentation")
code_label.setFont(QFont("Arial", 16))
self.code_entry = QTextEdit()
self.code_entry.setFont(QFont("Arial", 16))
self.code_entry.setStyleSheet(
"QTextEdit { border-radius: 5px; padding: 5px; background-color: #E0E0E0 ; color: #000000; }"
)
generate_doc_button = QPushButton("Generate Documentation")
generate_doc_button.setFont(QFont("Arial", 18))
generate_doc_button.setStyleSheet(
"QPushButton { border-radius: 10px; padding: 10px; background-color: #4CAF50 ; color: #FFFFFF; font-weight:600;} QPushButton:hover { background-color: #45A049; }"
)
generate_doc_button.clicked.connect(self.generate_documentation)
left_layout.addWidget(language_label)
left_layout.addWidget(self.language_selection)
left_layout.addWidget(code_label)
left_layout.addWidget(self.code_entry)
left_layout.addWidget(generate_doc_button)
self.generated_text_area = QTextBrowser()
self.generated_text_area.setReadOnly(True)
self.generated_text_area.setFont(QFont("Arial", 16))
self.generated_text_area.setStyleSheet(
"QTextBrowser { border-radius: 5px; padding: 5px; background-color: #E0E0E0 ; color: #000000; }"
)
right_layout.addWidget(self.generated_text_area)
# Assembling the main layout
left_widget = QWidget()
left_widget.setLayout(left_layout)
right_widget = QWidget()
right_widget.setLayout(right_layout)
splitter.addWidget(left_widget)
splitter.addWidget(right_widget)
splitter.setSizes([400, 800])
main_layout.addWidget(splitter)
self.setLayout(main_layout)
def generate_documentation(self):
language = self.language_selection.currentText()
code = self.code_entry.toPlainText()
self.generated_text_area.setText("Documentng the Code Snippets...")
self.worker = Worker(language, code)
self.thread = threading.Thread(target=self.worker.run)
self.worker.finished.connect(self.on_finished)
self.thread.start()
def on_finished(self, processed_text):
self.processed_text = processed_text
self.current_typing_position = 0
self.typing_timer.start(20)
def type_next_character(self):
if self.current_typing_position < len(self.processed_text):
if self.current_typing_position == 0:
self.generated_text_area.clear()
current_text = self.processed_text[self.current_typing_position]
self.generated_text_area.moveCursor(QTextCursor.End)
self.generated_text_area.insertPlainText(current_text)
self.current_typing_position += 1
else:
self.typing_timer.stop()
# Path: features/optimizer.py
class CodeOptimizer(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
self.typing_timer = QTimer(self)
self.typing_timer.timeout.connect(self.type_next_character)
self.current_typing_position = 0
def init_ui(self):
self.setWindowTitle("Article Generator")
self.setGeometry(100, 100, 1500, 900)
self.setStyleSheet("background-color: #FFFFFF; color: #000000;")
main_layout = QHBoxLayout()
main_layout.setContentsMargins(20, 20, 20, 20)
left_layout = QVBoxLayout()
left_layout.setContentsMargins(10, 20, 10, 20)
left_layout.setSpacing(20)
right_layout = QVBoxLayout()
right_layout.setContentsMargins(10, 10, 10, 10)
splitter = QSplitter()
language_label = QLabel("Select Programming Language:")
language_label.setFont(QFont("Arial", 16))
self.language_selection = QComboBox()
self.language_selection.setFont(QFont("Arial", 16))
self.language_selection.setStyleSheet(
"QComboBox { padding: 8px; background-color: #dae6db; color: #000000; }"
)
self.language_selection.addItems(["Python", "Java", "C++", "JavaScript", "C"])
code_label = QLabel("Code to Optimize")
code_label.setFont(QFont("Arial", 16))
self.code_entry = QTextEdit()
self.code_entry.setFont(QFont("Arial", 16))
self.code_entry.setStyleSheet(
"QTextEdit { border-radius: 5px; padding: 5px; background-color: #dae6db; color: #000000; }"
)
generate_doc_button = QPushButton("Optimize Code")
generate_doc_button.setFont(QFont("Arial", 18))
generate_doc_button.setStyleSheet(
"QPushButton { border-radius: 10px; padding: 10px; background-color: #1565C0; color: #FFFFFF; font-weight:600; } QPushButton:hover { background-color: #0F4FA8; }"
)
generate_doc_button.clicked.connect(self.optimize_code)
left_layout.addWidget(language_label)
left_layout.addWidget(self.language_selection)
left_layout.addWidget(code_label)
left_layout.addWidget(self.code_entry)
left_layout.addWidget(generate_doc_button)
self.generated_text_area = QTextBrowser()
self.generated_text_area.setReadOnly(True)
self.generated_text_area.setFont(QFont("Arial", 16))
self.generated_text_area.setStyleSheet(
"QTextBrowser { border-radius: 5px; padding: 5px; background-color: #dae6db; color: #000000; }"
)
right_layout.addWidget(self.generated_text_area)
left_widget = QWidget()
left_widget.setLayout(left_layout)
right_widget = QWidget()
right_widget.setLayout(right_layout)
splitter.addWidget(left_widget)
splitter.addWidget(right_widget)
splitter.setSizes([400, 800])
main_layout.addWidget(splitter)
self.setLayout(main_layout)
def optimize_code(self):
language = self.language_selection.currentText()
code = self.code_entry.toPlainText()
self.generated_text_area.setText("Optimizing Code Snippets...")
self.worker = Worker(language, code)
self.thread = threading.Thread(target=self.worker.run)
self.worker.finished.connect(self.on_finished)
self.thread.start()
def on_finished(self, processed_text):
self.processed_text = processed_text
self.current_typing_position = 0
self.typing_timer.start(20)
def type_next_character(self):
if self.current_typing_position < len(self.processed_text):
if self.current_typing_position == 0:
self.generated_text_area.clear()
current_text = self.processed_text[self.current_typing_position]
self.generated_text_area.moveCursor(QTextCursor.End)
self.generated_text_area.insertPlainText(current_text)
self.current_typing_position += 1
else:
self.typing_timer.stop()
# Path: features/summarizer.py
class CodeSummarizer(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
self.typing_timer = QTimer(self)
self.typing_timer.timeout.connect(self.type_next_character)
self.current_typing_position = 0
def init_ui(self):
self.setWindowTitle("Code Summarizer")
self.setGeometry(100, 100, 1500, 900)
self.setStyleSheet("background-color: #FFFFFF; color: #000000;")
main_layout = QHBoxLayout()
main_layout.setContentsMargins(20, 20, 20, 20)
left_layout = QVBoxLayout()
left_layout.setContentsMargins(10, 20, 10, 20)
left_layout.setSpacing(20)
right_layout = QVBoxLayout()
right_layout.setContentsMargins(10, 10, 10, 10)
splitter = QSplitter()
language_label = QLabel("Select Programming Language:")
language_label.setFont(QFont("Arial", 16))
self.language_selection = QComboBox()
self.language_selection.setFont(QFont("Arial", 16))
self.language_selection.setStyleSheet(
"QComboBox { padding: 8px; background-color: #F2EFEF; color: #202020; }"
)
self.language_selection.addItems(["Python", "Java", "C++", "JavaScript", "C"])
code_label = QLabel("Code to Summarize")
code_label.setFont(QFont("Arial", 16))
self.code_entry = QTextEdit()
self.code_entry.setFont(QFont("Arial", 16))
self.code_entry.setStyleSheet(
"QTextEdit { border-radius: 5px; padding: 5px; background-color: #F2EFEF; color: #202020; }"
)
generate_summarize_button = QPushButton("Summarize Code")
generate_summarize_button.setFont(QFont("Arial", 18))
generate_summarize_button.setStyleSheet(
"QPushButton { border-radius: 10px; padding: 10px; background-color: #601527; color: #FFFFFF; font-weight:600; } QPushButton:hover { background-color: #601527; }"
)
generate_summarize_button.clicked.connect(self.summarize_code)
left_layout.addWidget(language_label)
left_layout.addWidget(self.language_selection)
left_layout.addWidget(code_label)
left_layout.addWidget(self.code_entry)
left_layout.addWidget(generate_summarize_button)
self.generated_text_area = QTextBrowser()
self.generated_text_area.setReadOnly(True)
self.generated_text_area.setFont(QFont("Arial", 16))
self.generated_text_area.setStyleSheet(
"QTextBrowser { border-radius: 5px; padding: 5px; background-color: #F2EFEF; color: #202020; }"
)
right_layout.addWidget(self.generated_text_area)
left_widget = QWidget()
left_widget.setLayout(left_layout)
right_widget = QWidget()
right_widget.setLayout(right_layout)
splitter.addWidget(left_widget)
splitter.addWidget(right_widget)
splitter.setSizes([400, 800])
main_layout.addWidget(splitter)
self.setLayout(main_layout)
def summarize_code(self):
language = self.language_selection.currentText()
code = self.code_entry.toPlainText()
self.generated_text_area.setText("Summarizing the Code Snippets...")
self.worker = Worker(language, code)
self.thread = threading.Thread(target=self.worker.run)
self.worker.finished.connect(self.on_finished)
self.thread.start()
def on_finished(self, processed_text):
self.processed_text = processed_text
self.current_typing_position = 0
self.typing_timer.start(20)
def type_next_character(self):
if self.current_typing_position < len(self.processed_text):
if self.current_typing_position == 0:
self.generated_text_area.clear()
current_text = self.processed_text[self.current_typing_position]
self.generated_text_area.moveCursor(QTextCursor.End)
self.generated_text_area.insertPlainText(current_text)
self.current_typing_position += 1
else:
self.typing_timer.stop()
# Path: features/translator.py
class CodeTranslator(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
self.typing_timer = QTimer(self)
self.typing_timer.timeout.connect(self.type_next_character)
self.current_typing_position = 0
self.processed_text = ""
def init_ui(self):
self.setWindowTitle("Code Translator")
self.setGeometry(100, 100, 1500, 900)
self.setStyleSheet("background-color: #FFFFFF; color: #000000;")
main_layout = QHBoxLayout()
self.setup_main_layout(main_layout)
self.setLayout(main_layout)
def setup_main_layout(self, main_layout):
main_layout.setContentsMargins(20, 20, 20, 20)
left_layout = self.setup_left_layout()
right_layout = self.setup_right_layout()
splitter = QSplitter()
left_widget = QWidget()
left_widget.setLayout(left_layout)
right_widget = QWidget()
right_widget.setLayout(right_layout)
splitter.addWidget(left_widget)
splitter.addWidget(right_widget)
splitter.setSizes([400, 800])
main_layout.addWidget(splitter)
def setup_left_layout(self):
left_layout = QVBoxLayout()
left_layout.setContentsMargins(10, 20, 10, 20)
left_layout.setSpacing(20)
source_lang_label = self.create_label("Select Source Programming Language:")
self.source_lang_selection = self.create_combobox(
["Python", "Java", "C++", "JavaScript", "C"]
)
target_lang_label = self.create_label("Select Target Programming Language:")
self.target_lang_selection = self.create_combobox(
["Python", "Java", "C++", "JavaScript", "C"]
)
code_label = self.create_label("Code Snippets:")
self.code_entry = self.create_text_edit()
translate_button = self.create_button(
"Translate Code", self.generate_translate_code
)
left_layout.addWidget(source_lang_label)
left_layout.addWidget(self.source_lang_selection)
left_layout.addWidget(target_lang_label)
left_layout.addWidget(self.target_lang_selection)
left_layout.addWidget(code_label)
left_layout.addWidget(self.code_entry)
left_layout.addWidget(translate_button)
return left_layout
def setup_right_layout(self):
right_layout = QVBoxLayout()
right_layout.setContentsMargins(10, 10, 10, 10)
self.generated_text_area = self.create_text_browser()
right_layout.addWidget(self.generated_text_area)
return right_layout
def create_label(self, text):
label = QLabel(text)
label.setFont(QFont("Arial", 16))
return label
def create_combobox(self, items):
combobox = QComboBox()
combobox.setFont(QFont("Arial", 16))
combobox.addItems(items)
combobox.setStyleSheet(
"padding: 5px; background-color: #F0E6F4; color: #303030;"
)
return combobox
def create_text_edit(self):
text_edit = QTextEdit()
text_edit.setFont(QFont("Arial", 16))
text_edit.setStyleSheet(
"border-radius: 5px; padding: 5px; background-color: #F0E6F4; color: #303030;"
)
return text_edit
def create_button(self, text, callback):
button = QPushButton(text)
button.setFont(QFont("Arial", 18))
button.clicked.connect(callback)
button.setStyleSheet(
"""
QPushButton {
border-radius: 10px;
padding: 10px;
background-color: #6A1B9A;
color: #FFFFFF;
font-weight:600;
}
QPushButton:hover {
background-color: #6A1B9A;
}
"""
)
return button
def create_text_browser(self):
text_browser = QTextBrowser()
text_browser.setReadOnly(True)
text_browser.setFont(QFont("Arial", 16))
text_browser.setStyleSheet(
"border-radius: 5px; padding: 5px; background-color: #F0E6F4; color: #303030;"
)
return text_browser
def generate_translate_code(self):
source_lang = self.source_lang_selection.currentText()
target_lang = self.target_lang_selection.currentText()
code = self.code_entry.toPlainText()
self.generated_text_area.setText(
f"Translating Code Snippet from {source_lang} to {target_lang}..."
)
self.worker = Worker(source_lang, target_lang, code)
self.thread = threading.Thread(target=self.worker.run)
self.worker.finished.connect(self.on_finished)
self.thread.start()
def on_finished(self, processed_text):
self.processed_text = processed_text
self.current_typing_position = 0
self.typing_timer.start(20)
def type_next_character(self):
if self.current_typing_position < len(self.processed_text):
if self.current_typing_position == 0:
self.generated_text_area.clear()
current_text = self.processed_text[self.current_typing_position]
self.generated_text_area.moveCursor(QTextCursor.End)
self.generated_text_area.insertPlainText(current_text)
self.current_typing_position += 1
else:
self.typing_timer.stop()
# Path: app.py
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QTabWidget, QWidget, QTabBar
from features.documenter import CodeDocumenter
from features.optimizer import CodeOptimizer
from features.summarizer import CodeSummarizer
from features.translator import CodeTranslator
class StretchedTabBar(QTabBar):
def __init__(self, parent=None):
super().__init__(parent)
def tabSizeHint(self, index):
| size = super().tabSizeHint(index) |
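A minimal sketch of the stretched-tab-bar pattern the snippet above is completing, assuming PyQt5; the class names and tab labels are illustrative, not taken from the repository:

import sys
from PyQt5.QtWidgets import QApplication, QTabBar, QTabWidget, QWidget

class EvenTabBar(QTabBar):
    def tabSizeHint(self, index):
        # Start from the default hint, then widen it so the tabs share the bar width.
        size = super().tabSizeHint(index)
        if self.count() > 0 and self.parentWidget() is not None:
            size.setWidth(max(size.width(), self.parentWidget().width() // self.count()))
        return size

class EvenTabWidget(QTabWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.setTabBar(EvenTabBar(self))  # setTabBar is protected, so it is called from a subclass

if __name__ == "__main__":
    app = QApplication(sys.argv)
    tabs = EvenTabWidget()
    tabs.addTab(QWidget(), "Documenter")
    tabs.addTab(QWidget(), "Optimizer")
    tabs.show()
    sys.exit(app.exec_())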
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amadad/agentcy3
# Path: agency_swarm/agents/agent.py
class Agent():
@property
def assistant(self):
if self._assistant is None:
raise Exception("Assistant is not initialized. Please run init_oai() first.")
return self._assistant
@assistant.setter
def assistant(self, value):
self._assistant = value
@property
def functions(self):
return [tool for tool in self.tools if issubclass(tool, BaseTool)]
def __init__(self, id: str = None, name: str = None, description: str = None, instructions: str = "",
tools: List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]] = None,
files_folder: Union[List[str], str] = None,
file_ids: List[str] = None, metadata: Dict[str, str] = None, model: str = "gpt-4-1106-preview"):
"""
Initializes an Agent with specified attributes, tools, and OpenAI client.
Parameters:
id (str, optional): Unique identifier for the agent. Defaults to None.
name (str, optional): Name of the agent. Defaults to the class name if not provided.
description (str, optional): A brief description of the agent's purpose. Defaults to None.
instructions (str, optional): Path to a file containing specific instructions for the agent. Defaults to an empty string.
tools (List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]], optional): A list of tools (as classes) that the agent can use. Defaults to an empty list.
files_folder (Union[List[str], str], optional): Path or list of paths to directories containing files associated with the agent. Defaults to None.
file_ids (List[str], optional): List of file IDs for files associated with the agent. Defaults to an empty list.
metadata (Dict[str, str], optional): Metadata associated with the agent. Defaults to an empty dictionary.
model (str, optional): The model identifier for the OpenAI API. Defaults to "gpt-4-1106-preview".
This constructor sets up the agent with its unique properties, initializes the OpenAI client, reads instructions if provided, and uploads any associated files.
"""
self.id = id
self.name = name if name else self.__class__.__name__
self.description = description
self.instructions = instructions
self.tools = tools if tools else []
self.files_folder = files_folder
self.file_ids = file_ids if file_ids else []
self.metadata = metadata if metadata else {}
self.model = model
self._assistant: Any = None
self._shared_instructions = None
self.client = get_openai_client()
if os.path.isfile(self.instructions):
self._read_instructions(self.instructions)
elif os.path.isfile(os.path.join(self.get_class_folder_path(), self.instructions)):
self._read_instructions(os.path.join(self.get_class_folder_path(), self.instructions))
self._upload_files()
def init_oai(self):
"""
Initializes the OpenAI assistant for the agent.
This method handles the initialization and potential updates of the agent's OpenAI assistant. It loads the assistant based on a saved ID, updates the assistant if necessary, or creates a new assistant if it doesn't exist. After initialization or update, it saves the assistant's settings.
Output:
self: Returns the agent instance for chaining methods or further processing.
"""
# check if settings.json exists
path = self.get_settings_path()
# load assistant from id
if self.id:
self.assistant = self.client.beta.assistants.retrieve(self.id)
# update assistant if parameters are different
if not self._check_parameters(self.assistant.model_dump()):
self._update_assistant()
return self
# load assistant from settings
if os.path.exists(path):
with open(path, 'r') as f:
settings = json.load(f)
# iterate settings and find the assistant with the same name
for assistant_settings in settings:
if assistant_settings['name'] == self.name:
self.assistant = self.client.beta.assistants.retrieve(assistant_settings['id'])
self.id = assistant_settings['id']
# update assistant if parameters are different
if not self._check_parameters(self.assistant.model_dump()):
print("Updating assistant... " + self.name)
self._update_assistant()
self._update_settings()
return self
# create assistant if settings.json does not exist or assistant with the same name does not exist
self.assistant = self.client.beta.assistants.create(
name=self.name,
description=self.description,
instructions=self.instructions,
tools=self.get_oai_tools(),
file_ids=self.file_ids,
metadata=self.metadata,
model=self.model
)
self.id = self.assistant.id
self._save_settings()
return self
def _update_assistant(self):
"""
Updates the existing assistant's parameters on the OpenAI server.
This method updates the assistant's details such as name, description, instructions, tools, file IDs, metadata, and the model. It only updates parameters that have non-empty values. After updating the assistant, it also updates the local settings file to reflect these changes.
No input parameters are directly passed to this method as it uses the agent's instance attributes.
No output parameters are returned, but the method updates the assistant's details on the OpenAI server and locally updates the settings file.
"""
params = {
"name": self.name,
"description": self.description,
"instructions": self.instructions,
"tools": self.get_oai_tools(),
"file_ids": self.file_ids,
"metadata": self.metadata,
"model": self.model
}
params = {k: v for k, v in params.items() if v}
self.assistant = self.client.beta.assistants.update(
self.id,
**params,
)
self._update_settings()
def _check_parameters(self, assistant_settings):
"""
Checks if the agent's parameters match with the given assistant settings.
Parameters:
assistant_settings (dict): A dictionary containing the settings of an assistant.
Returns:
bool: True if all the agent's parameters match the assistant settings, False otherwise.
This method compares the current agent's parameters such as name, description, instructions, tools, file IDs, metadata, and model with the given assistant settings. It uses DeepDiff to compare complex structures like tools and metadata. If any parameter does not match, it returns False; otherwise, it returns True.
"""
if self.name != assistant_settings['name']:
return False
if self.description != assistant_settings['description']:
return False
if self.instructions != assistant_settings['instructions']:
return False
tools_diff = DeepDiff(self.get_oai_tools(), assistant_settings['tools'], ignore_order=True)
if tools_diff != {}:
return False
if set(self.file_ids) != set(assistant_settings['file_ids']):
return False
metadata_diff = DeepDiff(self.metadata, assistant_settings['metadata'], ignore_order=True)
if metadata_diff != {}:
return False
if self.model != assistant_settings['model']:
return False
return True
def _save_settings(self):
path = self.get_settings_path()
# check if settings.json exists
if not os.path.isfile(path):
with open(path, 'w') as f:
json.dump([self.assistant.model_dump()], f, indent=4)
else:
settings = []
with open(path, 'r') as f:
settings = json.load(f)
settings.append(self.assistant.model_dump())
with open(path, 'w') as f:
json.dump(settings, f, indent=4)
def _update_settings(self):
path = self.get_settings_path()
# check if settings.json exists
if os.path.isfile(path):
settings = []
with open(path, 'r') as f:
settings = json.load(f)
for i, assistant_settings in enumerate(settings):
if assistant_settings['id'] == self.id:
settings[i] = self.assistant.model_dump()
break
with open(path, 'w') as f:
json.dump(settings, f, indent=4)
def _read_instructions(self, path):
with open(path, 'r') as f:
self.instructions = f.read()
def _upload_files(self):
if isinstance(self.files_folder, str):
f_path = self.files_folder
if not os.path.isdir(f_path):
f_path = os.path.join(self.get_class_folder_path(), self.files_folder)
if os.path.isdir(f_path):
f_paths = os.listdir(f_path)
f_paths = [f for f in f_paths if not f.startswith(".")]
f_paths = [os.path.join(f_path, f) for f in f_paths]
for f_path in f_paths:
file_id = self._get_id_from_file(f_path)
if file_id:
print("File already uploaded. Skipping... " + os.path.basename(f_path))
self.file_ids.append(file_id)
else:
print("Uploading new file... " + os.path.basename(f_path))
with open(f_path, 'rb') as f:
file_id = self.client.files.create(file=f, purpose="assistants").id
self.file_ids.append(file_id)
self._add_id_to_file(f_path, file_id)
if Retrieval not in self.tools:
print("Detected files without Retrieval. Adding Retrieval tool...")
self.add_tool(Retrieval)
else:
raise Exception("Files folder path is not a directory.")
def _add_id_to_file(self, f_path, id):
"""Add file id to file name"""
if os.path.isfile(f_path):
file_name, file_ext = os.path.splitext(f_path)
f_path_new = file_name + "_" + id + file_ext
os.rename(f_path, f_path_new)
return f_path_new
else:
raise Exception("Items in files folder must be files.")
def _get_id_from_file(self, f_path):
"""Get file id from file name"""
if os.path.isfile(f_path):
file_name, file_ext = os.path.splitext(f_path)
file_name = os.path.basename(file_name)
file_name = file_name.split("_")
if len(file_name) > 1:
return file_name[-1] if "file-" in file_name[-1] else None
else:
return None
else:
raise Exception("Items in files folder must be files.")
def get_settings_path(self):
return os.path.join("./", 'settings.json')
def get_class_folder_path(self):
return os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))
def set_params(self, **params):
for k, v in params.items():
setattr(self, k, v)
def add_tool(self, tool):
if not isinstance(tool, type):
raise Exception("Tool must not be initialized.")
if issubclass(tool, Retrieval):
# check that tools name is not already in tools
for t in self.tools:
if issubclass(t, Retrieval):
return
self.tools.append(tool)
elif issubclass(tool, CodeInterpreter):
for t in self.tools:
if issubclass(t, Retrieval):
return
self.tools.append(tool)
elif issubclass(tool, BaseTool):
for t in self.tools:
if t.__name__ == tool.__name__:
self.tools.remove(t)
self.tools.append(tool)
else:
raise Exception("Invalid tool type.")
def add_instructions(self, instructions: str):
if self._shared_instructions is None:
self._shared_instructions = instructions
else:
self.instructions = self.instructions.replace(self._shared_instructions, "")
self.instructions = self.instructions.strip().strip("\n")
self._shared_instructions = instructions
self.instructions = self._shared_instructions + "\n\n" + self.instructions
def get_oai_tools(self):
tools = []
for tool in self.tools:
if not isinstance(tool, type):
raise Exception("Tool must not be initialized.")
if issubclass(tool, Retrieval):
tools.append(tool().model_dump())
elif issubclass(tool, CodeInterpreter):
tools.append(tool().model_dump())
elif issubclass(tool, BaseTool):
tools.append({
"type": "function",
"function": tool.openai_schema
})
else:
raise Exception("Invalid tool type.")
return tools
def delete_assistant(self):
self.client.beta.assistants.delete(self.id)
self._delete_settings()
def _delete_settings(self):
path = self.get_settings_path()
# check if settings.json exists
if os.path.isfile(path):
settings = []
with open(path, 'r') as f:
settings = json.load(f)
for i, assistant_settings in enumerate(settings):
if assistant_settings['id'] == self.id:
settings.pop(i)
break
with open(path, 'w') as f:
json.dump(settings, f, indent=4)
# Path: agency_swarm/messages/message_output.py
class MessageOutput:
def __init__(self, msg_type: Literal["function", "function_output", "text", "system"], sender_name: str, receiver_name: str, content):
self.msg_type = msg_type
self.sender_name = str(sender_name)
self.receiver_name = str(receiver_name)
self.content = str(content)
self.client = get_openai_client()
def hash_names_to_color(self):
if self.msg_type == "function":
return "green"
if self.msg_type == "system":
return "red"
combined_str = self.sender_name + self.receiver_name
encoded_str = combined_str.encode()
hash_obj = hashlib.md5(encoded_str)
hash_int = int(hash_obj.hexdigest(), 16)
colors = [
'grey', 'yellow', 'blue', 'magenta', 'cyan', 'white',
]
color_index = hash_int % len(colors)
return colors[color_index]
def cprint(self):
color = self.hash_names_to_color()
text = self.get_formatted_content()
print(colored(text, color))
def get_formatted_content(self):
if self.msg_type == "function":
text = self.sender_name + " Executing Function: " + str(self.content) + "\n"
return text
if self.msg_type == "function_output":
text = self.sender_name + f" Function Output (by {self.receiver_name}): " + str(self.content) + "\n"
return text
text = self.sender_name + f' (to {self.receiver_name})' ": " + self.content + "\n"
return text
def get_sender_emoji(self):
if self.msg_type == "function" or self.msg_type == "function_output":
return "🧠"
if self.msg_type == "system":
return "🤖"
if self.sender_name.lower() == "user":
return "👤"
if self.sender_name.lower() == "ceo":
return "🤵"
# output emoji based on hash of sender name
encoded_str = self.sender_name.encode()
hash_obj = hashlib.md5(encoded_str)
hash_int = int(hash_obj.hexdigest(), 16)
emojis = [
'🐶', '🐱', '🐭', '🐹', '🐰', '🦊',
'🐻', '🐼', '🐨', '🐯', '🦁', '🐮',
'🐷', '🐸', '🐵', '🐔', '🐧', '🐦',
'🐤']
emoji_index = hash_int % len(emojis)
return emojis[emoji_index]
# Path: agency_swarm/user/user.py
class User:
name: str = "User"
def __init__(self, name: str = None):
# later, we can add more attributes to the user like bio, etc
pass
# Path: agency_swarm/util/oai.py
def get_openai_client():
global client
with client_lock:
if client is None:
# Check if the API key is set
api_key = openai.api_key or os.getenv('OPENAI_API_KEY')
if api_key is None:
raise ValueError("OpenAI API key is not set. Please set it using set_openai_key.")
client = instructor.patch(openai.OpenAI(api_key=api_key))
return client
# Path: agency_swarm/threads/thread.py
import inspect
import time
from typing import Literal
from agency_swarm.agents import Agent
from agency_swarm.messages import MessageOutput
from agency_swarm.user import User
from agency_swarm.util.oai import get_openai_client
class Thread:
id: str
thread = None
run = None
def __init__(self, agent: Literal[Agent, User], recipient_agent: Agent):
self.agent = agent
self.recipient_agent = recipient_agent
self.client = get_openai_client()
def get_completion(self, message: str, yield_messages=True):
if not self.thread:
self.thread = self.client.beta.threads.create()
self.id = self.thread.id
# send message
self.client.beta.threads.messages.create(
thread_id=self.thread.id,
role="user",
content=message
)
if yield_messages:
yield MessageOutput("text", self.agent.name, self.recipient_agent.name, message)
# create run
self.run = self.client.beta.threads.runs.create(
thread_id=self.thread.id,
assistant_id=self.recipient_agent.id,
)
while True:
# wait until run completes
while self.run.status in ['queued', 'in_progress']:
time.sleep(0.5)
self.run = self.client.beta.threads.runs.retrieve(
thread_id=self.thread.id,
run_id=self.run.id
)
# function execution
if self.run.status == "requires_action":
tool_calls = self.run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tool_call in tool_calls:
if yield_messages:
yield MessageOutput("function", self.recipient_agent.name, self.agent.name, str(tool_call.function))
output = self._execute_tool(tool_call)
if inspect.isgenerator(output):
try:
while True:
item = next(output)
if isinstance(item, MessageOutput) and yield_messages:
yield item
except StopIteration as e:
output = e.value
else:
if yield_messages:
yield MessageOutput("function_output", tool_call.function.name, self.recipient_agent.name, output)
tool_outputs.append({"tool_call_id": tool_call.id, "output": str(output)})
# submit tool outputs
self.run = self.client.beta.threads.runs.submit_tool_outputs(
thread_id=self.thread.id,
run_id=self.run.id,
tool_outputs=tool_outputs
)
# error
elif self.run.status == "failed":
| raise Exception("Run Failed. Error: ", self.run.last_error) |
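Thread.get_completion above centres on a poll-and-dispatch loop over an Assistants API run. A condensed sketch of just that loop, reusing only client calls that already appear in the source (runs.create, runs.retrieve); client, thread_id and assistant_id are placeholders supplied by the caller:

import time

def wait_for_run(client, thread_id, assistant_id, poll_interval=0.5):
    # Start a run for the recipient assistant on the shared thread.
    run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
    # Poll until the run leaves the queued/in-progress states.
    while run.status in ("queued", "in_progress"):
        time.sleep(poll_interval)
        run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
    if run.status == "failed":
        raise RuntimeError(f"Run failed: {run.last_error}")
    # Either 'completed' or 'requires_action' (pending tool calls) is returned to the caller.
    return run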
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Deltares/imod-python
# Path: imod/util.py
DATETIME_FORMATS = {
14: "%Y%m%d%H%M%S",
12: "%Y%m%d%H%M",
10: "%Y%m%d%H",
8: "%Y%m%d",
4: "%Y",
}
def to_datetime(s):
def _groupdict(stem: str, pattern: str) -> Dict:
def decompose(path, pattern: str = None) -> Dict[str, Any]:
def _convert_datetimes(times, use_cftime):
def _compose_timestring(time, time_format="%Y%m%d%H%M%S") -> str:
def compose(d, pattern=None) -> pathlib.Path:
def _xycoords(bounds, cellsizes) -> Dict[str, Any]:
def coord_reference(da_coord) -> Tuple[float, float, float]:
def spatial_reference(
a: xr.DataArray,
) -> Tuple[float, float, float, float, float, float]:
def transform(a: xr.DataArray) -> affine.Affine:
def equidistant(dx, name):
def cd(path: Union[str, pathlib.Path]):
def temporary_directory() -> pathlib.Path:
def ignore_warnings():
def ugrid2d_data(da: xr.DataArray, face_dim: str) -> xr.DataArray:
def unstack_dim_into_variable(
dataset: Union[xr.Dataset, xu.UgridDataset], dim: str
) -> Union[xr.Dataset, xu.UgridDataset]:
def mdal_compliant_ugrid2d(dataset: xr.Dataset) -> xr.Dataset:
def from_mdal_compliant_ugrid2d(dataset: xu.UgridDataset):
def to_ugrid2d(data: Union[xr.DataArray, xr.Dataset]) -> xr.Dataset:
def is_divisor(numerator: FloatArray, denominator: float) -> bool:
def initialize_nested_dict(depth: int) -> collections.defaultdict:
def set_nested(d: collections.defaultdict, keys: List[str], value: Any) -> None:
def append_nested_dict(dict1: Dict, dict2: Dict) -> None:
def sorted_nested_dict(d: Dict) -> Dict:
def _layer(layer: Union[int, Sequence[int], IntArray]) -> IntArray:
def _time(time: Any) -> Any:
def empty_2d(
dx: Union[float, FloatArray],
xmin: float,
xmax: float,
dy: Union[float, FloatArray],
ymin: float,
ymax: float,
) -> xr.DataArray:
def empty_3d(
dx: Union[float, FloatArray],
xmin: float,
xmax: float,
dy: Union[float, FloatArray],
ymin: float,
ymax: float,
layer: Union[int, Sequence[int], IntArray],
) -> xr.DataArray:
def empty_2d_transient(
dx: Union[float, FloatArray],
xmin: float,
xmax: float,
dy: Union[float, FloatArray],
ymin: float,
ymax: float,
time: Any,
) -> xr.DataArray:
def empty_3d_transient(
dx: Union[float, FloatArray],
xmin: float,
xmax: float,
dy: Union[float, FloatArray],
ymin: float,
ymax: float,
layer: Union[int, Sequence[int], IntArray],
time: Any,
) -> xr.DataArray:
def where(condition, if_true, if_false, keep_nan: bool = True) -> xr.DataArray:
def replace(da: xr.DataArray, to_replace: Any, value: Any) -> xr.DataArray:
def _replace(
a: np.ndarray, to_replace: np.ndarray, value: np.ndarray
) -> np.ndarray:
def values_within_range(da, min=None, max=None):
def __init__(self, name):
def __getattr__(self, name):
class MissingOptionalModule:
# Path: imod/formats/idf.py
def header(path, pattern):
def _read(path, headersize, nrow, ncol, nodata, dtype):
def read(path, pattern=None):
def open(path, use_cftime=False, pattern=None):
def _merge_subdomains(pathlists, use_cftime, pattern):
def open_subdomains(path, use_cftime=False, pattern=None):
def open_dataset(globpath, use_cftime=False, pattern=None):
def write(path, a, nodata=1.0e20, dtype=np.float32):
def _as_voxeldata(a):
def save(path, a, nodata=1.0e20, pattern=None, dtype=np.float32):
# Path: imod/tests/test_formats/test_idf.py
import numpy as np
import pytest
import xarray as xr
from pytest import approx
from imod import idf, util
dx, dy = 1.0, -1.0
xmin, xmax = 0.0, 4.0
ymin, ymax = 0.0, 3.0
coords = util._xycoords((xmin, xmax, ymin, ymax), (dx, dy))
assert np.allclose(coords["x"], np.arange(xmin + dx / 2.0, xmax, dx))
assert np.allclose(coords["y"], np.arange(ymax + dy / 2.0, ymin, dy))
assert coords["dx"] == dx
assert coords["dy"] == dy
def test_xycoords_nonequidistant():
dx = np.array([0.9, 1.1, 0.8, 1.2])
dy = np.array([-1.3, -0.7, -1.0])
xmin, xmax = 0.0, 4.0
ymin, ymax = 0.0, 3.0
coords = util._xycoords((xmin, xmax, ymin, ymax), (dx, dy))
assert np.allclose(coords["x"], np.array([0.45, 1.45, 2.4, 3.4]))
assert np.allclose(coords["y"], np.array([2.35, 1.35, 0.5]))
assert coords["dx"][0] == "x"
assert np.allclose(coords["dx"][1], dx)
assert coords["dy"][0] == "y"
assert np.allclose(coords["dy"][1], dy)
def test_xycoords_equidistant_array():
dx = np.array([2.0, 2.0, 2.0, 2.0])
dy = np.array([-0.5, -0.500001, -0.5])
xmin, xmax = 0.0, 8.0
ymin, ymax = 0.0, 1.5
coords = util._xycoords((xmin, xmax, ymin, ymax), (dx, dy))
assert np.allclose(coords["x"], np.arange(xmin + 1.0, xmax, 2.0))
assert np.allclose(coords["y"], np.arange(ymax - 0.25, ymin, -0.5))
assert coords["dx"] == approx(2.0)
assert coords["dy"] == approx(-0.5)
def test_saveopen__nonequidistant(test_da_nonequidistant, tmp_path):
idf.save(tmp_path / "nonequidistant", test_da_nonequidistant)
assert (tmp_path / "nonequidistant.idf").exists()
da = idf.open(tmp_path / "nonequidistant.idf")
assert isinstance(da, xr.DataArray)
assert np.array_equal(da, test_da_nonequidistant)
# since the coordinates are created in float64 and stored in float32,
# we lose some precision, which we have to allow for here
xr.testing.assert_allclose(da, test_da_nonequidistant)
def test_save_topbot__single_layer(test_da, tmp_path):
da = test_da
da = da.assign_coords(z=0.5)
da = da.assign_coords(dz=1.0)
idf.save(tmp_path / "test", da)
da_read = idf.open(tmp_path / "test.idf")
assert da_read["z"] == approx(0.5)
assert da_read["dz"] == approx(1.0)
def test_save_topbot__layers(test_layerda, tmp_path):
da = test_layerda
da = da.assign_coords(z=("layer", np.arange(1.0, 6.0) - 0.5))
idf.save(tmp_path / "layer", da)
da_l1 = idf.open(tmp_path / "layer_l1.idf")
assert da_l1["z"] == approx(0.5)
assert da_l1["dz"] == approx(1.0)
da_l2 = idf.open(tmp_path / "layer_l2.idf")
assert da_l2["z"] == approx(1.5)
assert da_l2["dz"] == approx(1.0)
# Read multiple idfs
actual = idf.open(tmp_path / "layer_l*.idf")
assert np.allclose(actual["z"], da["z"])
assert actual["dz"] == approx(1.0)
def test_save_topbot__layers_nonequidistant(test_layerda, tmp_path):
da = test_layerda
dz = np.arange(-1.0, -6.0, -1.0)
z = np.cumsum(dz) - 0.5 * dz
da = da.assign_coords(z=("layer", z))
da = da.assign_coords(dz=("layer", dz))
idf.save(tmp_path / "layer", da)
# Read multiple idfs
actual = idf.open(tmp_path / "layer_l*.idf")
assert np.allclose(actual["z"], da["z"])
assert np.allclose(actual["dz"], da["dz"])
def test_save_topbot__only_z(test_layerda, tmp_path):
da = test_layerda
da = da.assign_coords(z=("layer", np.arange(1.0, 6.0) - 0.5))
da = da.swap_dims({"layer": "z"})
da = da.drop_vars("layer")
idf.save(tmp_path / "layer", da)
da_l1 = idf.open(tmp_path / "layer_l1.idf")
assert da_l1["z"] == approx(0.5)
assert da_l1["dz"] == approx(1.0)
da_l2 = idf.open(tmp_path / "layer_l2.idf")
assert da_l2["z"] == approx(1.5)
assert da_l2["dz"] == approx(1.0)
def test_save_topbot__errors(test_layerda, tmp_path):
da = test_layerda
# non-equidistant, cannot infer dz
z = np.array([0.0, -1.0, -3.0, -4.5, -5.0])
da = da.assign_coords(z=("layer", z))
with pytest.raises(ValueError):
idf.save(tmp_path / "layer", da)
def test_saveopen_dtype(test_da, tmp_path):
da = test_da
idf.save(tmp_path / "dtype", da, dtype=np.float32)
backda = idf.open(tmp_path / "dtype.idf")
assert backda.dtype == np.float32
idf.save(tmp_path / "dtype", da, dtype=np.float64)
backda = idf.open(tmp_path / "dtype.idf")
assert backda.dtype == np.float64
| def test_dtype_error(test_da, tmp_path): |
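These tests share one shape: write to pytest's tmp_path, read the file back, and compare with approx/allclose while allowing for float32 precision loss. A self-contained illustration of that round-trip pattern, using numpy only rather than the imod API:

import numpy as np
from pytest import approx

def test_roundtrip_float32(tmp_path):
    data = np.arange(12, dtype=np.float64).reshape(3, 4)
    path = tmp_path / "grid.npy"
    np.save(path, data.astype(np.float32))  # store at reduced precision
    back = np.load(path)
    assert back.dtype == np.float32
    assert back == approx(data, rel=1e-6)   # float64 -> float32 loss stays within tolerance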
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Dong142857/Live3DPortrait
# Path: torch_utils/misc.py
def constant(value, shape=None, dtype=None, device=None, memory_format=None):
def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
def suppress_tracer_warnings():
def assert_shape(tensor, ref_shape):
def profiled_function(fn):
def decorator(*args, **kwargs):
def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
def __iter__(self):
def params_and_buffers(module):
def named_params_and_buffers(module):
def copy_params_and_buffers(src_module, dst_module, require_all=False):
def ddp_sync(module, sync):
def check_ddp_consistency(module, ignore_regex=None):
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
def pre_hook(_mod, _inputs):
def post_hook(mod, _inputs, outputs):
class InfiniteSampler(torch.utils.data.Sampler):
# Path: torch_utils/persistence.py
def persistent_class(orig_class):
def __init__(self, *args, **kwargs):
def init_args(self):
def init_kwargs(self):
def __reduce__(self):
def is_persistent(obj):
def import_hook(hook):
def _reconstruct_persistent_obj(meta):
def _module_to_src(module):
def _src_to_module(src):
def _check_pickleable(obj):
def recurse(obj):
class Decorator(orig_class):
# Path: torch_utils/ops/conv2d_gradfix.py
def no_weight_gradients(disable=True):
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
def _should_use_custom_op(input):
def _tuple_of_ints(xs, ndim):
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
def calc_output_padding(input_shape, output_shape):
def forward(ctx, input, weight, bias):
def backward(ctx, grad_output):
def forward(ctx, grad_output, input, weight):
def backward(ctx, grad2_grad_weight):
class Conv2d(torch.autograd.Function):
class Conv2dGradWeight(torch.autograd.Function):
# Path: torch_utils/ops/filtered_lrelu.py
def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
r"""Filtered leaky ReLU for a batch of 2D images.
Performs the following sequence of operations for each channel:
1. Add channel-specific bias if provided (`b`).
2. Upsample the image by inserting N-1 zeros after each pixel (`up`).
3. Pad the image with the specified number of zeros on each side (`padding`).
Negative padding corresponds to cropping the image.
4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it
so that the footprint of all output pixels lies within the input image.
5. Multiply each value by the provided gain factor (`gain`).
6. Apply leaky ReLU activation function to each value.
7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.
8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking
it so that the footprint of all output pixels lies within the input image.
9. Downsample the image by keeping every Nth pixel (`down`).
The fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports gradients of arbitrary order.
Args:
x: Float32/float16/float64 input tensor of the shape
`[batch_size, num_channels, in_height, in_width]`.
fu: Float32 upsampling FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
fd: Float32 downsampling FIR filter of the shape
`[filter_height, filter_width]` (non-separable),
`[filter_taps]` (separable), or
`None` (identity).
b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
as `x`. The length of the vector must match the channel dimension of `x`.
up: Integer upsampling factor (default: 1).
down: Integer downsampling factor. (default: 1).
padding: Padding with respect to the upsampled image. Can be a single number
or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
(default: 0).
gain: Overall scaling factor for signal magnitude (default: sqrt(2)).
slope: Slope on the negative side of leaky ReLU (default: 0.2).
clamp: Maximum magnitude for leaky ReLU output (default: None).
flip_filter: False = convolution, True = correlation (default: False).
impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
Returns:
Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)
return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
# Path: torch_utils/ops/bias_act.py
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
r"""Fused bias and activation function.
Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
and scales the result by `gain`. Each of the steps is optional. In most cases,
the fused op is considerably more efficient than performing the same calculation
using standard PyTorch ops. It supports first and second order gradients,
but not third order gradients.
Args:
x: Input activation tensor. Can be of any shape.
b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
as `x`. The shape must be known, and it must match the dimension of `x`
corresponding to `dim`.
dim: The dimension in `x` corresponding to the elements of `b`.
The value of `dim` is ignored if `b` is not specified.
act: Name of the activation function to evaluate, or `"linear"` to disable.
Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
See `activation_funcs` for a full list. `None` is not allowed.
alpha: Shape parameter for the activation function, or `None` to use the default.
gain: Scaling factor for the output tensor, or `None` to use default.
See `activation_funcs` for the default scaling of each activation function.
If unsure, consider specifying 1.
clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable
the clamping (default).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the same shape and datatype as `x`.
"""
assert isinstance(x, torch.Tensor)
assert impl in ['ref', 'cuda']
if impl == 'cuda' and x.device.type == 'cuda' and _init():
return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b)
return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
# Path: models/eg3d/networks_stylegan3.py
import numpy as np
import scipy.signal
import scipy.optimize
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import filtered_lrelu
from torch_utils.ops import bias_act
return x
def extra_repr(self):
return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisInput(torch.nn.Module):
def __init__(self,
w_dim, # Intermediate latent (W) dimensionality.
channels, # Number of output channels.
size, # Output spatial size: int or [width, height].
sampling_rate, # Output sampling rate.
bandwidth, # Output bandwidth.
):
super().__init__()
self.w_dim = w_dim
self.channels = channels
self.size = np.broadcast_to(np.asarray(size), [2])
self.sampling_rate = sampling_rate
self.bandwidth = bandwidth
# Draw random frequencies from uniform 2D disc.
freqs = torch.randn([self.channels, 2])
radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
freqs /= radii * radii.square().exp().pow(0.25)
freqs *= bandwidth
phases = torch.rand([self.channels]) - 0.5
# Setup parameters and buffers.
self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels]))
self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0])
self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image.
self.register_buffer('freqs', freqs)
self.register_buffer('phases', phases)
def forward(self, w):
# Introduce batch dimension.
transforms = self.transform.unsqueeze(0) # [batch, row, col]
freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
phases = self.phases.unsqueeze(0) # [batch, channel]
# Apply learned transformation.
t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y)
m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image.
m_r[:, 0, 0] = t[:, 0] # r'_c
m_r[:, 0, 1] = -t[:, 1] # r'_s
m_r[:, 1, 0] = t[:, 1] # r'_s
m_r[:, 1, 1] = t[:, 0] # r'_c
m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image.
m_t[:, 0, 2] = -t[:, 2] # t'_x
m_t[:, 1, 2] = -t[:, 3] # t'_y
transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform.
# Transform frequencies.
phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
freqs = freqs @ transforms[:, :2, :2]
# Dampen out-of-band frequencies that may occur due to the user-specified transform.
amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)
# Construct sampling grid.
theta = torch.eye(2, 3, device=w.device)
theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False)
# Compute Fourier features.
x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel]
x = x + phases.unsqueeze(1).unsqueeze(2)
x = torch.sin(x * (np.pi * 2))
x = x * amplitudes.unsqueeze(1).unsqueeze(2)
# Apply trainable mapping.
weight = self.weight / np.sqrt(self.channels)
x = x @ weight.t()
# Ensure correct shape.
x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])])
return x
def extra_repr(self):
return '\n'.join([
f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
def __init__(self,
w_dim, # Intermediate latent (W) dimensionality.
is_torgb, # Is this the final ToRGB layer?
is_critically_sampled, # Does this layer use critical sampling?
use_fp16, # Does this layer use FP16?
# Input & output specifications.
in_channels, # Number of input channels.
out_channels, # Number of output channels.
in_size, # Input spatial size: int or [width, height].
out_size, # Output spatial size: int or [width, height].
in_sampling_rate, # Input sampling rate (s).
out_sampling_rate, # Output sampling rate (s).
in_cutoff, # Input cutoff frequency (f_c).
out_cutoff, # Output cutoff frequency (f_c).
in_half_width, # Input transition band half-width (f_h).
out_half_width, # Output Transition band half-width (f_h).
# Hyperparameters.
conv_kernel = 3, # Convolution kernel size. Ignored for the final ToRGB layer.
filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling.
lrelu_upsampling = 2, # Relative sampling rate for leaky ReLU. Ignored for the final ToRGB layer.
use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers.
conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping.
magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes.
):
super().__init__()
| self.w_dim = w_dim |
====REPOSITORY====
# Repo Name: blaise-tk/RVC_CLI
# Path: rvc/lib/utils.py
def load_audio(file, sampling_rate):
try:
file = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sampling_rate)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except Exception as error:
raise RuntimeError(f"Failed to load audio: {error}")
return np.frombuffer(out, np.float32).flatten()
# Path: rvc/lib/infer_pack/models.py
class SynthesizerTrnMs256NSFsid(nn.Module):
def __init__(
self,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
spk_embed_dim,
gin_channels,
sr,
**kwargs
):
super(SynthesizerTrnMs256NSFsid, self).__init__()
if isinstance(sr, str):
sr = sr2sr[sr]
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = float(p_dropout)
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.gin_channels = gin_channels
# self.hop_length = hop_length#
self.spk_embed_dim = spk_embed_dim
self.enc_p = TextEncoder256(
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
float(p_dropout),
)
self.dec = GeneratorNSF(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
sr=sr,
is_half=kwargs["is_half"],
)
self.enc_q = PosteriorEncoder(
spec_channels,
inter_channels,
hidden_channels,
5,
1,
16,
gin_channels=gin_channels,
)
self.flow = ResidualCouplingBlock(
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
self.flow.remove_weight_norm()
self.enc_q.remove_weight_norm()
def __prepare_scriptable__(self):
for hook in self.dec._forward_pre_hooks.values():
# The hook we want to remove is an instance of WeightNorm class, so
# normally we would do `if isinstance(...)` but this class is not accessible
# because of shadowing, so we check the module name directly.
# https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.dec)
for hook in self.flow._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.flow)
if hasattr(self, "enc_q"):
for hook in self.enc_q._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.enc_q)
return self
@torch.jit.ignore
def forward(
self,
phone: torch.Tensor,
phone_lengths: torch.Tensor,
pitch: torch.Tensor,
pitchf: torch.Tensor,
y: torch.Tensor,
y_lengths: torch.Tensor,
ds: Optional[torch.Tensor] = None,
): # ds is the speaker id, shape [bs, 1]
# print(1,pitch.shape)#[bs,t]
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis and broadcasts
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z_p = self.flow(z, y_mask, g=g)
z_slice, ids_slice = commons.rand_slice_segments(
z, y_lengths, self.segment_size
)
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
# print(-2,pitchf.shape,z_slice.shape)
o = self.dec(z_slice, pitchf, g=g)
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
@torch.jit.export
def infer(
self,
phone: torch.Tensor,
phone_lengths: torch.Tensor,
pitch: torch.Tensor,
nsff0: torch.Tensor,
sid: torch.Tensor,
rate: Optional[torch.Tensor] = None,
):
g = self.emb_g(sid).unsqueeze(-1)
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
if rate is not None:
assert isinstance(rate, torch.Tensor)
head = int(z_p.shape[2] * (1 - rate.item()))
z_p = z_p[:, :, head:]
x_mask = x_mask[:, :, head:]
nsff0 = nsff0[:, head:]
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec(z * x_mask, nsff0, g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
# Path: rvc/lib/infer_pack/models.py
class SynthesizerTrnMs256NSFsid_nono(nn.Module):
def __init__(
self,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
spk_embed_dim,
gin_channels,
sr=None,
**kwargs
):
super(SynthesizerTrnMs256NSFsid_nono, self).__init__()
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = float(p_dropout)
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.gin_channels = gin_channels
# self.hop_length = hop_length#
self.spk_embed_dim = spk_embed_dim
self.enc_p = TextEncoder256(
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
float(p_dropout),
f0=False,
)
self.dec = Generator(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
)
self.enc_q = PosteriorEncoder(
spec_channels,
inter_channels,
hidden_channels,
5,
1,
16,
gin_channels=gin_channels,
)
self.flow = ResidualCouplingBlock(
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
self.flow.remove_weight_norm()
self.enc_q.remove_weight_norm()
def __prepare_scriptable__(self):
for hook in self.dec._forward_pre_hooks.values():
# The hook we want to remove is an instance of WeightNorm class, so
# normally we would do `if isinstance(...)` but this class is not accessible
# because of shadowing, so we check the module name directly.
# https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.dec)
for hook in self.flow._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.flow)
if hasattr(self, "enc_q"):
for hook in self.enc_q._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.enc_q)
return self
@torch.jit.ignore
def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis and broadcasts
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z_p = self.flow(z, y_mask, g=g)
z_slice, ids_slice = commons.rand_slice_segments(
z, y_lengths, self.segment_size
)
o = self.dec(z_slice, g=g)
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
@torch.jit.export
def infer(
self,
phone: torch.Tensor,
phone_lengths: torch.Tensor,
sid: torch.Tensor,
rate: Optional[torch.Tensor] = None,
):
g = self.emb_g(sid).unsqueeze(-1)
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
if rate is not None:
head = int(z_p.shape[2] * (1.0 - rate.item()))
z_p = z_p[:, :, head:]
x_mask = x_mask[:, :, head:]
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec(z * x_mask, g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
# Path: rvc/lib/infer_pack/models.py
class SynthesizerTrnMs768NSFsid(nn.Module):
def __init__(
self,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
spk_embed_dim,
gin_channels,
sr,
**kwargs
):
super(SynthesizerTrnMs768NSFsid, self).__init__()
if isinstance(sr, str):
sr = sr
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = float(p_dropout)
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.gin_channels = gin_channels
# self.hop_length = hop_length#
self.spk_embed_dim = spk_embed_dim
self.enc_p = TextEncoder768(
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
float(p_dropout),
)
self.dec = GeneratorNSF(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
sr=sr,
is_half=kwargs["is_half"],
)
self.enc_q = PosteriorEncoder(
spec_channels,
inter_channels,
hidden_channels,
5,
1,
16,
gin_channels=gin_channels,
)
self.flow = ResidualCouplingBlock(
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
self.flow.remove_weight_norm()
self.enc_q.remove_weight_norm()
def __prepare_scriptable__(self):
for hook in self.dec._forward_pre_hooks.values():
# The hook we want to remove is an instance of WeightNorm class, so
# normally we would do `if isinstance(...)` but this class is not accessible
# because of shadowing, so we check the module name directly.
# https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.dec)
for hook in self.flow._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.flow)
if hasattr(self, "enc_q"):
for hook in self.enc_q._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.enc_q)
return self
@torch.jit.ignore
def forward(
self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
): # ds is the speaker id, shape [bs, 1]
# print(1,pitch.shape)#[bs,t]
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis and broadcasts
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z_p = self.flow(z, y_mask, g=g)
z_slice, ids_slice = commons.rand_slice_segments(
z, y_lengths, self.segment_size
)
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
# print(-2,pitchf.shape,z_slice.shape)
o = self.dec(z_slice, pitchf, g=g)
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
@torch.jit.export
def infer(
self,
phone: torch.Tensor,
phone_lengths: torch.Tensor,
pitch: torch.Tensor,
nsff0: torch.Tensor,
sid: torch.Tensor,
rate: Optional[torch.Tensor] = None,
):
g = self.emb_g(sid).unsqueeze(-1)
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
if rate is not None:
head = int(z_p.shape[2] * (1.0 - rate.item()))
z_p = z_p[:, :, head:]
x_mask = x_mask[:, :, head:]
nsff0 = nsff0[:, head:]
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec(z * x_mask, nsff0, g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
# Path: rvc/lib/infer_pack/models.py
class SynthesizerTrnMs768NSFsid_nono(nn.Module):
def __init__(
self,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
spk_embed_dim,
gin_channels,
sr=None,
**kwargs
):
super(SynthesizerTrnMs768NSFsid_nono, self).__init__()
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = float(p_dropout)
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.gin_channels = gin_channels
# self.hop_length = hop_length#
self.spk_embed_dim = spk_embed_dim
self.enc_p = TextEncoder768(
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
float(p_dropout),
f0=False,
)
self.dec = Generator(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
)
self.enc_q = PosteriorEncoder(
spec_channels,
inter_channels,
hidden_channels,
5,
1,
16,
gin_channels=gin_channels,
)
self.flow = ResidualCouplingBlock(
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
self.flow.remove_weight_norm()
self.enc_q.remove_weight_norm()
def __prepare_scriptable__(self):
for hook in self.dec._forward_pre_hooks.values():
# The hook we want to remove is an instance of WeightNorm class, so
# normally we would do `if isinstance(...)` but this class is not accessible
# because of shadowing, so we check the module name directly.
# https://github.com/pytorch/pytorch/blob/be0ca00c5ce260eb5bcec3237357f7a30cc08983/torch/nn/utils/__init__.py#L3
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.dec)
for hook in self.flow._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.flow)
if hasattr(self, "enc_q"):
for hook in self.enc_q._forward_pre_hooks.values():
if (
hook.__module__ == "torch.nn.utils.weight_norm"
and hook.__class__.__name__ == "WeightNorm"
):
torch.nn.utils.remove_weight_norm(self.enc_q)
return self
@torch.jit.ignore
def forward(self, phone, phone_lengths, y, y_lengths, ds): # ds is the speaker id, shape [bs, 1]
g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is the time axis and broadcasts
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z_p = self.flow(z, y_mask, g=g)
z_slice, ids_slice = commons.rand_slice_segments(
z, y_lengths, self.segment_size
)
o = self.dec(z_slice, g=g)
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
@torch.jit.export
def infer(
self,
phone: torch.Tensor,
phone_lengths: torch.Tensor,
sid: torch.Tensor,
rate: Optional[torch.Tensor] = None,
):
g = self.emb_g(sid).unsqueeze(-1)
m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
if rate is not None:
head = int(z_p.shape[2] * (1.0 - rate.item()))
z_p = z_p[:, :, head:]
x_mask = x_mask[:, :, head:]
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec(z * x_mask, g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
# Path: rvc/configs/config.py
class Config:
def __init__(self):
self.device = "cuda:0"
self.is_half = True
self.use_jit = False
self.n_cpu = 0
self.gpu_name = None
self.json_config = self.load_config_json()
self.gpu_mem = None
self.instead = ""
self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
@staticmethod
def load_config_json() -> dict:
d = {}
for config_file in version_config_list:
with open(f"rvc/configs/{config_file}", "r") as f:
d[config_file] = json.load(f)
return d
@staticmethod
def has_mps() -> bool:
if not torch.backends.mps.is_available():
return False
try:
torch.zeros(1).to(torch.device("mps"))
return True
except Exception:
return False
@staticmethod
def has_xpu() -> bool:
if hasattr(torch, "xpu") and torch.xpu.is_available():
return True
else:
return False
def use_fp32_config(self):
for config_file in version_config_list:
self.json_config[config_file]["train"]["fp16_run"] = False
with open(f"rvc/configs/{config_file}", "r") as f:
strr = f.read().replace("true", "false")
with open(f"rvc/configs/{config_file}", "w") as f:
f.write(strr)
with open("rvc/train/preprocess/preprocess.py", "r") as f:
strr = f.read().replace("3.7", "3.0")
with open("rvc/train/preprocess/preprocess.py", "w") as f:
f.write(strr)
def device_config(self) -> tuple:
if torch.cuda.is_available():
if self.has_xpu():
self.device = self.instead = "xpu:0"
self.is_half = True
i_device = int(self.device.split(":")[-1])
self.gpu_name = torch.cuda.get_device_name(i_device)
if (
("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
or "P40" in self.gpu_name.upper()
or "P10" in self.gpu_name.upper()
or "1060" in self.gpu_name
or "1070" in self.gpu_name
or "1080" in self.gpu_name
):
self.is_half = False
self.use_fp32_config()
self.gpu_mem = int(
torch.cuda.get_device_properties(i_device).total_memory
/ 1024
/ 1024
/ 1024
+ 0.4
)
if self.gpu_mem <= 4:
with open("rvc/train/preprocess/preprocess.py", "r") as f:
strr = f.read().replace("3.7", "3.0")
with open("rvc/train/preprocess/preprocess.py", "w") as f:
f.write(strr)
elif self.has_mps():
print("No supported Nvidia GPU found")
self.device = self.instead = "mps"
self.is_half = False
self.use_fp32_config()
else:
print("No supported Nvidia GPU found")
self.device = self.instead = "cpu"
self.is_half = False
self.use_fp32_config()
if self.n_cpu == 0:
self.n_cpu = cpu_count()
if self.is_half:
x_pad = 3
x_query = 10
x_center = 60
x_max = 65
else:
x_pad = 1
x_query = 6
x_center = 38
x_max = 41
if self.gpu_mem is not None and self.gpu_mem <= 4:
x_pad = 1
x_query = 5
x_center = 30
x_max = 32
return x_pad, x_query, x_center, x_max
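# Hypothetical smoke test of the Config class above; it assumes the rvc/configs/*.json
# files read by load_config_json() exist relative to the working directory and that a
# GPU/CPU device can be probed. Nothing in this snippet comes from the repository.
_cfg = Config()
print(_cfg.device, _cfg.is_half, (_cfg.x_pad, _cfg.x_query, _cfg.x_center, _cfg.x_max))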
# Path: rvc/infer/infer.py
import os
import sys
import torch
import numpy as np
import soundfile as sf
from vc_infer_pipeline import VC
from rvc.lib.utils import load_audio
from fairseq import checkpoint_utils
from rvc.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc.configs.config import Config
config = Config()
torch.manual_seed(114514)
hubert_model = None
def load_hubert():
global hubert_model
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
["hubert_base.pt"],
suffix="",
)
hubert_model = models[0]
hubert_model = hubert_model.to(config.device)
if config.is_half:
| hubert_model = hubert_model.half() |
====REPOSITORY====
# Repo Name: SubConv/SubConv
# Path: modules/convert/util.py
def RandUserAgent() -> str:
return userAgents[random.randint(0, len(userAgents) - 1)]
# Path: modules/convert/util.py
def get(content):
if content is None:
return ""
else:
return content
# Path: modules/convert/util.py
def uniqueName(names: dict, name):
index = names.get(name)
if index is None:
index = 0
names[name] = index
else:
index += 1
names[name] = index
name = "%s-%02d" % (name, index)
return name
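# Minimal check of the de-duplication behaviour defined above; the dict and the
# name "proxy" are made-up illustration values.
_names = {}
print(uniqueName(_names, "proxy"))  # first use  -> "proxy"
print(uniqueName(_names, "proxy"))  # second use -> "proxy-01"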
# Path: modules/convert/util.py
def urlSafe(string):
return string.replace("+", "-").replace("/", "_")
# Path: modules/convert/util.py
def base64RawStdDecode(encoded):
return base64.b64decode(
encoded + "="*(-len(encoded)%4)
).decode("utf-8")
# Path: modules/convert/util.py
def base64RawURLDecode(encoded):
return base64.urlsafe_b64decode(
encoded + "="*(-len(encoded)%4)
).decode("utf-8")
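# Both helpers above tolerate missing base64 padding; the encoded strings below are
# made-up examples, not values from the repository.
print(base64RawStdDecode("aGVsbG8"))          # -> "hello"
print(base64RawURLDecode("aGVsbG8_d29ybGQ"))  # -> "hello?world"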
# Path: modules/convert/v.py
def handleVShareLink(names: dict, url: urlparse.ParseResult, scheme: str, proxy: dict):
query = dict(urlparse.parse_qsl(url.query))
proxy["name"] = uniqueName(names, urlparse.unquote(url.fragment))
if url.hostname == "":
raise
if url.port == "":
raise
proxy["type"] = scheme
proxy["server"] = url.hostname
proxy["port"] = url.port
proxy["uuid"] = url.username
proxy["udp"] = True
tls = get(query.get("security")).lower()
if tls.endswith("tls") or tls == "reality":
proxy["tls"] = True
fingerprint = get(query.get("fp"))
if fingerprint == "":
proxy["client-fingerprint"] = "chrome"
else:
proxy["client-fingerprint"] = fingerprint
alpn = get(query.get("alpn"))
if alpn != "":
proxy["alpn"] = alpn.split(",")
sni = get(query.get("sni"))
if sni != "":
proxy["servername"] = sni
realityPublicKey = get(query.get("pbk"))
if realityPublicKey != "":
proxy["reality-opts"] = {
"public-key": realityPublicKey,
"short-id": get(query.get("sid"))
}
switch = get(query.get("packetEncoding"))
if switch == "none" or switch == "":
pass
elif switch == "packet":
proxy["packet-addr"] = True
else:
proxy["xudp"] = True
network = get(query.get("type")).lower()
if network == "":
network = "tcp"
fakeType = get(query.get("headerType")).lower()
if fakeType == "http":
network = "http"
elif network == "http":
network = "h2"
proxy["network"] = network
if network == "tcp":
if fakeType != "none" and fakeType != "":
headers = {}
httpOpts = {}
httpOpts["path"] = "/"
host = get(query.get("host"))
if host != "":
headers["Host"] = str(host)
method = get(query.get("method"))
if method != "":
httpOpts["method"] = method
path = get(query.get("path"))
if path != "":
httpOpts["path"] = str(path)
httpOpts["headers"] = headers
proxy["http-opts"] = httpOpts
elif network == "http":
headers = {}
h2Opts = {}
h2Opts["path"] = "/"
path = get(query.get("path"))
if path != "":
h2Opts["path"] = str(path)
host = get(query.get("host"))
if host != "":
h2Opts["host"] = str(host)
h2Opts["headers"] = headers
proxy["h2-opts"] = h2Opts
elif network == "ws":
headers = {}
wsOpts = {}
headers["User-Agent"] = RandUserAgent()
headers["Host"] = get(query.get("host"))
wsOpts["path"] = get(query.get("path"))
wsOpts["headers"] = headers
earlyData = get(query.get("ed"))
if earlyData != "":
try:
med = int(earlyData)
except:
raise
wsOpts["max-early-data"] = med
earlyDataHeader = get(query.get("edh"))
if earlyDataHeader != "":
wsOpts["early-data-header-name"] = earlyDataHeader
proxy["ws-opts"] = wsOpts
elif network == "grpc":
grpcOpts = {}
grpcOpts["grpc-service-name"] = get(query.get("serviceName"))
proxy["grpc-opts"] = grpcOpts
# Path: modules/convert/converter.py
from modules.convert.util import RandUserAgent
from modules.convert.util import get
from modules.convert.util import uniqueName
from modules.convert.util import urlSafe
from modules.convert.util import base64RawStdDecode
from modules.convert.util import base64RawURLDecode
from modules.convert.v import handleVShareLink
import json
import base64
import urllib.parse as urlparse
import distutils.util
trojan["client-fingerprint"] = "chrome"
else:
trojan["client-fingerprint"] = fingerprint
proxies.append(trojan)
elif scheme == "vless":
try:
urlVless = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlVless.query))
vless = {}
try:
handleVShareLink(names, urlVless, scheme, vless)
except:
continue
flow = get(query.get("flow"))
if flow != "":
vless["flow"] = str(flow).lower()
proxies.append(vless)
elif scheme == "vmess":
try:
dcBuf = base64.b64decode(body)
except:
# Xray VMessAEAD share link
try:
urlVMess = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlVMess.query))
vmess = {}
try:
handleVShareLink(names, urlVMess, scheme, vmess)
except:
continue
vmess["alterId"] = 0
vmess["cipher"] = "auto"
encryption = get(query.get("encryption"))
if encryption != "":
vmess["cipher"] = encryption
proxies.append(vmess)
continue
values = {}
try:
values = json.loads(dcBuf)
except:
continue
try:
tempName = values["ps"]
except:
continue
name = uniqueName(names, tempName)
vmess = {}
vmess["name"] = name
vmess["type"] = scheme
vmess["server"] = values["add"]
vmess["port"] = values["port"]
vmess["uuid"] = values["id"]
alterId = values.get("aid")
if alterId is not None:
vmess["alterId"] = alterId
else:
vmess["alterId"] = 0
vmess["udp"] = True
vmess["xudp"] = True
vmess["tls"] = False
vmess["skip-cert-verify"] = False
vmess["cipher"] = "auto"
cipher = get(values.get("scy"))
if cipher != "":
vmess["cipher"] = cipher
sni = get(values.get("sni"))
if sni != "":
vmess["servername"] = sni
network = get(values.get("net")).lower()
if values.get("type") == "http":
network = "http"
elif network == "http":
network = "h2"
vmess["network"] = network
tls = values.get("tls")
if tls is not None:
tls = str(tls).lower()
if tls.endswith("tls"):
vmess["tls"] = True
alpn = values.get("alpn")
if alpn is not None and alpn != "":
vmess["alpn"] = alpn.split(",")
if network == "http":
headers = {}
httpOpts = {}
host = get(values.get("host"))
if host != "":
headers["Host"] = host
httpOpts["path"] = "/"
path = get(values.get("path"))
if path != "":
httpOpts["path"] = path
httpOpts["headers"] = headers
vmess["http-opts"] = httpOpts
elif network == "h2":
headers = {}
h2Opts = {}
host = get(values.get("host"))
if host != "":
headers["Host"] = host
h2Opts["path"] = get(values.get("path"))
| h2Opts["headers"] = headers |
====REPOSITORY====
# Repo Name: Opt-Mucca/PySCIPOpt-ML
# Path: src/pyscipopt_ml/modelling/base_predictor_constraint.py
class AbstractPredictorConstr(ABC):
"""Base class to store all information of embedded ML model by :py:func`pyscipopt_ml.add_predictor_constr`.
This class is the base class to store everything that is added to
a SCIP model when a trained predictor is inserted into it. Depending on
the type of the predictor, a class derived from it will be returned
by :py:func:`pyscipopt_ml.add_predictor_constr`.
Warning
-------
Users should usually never construct objects of this class or one of its derived
classes. They are returned by the :py:func:`pyscipopt_ml.add_predictor_constr` and
other functions.
"""
def __init__(
self, scip_model, input_vars, output_vars=None, unique_naming_prefix="", **kwargs
):
self.scip_model = scip_model
self.unique_naming_prefix = unique_naming_prefix
self._validate(input_vars, output_vars)
self._created_vars = []
self._created_cons = []
self._build_predictor_model(**kwargs)
def _validate(self, input_vars, output_vars=None):
"""Validate input and output variables (check shapes, reshape if needed)."""
# Ensure the correct type of input and output is given
if type(input_vars) not in [list, np.ndarray]:
raise ParameterError(
f"Input variables are not type list or np.ndarray. They are type {type(input_vars)}."
)
if output_vars is not None:
if not isinstance(output_vars, list) and not isinstance(output_vars, np.ndarray):
raise ParameterError(
f"Output variables are not type list or np.ndarray. They are type {type(output_vars)}."
)
# Transform the type list to type np.ndarray
if isinstance(input_vars, list):
input_vars = np.array(input_vars, dtype=object)
if isinstance(output_vars, list):
output_vars = np.array(output_vars, dtype=object)
# Change the dimension of the input variables if needed. (Always want number of data points first)
if input_vars.ndim == 1:
input_vars = input_vars.reshape((1, -1))
if input_vars.ndim >= 3:
input_vars = input_vars.reshape((input_vars.shape[0], -1))
# In the case of the output being None, create the appropriate output variables here
if output_vars is None:
output_vars = self._create_output_vars(input_vars)
# Change the dimensions of the output variables if needed (Always want the number of data points first)
if output_vars.ndim == 1:
if input_vars.shape[0] == 1:
output_vars = output_vars.reshape((1, -1))
else:
output_vars = output_vars.reshape((-1, 1))
# Ensure that the variable dimensions match that of the predictor
if hasattr(self, "input_size") and input_vars.shape[-1] != self.input_size:
raise ParameterError(
f"Input variables dimension don't conform with predictor {type(self)} "
+ f"Input variable dimensions: {input_vars.shape[-1]} != {self.input_size}"
)
if hasattr(self, "output_size") and output_vars.shape[-1] != self.output_size:
raise ParameterError(
f"Output variable dimensions don't conform with predictor {type(self)} "
+ f"Output variable dimensions: {output_vars.shape[-1]} != {self.output_size}"
)
if output_vars.shape[0] != input_vars.shape[0]:
raise ParameterError(
"Non-conforming dimension between input variables and output variables: "
+ f"{output_vars.shape[0]} != {input_vars.shape[0]}"
)
self._input = input_vars
self._output = output_vars
def _build_predictor_model(self, **kwargs):
self._mip_model(**kwargs)
def print_stats(self, file=None):
"""Print statistics on model additions stored by this class.
This function prints detailed statistics on the variables
and constraints that were added to the model.
Arguments
---------
file: None, optional
Text stream to which output should be redirected. By default, this is sys.stdout.
"""
n_indicator_cons = 0
n_sos_cons = 0
n_linear_cons = 0
created_cons = self._created_cons
created_vars = self._created_vars
if hasattr(self, "_estimators"):
for estimator in self._estimators:
created_cons += estimator._created_cons
created_vars += estimator._created_vars
if hasattr(self, "_layers"):
for layer in self._layers:
created_cons += layer._created_cons
created_vars += layer._created_vars
for cons_set in created_cons:
it = np.nditer(cons_set, flags=["multi_index", "refs_ok"])
for _ in it:
if isinstance(cons_set[it.multi_index], Constraint):
cons_type = cons_set[it.multi_index].getConshdlrName()
if cons_type == "indicator":
n_indicator_cons += 1
elif cons_type == "SOS1":
n_sos_cons += 1
elif cons_type == "linear":
n_linear_cons += 1
else:
raise TypeError(
f"Cons {cons_set[it.multi_index]} is of unknown type {cons_type}"
)
n_bin_vars = 0
n_cont_vars = 0
for var_set in created_vars:
it = np.nditer(var_set, flags=["multi_index", "refs_ok"])
for _ in it:
if isinstance(var_set[it.multi_index], Variable):
var_type = var_set[it.multi_index].vtype()
if var_type == "BINARY":
n_bin_vars += 1
elif var_type == "CONTINUOUS":
n_cont_vars += 1
else:
raise TypeError(
f"Var {var_set[it.multi_index]} is of unknown type {var_type}"
)
print(
f"Constraints created:\n Linear {n_linear_cons}\n Indicator {n_indicator_cons}\n "
f"SOS1 {n_sos_cons}\n"
f"Created (internal) variables:\n Binary {n_bin_vars}\n Continuous {n_cont_vars}\n"
f"Input Shape: {self.input.shape}\nOutput Shape: {self.output.shape}",
file=file,
)
def _create_output_vars(self, input_vars):
"""May be defined in derived class to create the output variables of predictor."""
if (not hasattr(self, "_output") or self._output is None) and (
not hasattr(self, "output_size") or self.output_size is None
):
raise AttributeError
if not hasattr(self, "_output") or self._output is None:
if hasattr(self, "classification"):
if self.classification:
vtype = "B"
else:
vtype = "C"
else:
vtype = "C"
output_vars = create_vars(
self.scip_model,
(input_vars.shape[0], self.output_size),
vtype,
lb=None,
ub=None,
name_prefix="out",
)
return output_vars
else:
return self._output
@property
def _has_solution(self):
"""Returns true if we have a solution."""
if self.scip_model.getNSols() > 0:
return True
return False
@abstractmethod
def get_error(self, eps):
"""Returns error in SCIP's solution with respect to prediction from input.
Returns
-------
error : ndarray of same shape as
:py:attr:`pyscipopt_ml.modelling.base_predictor_constr.AbstractPredictorConstr.output`
Assuming that we have a solution for the input and output variables
`x, y`. Returns the absolute value of the differences between `predictor.predict(x)` and
`y`. Where predictor is the regression / classification model represented by this object.
Raises
------
NoSolution
If the SCIP model has no solution (either was not optimized or is infeasible).
"""
...
@abstractmethod
def _mip_model(self, **kwargs):
"""Makes MIP model for the predictor."""
...
@property
def input(self):
"""Returns the input variables of embedded predictor.
Returns
-------
output : np.ndarray
"""
return self._input
@property
def output(self):
"""Output variables of embedded predictor.
Returns
-------
output : np.ndarray
"""
return self._output
@property
def input_values(self):
"""Returns the values for the input variables if a solution is known.
Returns
-------
input_vals : np.ndarray
Raises
------
NoSolution
If SCIP has no solution (either was not optimized or is infeasible).
"""
if not self._has_solution:
raise NoSolution
input_vals = np.zeros(self.input.shape)
for i in range(self.input.shape[0]):
for j in range(self.input.shape[1]):
input_vals[i][j] = self.scip_model.getVal(self.input[i][j])
return input_vals
@property
def output_values(self):
"""Returns the values for the output variables if a solution is known.
Returns
-------
output_value : np.ndarray
Raises
------
NoSolution
If SCIP has no solution (either was not optimized or is infeasible).
"""
if not self._has_solution:
raise NoSolution
output_vals = np.zeros(self.output.shape)
for i in range(self.output.shape[0]):
for j in range(self.output.shape[1]):
output_vals[i][j] = self.scip_model.getVal(self.output[i][j])
return output_vals
def __str__(self):
return self._name
# Path: src/pyscipopt_ml/modelling/gradient_boosting/aggregate_tree_model.py
def aggregated_estimator_formulation(
scip_model,
_input,
output,
tree_vars,
trees,
constant,
lr,
n_estimators,
unique_naming_prefix,
epsilon,
aggr,
classification,
**kwargs,
):
"""
Creates the model that represents the aggregation of estimators into a single output.
This function is used exclusively for the case where the estimators are decision trees, and the larger
predictor is either a gradient boosting decision tree or random forest.
Parameters
----------
scip_model : PySCIPOpt Model
The SCIP Model where the predictor should be inserted.
_input : np.ndarray
The input variables that are passed to each decision tree
output : np.ndarray
The output variables of the predictor
tree_vars : np.ndarray
The PySCIPOpt variables that have been created to represent the output of each decision tree (i.e. estimator)
trees : list
A list of lists containing dictionary information that completely describe each decision tree (i.e. estimator)
constant : np.ndarray
An array of constant shift values that are added to the output values of each decision tree (i.e. estimator)
lr : float or int
The learning rate used while training. For GBDT / RF this scales the output of each tree
n_estimators : int
The number of decision trees (i.e. estimators)
unique_naming_prefix : str
The unique naming prefix string that goes before all variables and constraints that are constructed by SCIP
epsilon : float
The epsilon that is used for each decision tree model. See
:py:func:`pyscipopt_ml.modelling.decision_tree.leaf_formulation`.
aggr : str, "sum" or "avg"
The aggregation method used in the formulation. Either the estimators are averages or summed.
classification : bool
Whether the aggregated output of each decision tree (i.e. estimator) should be used for classification.
Returns
-------
estimators : list
A list of :py:class:`pyscipopt_ml.modelling.aggregate_tree_model.TreeEstimator`
created_vars : list
A list containing all created PySCIPOpt vars
created_cons : list
A list containing all created PySCIPOpt cons
"""
# Get the number of samples and output dimension
n_samples = _input.shape[0]
outdim = output.shape[-1]
# Create the individual tree estimators
estimators = create_tree_estimators(
scip_model,
_input,
tree_vars,
trees,
n_estimators,
outdim,
unique_naming_prefix,
epsilon,
False,
**kwargs,
)
# Aggregate the trees over the output dimension
aggregate_tree_output = aggregate_estimator_outputs(tree_vars, lr, constant, aggr=aggr)
# Formulate the appropriate constraints
created_vars, created_cons = create_aggregation_constraints(
scip_model,
aggregate_tree_output,
output,
n_samples,
outdim,
unique_naming_prefix,
classification,
)
return estimators, created_vars, created_cons
# Path: src/pyscipopt_ml/lightgbm/lgbgetter.py
class LGBgetter(AbstractPredictorConstr):
"""Utility class for lightgbm models convertors.
Implement some common functionalities: check predictor is fitted, output dimension, get error
Attributes
----------
predictor
Lightgbm predictor embedded into SCIP model.
"""
def __init__(self, predictor, input_vars, output_type="regular", **kwargs):
if not hasattr(predictor, "booster_"):
raise ParameterError(
"LightGBM model has not yet been fitted. There is nothing to model."
)
if predictor.boosting_type not in ["gbdt", "rf"]:
raise NoModel(
predictor,
f"There is only support for LightGBM boosting type gbdt and rf. "
f"Not {predictor.boosting_type}",
)
self.predictor = predictor
def extract_raw_data_and_create_tree_vars(self, epsilon=0.0):
"""
Function for extracting information from lgb._Booster and creating additional modelling variables.
Parameters
----------
epsilon : float, optional
Small value used to impose strict inequalities for splitting nodes in
MIP formulations.
Returns
-------
trees : list
A list of tree dictionaries similar in structure to that provided by SKlearn
tree_vars : np.ndarray
A numpy array filled with variables that represent output of trees from the trained LightGBM model
"""
n_samples = self.input.shape[0]
outdim = self.output.shape[1]
# Extract the raw data
raw = self.predictor.booster_.dump_model()
n_features = self.predictor.n_features_
n_trees = len(raw["tree_info"])
n_estimators = self.predictor.n_estimators_
def read_node(tree, tree_structure):
# Increase the node count and keep track of the current node index
node_idx = tree["node_count"]
tree["node_count"] += 1
# Add features specific to the node of the tree
if "split_feature" in tree_structure:
tree["feature"].append(tree_structure["split_feature"])
else:
tree["feature"].append(-1)
if "threshold" in tree_structure:
tree["threshold"].append(tree_structure["threshold"])
else:
tree["threshold"].append(-1)
if "internal_value" in tree_structure:
tree["value"].append([[tree_structure["internal_value"]]])
else:
tree["value"].append([[tree_structure["leaf_value"]]])
if "decision_type" in tree_structure and tree_structure["decision_type"] != "<=":
raise ParameterError(
f"Currently no support for LightGBM trees with decision type "
f"{tree_structure['decision_type']}"
)
# Add in dummy left and right children
tree["children_left"].append(-1)
tree["children_right"].append(-1)
# Now recursively call a new node
if "left_child" in tree_structure:
tree["children_left"][node_idx] = tree["node_count"]
tree = read_node(tree, tree_structure["left_child"])
if "right_child" in tree_structure:
tree["children_right"][node_idx] = tree["node_count"]
tree = read_node(tree, tree_structure["right_child"])
return tree
trees = [
[
{
"node_count": 0,
"n_features": n_features,
"children_left": [],
"children_right": [],
"feature": [],
"threshold": [],
"value": [],
}
for _ in range(outdim)
]
for _ in range(n_estimators)
]
trees_converted = [0 for _ in range(outdim)]
for i in range(n_trees):
tree_raw = raw["tree_info"][i]["tree_structure"]
class_idx = i % outdim
tree_idx = trees_converted[class_idx]
trees[tree_idx][class_idx] = read_node(trees[tree_idx][class_idx], tree_raw)
trees[tree_idx][class_idx]["children_left"] = np.array(
trees[tree_idx][class_idx]["children_left"], dtype=np.int32
)
trees[tree_idx][class_idx]["children_right"] = np.array(
trees[tree_idx][class_idx]["children_right"], dtype=np.int32
)
trees[tree_idx][class_idx]["feature"] = np.array(
trees[tree_idx][class_idx]["feature"], dtype=np.int32
)
trees[tree_idx][class_idx]["threshold"] = np.array(
trees[tree_idx][class_idx]["threshold"], dtype=np.float64
)
trees[tree_idx][class_idx]["value"] = np.array(
trees[tree_idx][class_idx]["value"], dtype=np.float64
)
trees_converted[class_idx] += 1
shape = (n_samples, n_estimators, outdim)
tree_vars = create_vars(
self.scip_model, shape=shape, vtype="C", lb=None, ub=None, name_prefix="tree"
)
return trees, tree_vars
def get_error(self, eps=None):
"""
Returns error in SCIP's solution with respect to the actual output of the trained predictor
Parameters
----------
eps : float or int or None, optional
The maximum allowed tolerance for a mismatch between the actual predictive model and SCIP.
If the error is larger than eps an appropriate warning is printed
Returns
-------
error: np.ndarray
The absolute values of the difference between SCIP's solution and the trained ML model's output given
the input as defined by SCIP. The matrix is the same dimension as the output of the trained predictor.
Using sklearn / pyscipopt, the absolute difference between model.predict(input) and scip.getVal(output).
Raises
------
NoSolution
If SCIP has no solution (either was not optimized or is infeasible).
"""
if self._has_solution:
if not is_classifier(self.predictor):
lgb_output_values = self.predictor.predict(self.input_values).reshape(
self.input.shape[0], self.output.shape[-1]
)
else:
lgb_class_prediction = self.predictor.predict(self.input_values)
lgb_output_values = np.zeros((self.input.shape[0], self.output.shape[-1]))
for i, class_pred in enumerate(lgb_class_prediction):
lgb_output_values[i][class_pred] = 1
scip_output_values = self.output_values
error = np.abs(lgb_output_values - scip_output_values)
max_error = np.max(error)
if eps is not None and max_error > eps:
print(
f"SCIP output values of ML model {self.predictor} have larger than max error {max_error} > {eps}"
)
return error
raise NoSolution()
# Path: src/pyscipopt_ml/lightgbm/lightgbm_constr.py
import numpy as np
from ..modelling import AbstractPredictorConstr
from ..modelling.gradient_boosting import aggregated_estimator_formulation
from .lgbgetter import LGBgetter
"""Module for formulating a LightGBM gradient boosting or random forest regressor / classifier into
a PySCIPOpt Model."""
def add_lgbregressor_constr(
scip_model,
lightgbm_regressor,
input_vars,
| output_vars=None, |
====REPOSITORY====
# Repo Name: Yanyutin753/CowAndPandoraNext
# Path: bridge/context.py
class ContextType(Enum):
TEXT = 1 # text message
VOICE = 2 # voice message
IMAGE = 3 # image message
IMAGE_CREATE = 10 # image-creation command
JOIN_GROUP = 20 # joined a group chat
PATPAT = 21 # "pat-pat" (nudge) notification
def __str__(self):
return self.name
# Path: bridge/reply.py
class Reply:
def __init__(self, type: ReplyType = None, content=None):
self.type = type
self.content = content
def __str__(self):
return "Reply(type={}, content={})".format(self.type, self.content)
# Path: bridge/reply.py
class ReplyType(Enum):
TEXT = 1 # text
VOICE = 2 # audio file
IMAGE = 3 # image file
IMAGE_URL = 4 # image URL
INFO = 9
ERROR = 10
def __str__(self):
return self.name
# Path: config.py
class Config(dict):
def __init__(self, d=None):
def __getitem__(self, key):
def __setitem__(self, key, value):
def get(self, key, default=None):
def get_user_data(self, user) -> dict:
def load_user_datas(self):
def save_user_datas(self):
def load_config():
def get_root():
def read_file(path):
def conf():
def get_appdata_dir():
def subscribe_msg():
def write_plugin_config(pconf: dict):
def pconf(plugin_name: str) -> dict:
# Path: plugins/linkai/midjourney.py
class MJBot:
def __init__(self, config):
self.base_url = conf().get("linkai_api_base", "https://api.link-ai.chat") + "/v1/img/midjourney"
self.headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
self.config = config
self.tasks = {}
self.temp_dict = {}
self.tasks_lock = threading.Lock()
self.event_loop = asyncio.new_event_loop()
def judge_mj_task_type(self, e_context: EventContext):
"""
Determine the type of the Midjourney task.
:param e_context: event context
:return: task type enum
"""
if not self.config:
return None
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
context = e_context['context']
if context.type == ContextType.TEXT:
cmd_list = context.content.split(maxsplit=1)
if cmd_list[0].lower() == f"{trigger_prefix}mj":
return TaskType.GENERATE
elif cmd_list[0].lower() == f"{trigger_prefix}mju":
return TaskType.UPSCALE
elif cmd_list[0].lower() == f"{trigger_prefix}mjv":
return TaskType.VARIATION
elif cmd_list[0].lower() == f"{trigger_prefix}mjr":
return TaskType.RESET
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix"):
return TaskType.GENERATE
def process_mj_task(self, mj_type: TaskType, e_context: EventContext):
"""
Handle a Midjourney task.
:param mj_type: Midjourney task type
:param e_context: conversation context
"""
context = e_context['context']
session_id = context["session_id"]
cmd = context.content.split(maxsplit=1)
if len(cmd) == 1 and context.type == ContextType.TEXT:
# midjourney help command
self._set_reply_text(self.get_help_text(verbose=True), e_context, level=ReplyType.INFO)
return
if len(cmd) == 2 and (cmd[1] == "open" or cmd[1] == "close"):
# midjourney enable/disable command
is_open = True
tips_text = "开启"
if cmd[1] == "close":
tips_text = "关闭"
is_open = False
self.config["enabled"] = is_open
self._set_reply_text(f"Midjourney绘画已{tips_text}", e_context, level=ReplyType.INFO)
return
if not self.config.get("enabled"):
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置")
self._set_reply_text(f"Midjourney绘画未开启", e_context, level=ReplyType.INFO)
return
if not self._check_rate_limit(session_id, e_context):
logger.warn("[MJ] midjourney task exceed rate limit")
return
if mj_type == TaskType.GENERATE:
if context.type == ContextType.IMAGE_CREATE:
raw_prompt = context.content
else:
# image generation
raw_prompt = cmd[1]
reply = self.generate(raw_prompt, session_id, e_context)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
elif mj_type == TaskType.UPSCALE or mj_type == TaskType.VARIATION:
# image upscale / variation
clist = cmd[1].split()
if len(clist) < 2:
self._set_reply_text(f"{cmd[0]} 命令缺少参数", e_context)
return
img_id = clist[0]
index = int(clist[1])
if index < 1 or index > 4:
self._set_reply_text(f"图片序号 {index} 错误,应在 1 至 4 之间", e_context)
return
key = f"{str(mj_type)}_{img_id}_{index}"
if self.temp_dict.get(key):
self._set_reply_text(f"第 {index} 张图片已经{task_name_mapping.get(str(mj_type))}过了", e_context)
return
# run the upscale / variation operation
reply = self.do_operate(mj_type, session_id, img_id, e_context, index)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
elif mj_type == TaskType.RESET:
# regenerate the image
clist = cmd[1].split()
if len(clist) < 1:
self._set_reply_text(f"{cmd[0]} 命令缺少参数", e_context)
return
img_id = clist[0]
# regenerate the image
reply = self.do_operate(mj_type, session_id, img_id, e_context)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
else:
self._set_reply_text(f"暂不支持该命令", e_context)
def generate(self, prompt: str, user_id: str, e_context: EventContext) -> Reply:
"""
Generate an image.
:param prompt: prompt text
:param user_id: user id
:param e_context: conversation context
:return: task ID
"""
logger.info(f"[MJ] image generate, prompt={prompt}")
mode = self._fetch_mode(prompt)
body = {"prompt": prompt, "mode": mode, "auto_translate": self.config.get("auto_translate")}
if not self.config.get("img_proxy"):
body["img_proxy"] = False
res = requests.post(url=self.base_url + "/generate", json=body, headers=self.headers, timeout=(5, 40))
if res.status_code == 200:
res = res.json()
logger.debug(f"[MJ] image generate, res={res}")
if res.get("code") == 200:
task_id = res.get("data").get("task_id")
real_prompt = res.get("data").get("real_prompt")
if mode == TaskMode.RELAX.value:
time_str = "1~10分钟"
else:
time_str = "1分钟"
content = f"🚀您的作品将在{time_str}左右完成,请耐心等待\n- - - - - - - - -\n"
if real_prompt:
content += f"初始prompt: {prompt}\n转换后prompt: {real_prompt}"
else:
content += f"prompt: {prompt}"
reply = Reply(ReplyType.INFO, content)
task = MJTask(id=task_id, status=Status.PENDING, raw_prompt=prompt, user_id=user_id,
task_type=TaskType.GENERATE)
# put to memory dict
self.tasks[task.id] = task
# asyncio.run_coroutine_threadsafe(self.check_task(task, e_context), self.event_loop)
self._do_check_task(task, e_context)
return reply
else:
res_json = res.json()
logger.error(f"[MJ] generate error, msg={res_json.get('message')}, status_code={res.status_code}")
if res.status_code == INVALID_REQUEST:
reply = Reply(ReplyType.ERROR, "图片生成失败,请检查提示词参数或内容")
else:
reply = Reply(ReplyType.ERROR, "图片生成失败,请稍后再试")
return reply
def do_operate(self, task_type: TaskType, user_id: str, img_id: str, e_context: EventContext,
index: int = None) -> Reply:
logger.info(f"[MJ] image operate, task_type={task_type}, img_id={img_id}, index={index}")
body = {"type": task_type.name, "img_id": img_id}
if index:
body["index"] = index
if not self.config.get("img_proxy"):
body["img_proxy"] = False
res = requests.post(url=self.base_url + "/operate", json=body, headers=self.headers, timeout=(5, 40))
logger.debug(res)
if res.status_code == 200:
res = res.json()
if res.get("code") == 200:
task_id = res.get("data").get("task_id")
logger.info(f"[MJ] image operate processing, task_id={task_id}")
icon_map = {TaskType.UPSCALE: "🔎", TaskType.VARIATION: "🪄", TaskType.RESET: "🔄"}
content = f"{icon_map.get(task_type)}图片正在{task_name_mapping.get(task_type.name)}中,请耐心等待"
reply = Reply(ReplyType.INFO, content)
task = MJTask(id=task_id, status=Status.PENDING, user_id=user_id, task_type=task_type)
# put to memory dict
self.tasks[task.id] = task
key = f"{task_type.name}_{img_id}_{index}"
self.temp_dict[key] = True
# asyncio.run_coroutine_threadsafe(self.check_task(task, e_context), self.event_loop)
self._do_check_task(task, e_context)
return reply
else:
error_msg = ""
if res.status_code == NOT_FOUND_ORIGIN_IMAGE:
error_msg = "请输入正确的图片ID"
res_json = res.json()
logger.error(f"[MJ] operate error, msg={res_json.get('message')}, status_code={res.status_code}")
reply = Reply(ReplyType.ERROR, error_msg or "图片生成失败,请稍后再试")
return reply
def check_task_sync(self, task: MJTask, e_context: EventContext):
logger.debug(f"[MJ] start check task status, {task}")
max_retry_times = 90
while max_retry_times > 0:
time.sleep(10)
url = f"{self.base_url}/tasks/{task.id}"
try:
res = requests.get(url, headers=self.headers, timeout=8)
if res.status_code == 200:
res_json = res.json()
logger.debug(f"[MJ] task check res sync, task_id={task.id}, status={res.status_code}, "
f"data={res_json.get('data')}, thread={threading.current_thread().name}")
if res_json.get("data") and res_json.get("data").get("status") == Status.FINISHED.name:
# process success res
if self.tasks.get(task.id):
self.tasks[task.id].status = Status.FINISHED
self._process_success_task(task, res_json.get("data"), e_context)
return
max_retry_times -= 1
else:
res_json = res.json()
logger.warn(f"[MJ] image check error, status_code={res.status_code}, res={res_json}")
max_retry_times -= 20
except Exception as e:
max_retry_times -= 20
logger.warn(e)
logger.warn("[MJ] end from poll")
if self.tasks.get(task.id):
self.tasks[task.id].status = Status.EXPIRED
def _do_check_task(self, task: MJTask, e_context: EventContext):
threading.Thread(target=self.check_task_sync, args=(task, e_context)).start()
def _process_success_task(self, task: MJTask, res: dict, e_context: EventContext):
"""
Handle the result of a successfully completed task.
:param task: Midjourney task
:param res: request result
:param e_context: conversation context
"""
# channel send img
task.status = Status.FINISHED
task.img_id = res.get("img_id")
task.img_url = res.get("img_url")
logger.info(f"[MJ] task success, task_id={task.id}, img_id={task.img_id}, img_url={task.img_url}")
# send img
reply = Reply(ReplyType.IMAGE_URL, task.img_url)
channel = e_context["channel"]
_send(channel, reply, e_context["context"])
# send info
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
text = ""
if task.task_type == TaskType.GENERATE or task.task_type == TaskType.VARIATION or task.task_type == TaskType.RESET:
text = f"🎨绘画完成!\n"
if task.raw_prompt:
text += f"prompt: {task.raw_prompt}\n"
text += f"- - - - - - - - -\n图片ID: {task.img_id}"
text += f"\n\n🔎使用 {trigger_prefix}mju 命令放大图片\n"
text += f"例如:\n{trigger_prefix}mju {task.img_id} 1"
text += f"\n\n🪄使用 {trigger_prefix}mjv 命令变换图片\n"
text += f"例如:\n{trigger_prefix}mjv {task.img_id} 1"
text += f"\n\n🔄使用 {trigger_prefix}mjr 命令重新生成图片\n"
text += f"例如:\n{trigger_prefix}mjr {task.img_id}"
reply = Reply(ReplyType.INFO, text)
_send(channel, reply, e_context["context"])
self._print_tasks()
return
def _check_rate_limit(self, user_id: str, e_context: EventContext) -> bool:
"""
Rate limiting for Midjourney tasks.
:param user_id: user id
:param e_context: conversation context
:return: whether the task may be generated; True: allowed, False: rate limited
"""
tasks = self.find_tasks_by_user_id(user_id)
task_count = len([t for t in tasks if t.status == Status.PENDING])
if task_count >= self.config.get("max_tasks_per_user"):
reply = Reply(ReplyType.INFO, "您的Midjourney作图任务数已达上限,请稍后再试")
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return False
task_count = len([t for t in self.tasks.values() if t.status == Status.PENDING])
if task_count >= self.config.get("max_tasks"):
reply = Reply(ReplyType.INFO, "Midjourney作图任务数已达上限,请稍后再试")
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return False
return True
def _fetch_mode(self, prompt) -> str:
mode = self.config.get("mode")
if "--relax" in prompt or mode == TaskMode.RELAX.value:
return TaskMode.RELAX.value
return mode or TaskMode.FAST.value
def _run_loop(self, loop: asyncio.BaseEventLoop):
"""
Run the event loop on the thread used to poll tasks.
:param loop: event loop
"""
loop.run_forever()
loop.stop()
def _print_tasks(self):
for id in self.tasks:
logger.debug(f"[MJ] current task: {self.tasks[id]}")
def _set_reply_text(self, content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
"""
Set the reply text.
:param content: reply content
:param e_context: conversation context
:param level: reply level
"""
reply = Reply(level, content)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
def get_help_text(self, verbose=False, **kwargs):
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
help_text = "🎨利用Midjourney进行画图\n\n"
if not verbose:
return help_text
help_text += f" - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: mjv 图片ID 图片序号\n - 重置: mjr 图片ID"
help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\""
help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\""
return help_text
def find_tasks_by_user_id(self, user_id) -> list:
result = []
with self.tasks_lock:
now = time.time()
for task in self.tasks.values():
if task.status == Status.PENDING and now > task.expiry_time:
task.status = Status.EXPIRED
logger.info(f"[MJ] {task} expired")
if task.user_id == user_id:
result.append(task)
return result
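# For orientation, the command prefixes matched by judge_mj_task_type above
# (assuming the default plugin_trigger_prefix "$"):
#   "$mj <prompt>"         -> TaskType.GENERATE
#   "$mju <img_id> <1-4>"  -> TaskType.UPSCALE
#   "$mjv <img_id> <1-4>"  -> TaskType.VARIATION
#   "$mjr <img_id>"        -> TaskType.RESET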
# Path: bridge/bridge.py
class Bridge(object):
def __init__(self):
def get_bot(self, typename):
def get_bot_type(self, typename):
def fetch_reply_content(self, query, context: Context) -> Reply:
def fetch_voice_to_text(self, voiceFile) -> Reply:
def fetch_text_to_voice(self, text) -> Reply:
def fetch_translate(self, text, from_lang="", to_lang="en") -> Reply:
def reset_bot(self):
# Path: plugins/linkai/linkai.py
import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from config import global_config
from plugins import *
from .midjourney import MJBot
from bridge import bridge
@plugins.register(
name="linkai",
desc="A plugin that supports knowledge base and midjourney drawing.",
version="0.1.0",
author="https://link-ai.tech",
)
class LinkAI(Plugin):
def __init__(self):
super().__init__()
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
self.config = super().load_config()
if self.config:
self.mj_bot = MJBot(self.config.get("midjourney"))
logger.info("[LinkAI] inited")
| def on_handle_context(self, e_context: EventContext): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nerdslab/bams
# Path: bams/data/dataset.py
class Dataset(CachedDataset):
r"""Dataset for holding time series data, with input and target features.
Caching is possible if you need to avoid processing the data every time you run
the script. The cache file will be saved in `cache_path` and will be loaded if
`cache` is set to True. Be careful when using the cache, as it will not be updated
if the data changes. Only use once the data processing pipeline is finalized.
    Deleting the cache file will force the data to be processed again.
Args:
input_feats (np.ndarray): Array of shape (num_sequences, sequence_len, num_feats).
Use np.nan for missing values or padding frames.
target_feats (np.ndarray): Array of shape (num_sequences, sequence_len, num_feats).
Use np.nan for missing values or padding frames.
ignore_frames (np.ndarray): Array of shape (num_sequences, sequence_len).
Use True for missing values or padding frames.
hoa_bins (int): Number of bins for the histograms of actions.
hoa_window (int): Window size for the histograms of actions.
cache_path (str): Path to the cache file.
cache (bool): Whether to use the cache file.
"""
def __init__(
self,
input_feats,
target_feats,
ignore_frames,
*,
hoa_bins=32,
hoa_window=30,
cache_path=None,
cache=False,
):
self.input_feats = input_feats
self.target_feats = target_feats
self.ignore_frames = ignore_frames
assert hoa_bins <= 255, "n_bins must be less than 256, got {}.".format(hoa_bins)
self.hoa_bins = hoa_bins
assert hoa_window <= 255, "hoa_window must be less than 256, got {}.".format(
hoa_window
)
self.hoa_window = hoa_window
cache_path = "./data/tmp" if cache_path is None else cache_path
cache_path = cache_path + f"_bins{self.hoa_bins}.pkl"
super().__init__(cache_path, cache)
@staticmethod
def cache_is_available(cache_path, hoa_bins):
return os.path.exists(cache_path + f"_bins{hoa_bins}.pkl")
def process(self):
# quantize the target features in order to create the histogram of actions
bins = np.zeros(
(self.hoa_bins - 1, self.target_feats.shape[-1]), dtype=np.float32
)
quantized_target_feats = np.zeros_like(self.target_feats, dtype=np.uint8)
# pre-compute histogram of actions for target features
num_feats = self.target_feats.shape[2]
for i in tqdm(range(num_feats)):
# find the range of values (low, high) for each feature
feat = self.target_feats[..., i].flatten()
feat = feat[~np.isnan(feat)]
feat = feat[np.abs(feat) > 0.1]
low, high = np.nanpercentile(feat, [0.5, 99.5])
# compute histogram
bins[..., i] = np.linspace(low, high, self.hoa_bins - 1)
quantized_target_feats[..., i] = np.digitize(
self.target_feats[..., i], bins[..., i]
).astype(np.uint8)
# normalize
self.target_feats[..., i] = self.target_feats[..., i] / np.max(
[np.abs(low), np.abs(high)]
)
# normalize input features
for i in range(self.input_feats.shape[2]):
# z-score
self.input_feats[..., i] = self.input_feats[..., i] / np.nanmax(
np.abs(self.input_feats[..., i])
)
data = dict(
input_feats=self.input_feats,
target_feats=self.target_feats,
quantized_target_feats=quantized_target_feats,
ignore_frames=self.ignore_frames,
)
return data
def __getitem__(self, item):
# make histogram of actions
quantized_target_feat = self.quantized_target_feats[
item
] # shape (sequence_len, num_feats)
ignore_frames = self.ignore_frames[item] # shape (sequence_len,)
rows, cols = np.indices(quantized_target_feat.shape)
histogram_of_actions = np.zeros(
(*quantized_target_feat.shape, self.hoa_bins), dtype=np.uint8
)
weights = np.zeros_like(self.ignore_frames[item], dtype=np.float32)
for i in range(1, self.hoa_window + 1):
histogram_of_actions[rows[:-i], cols[:-i], quantized_target_feat[:-i]] += 1
weights[:-i] += 1 - self.ignore_frames[item][i:].astype(np.float32)
histogram_of_actions = histogram_of_actions / self.hoa_window
weights = weights / self.hoa_window
ignore_frames[: -self.hoa_window] = True
data = dict(
input=self.input_feats[item],
target_hist=histogram_of_actions,
ignore_frames=self.ignore_frames[item],
ignore_weights=weights,
)
return data
def __len__(self):
return self.input_feats.shape[0]
@cached_property
def input_size(self):
return self.input_feats.shape[2]
@cached_property
def target_size(self):
return self.target_feats.shape[2]
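# Illustrative usage sketch (added for clarity; not part of the repository). It
# assumes the parent CachedDataset calls `process()` and exposes the returned
# arrays as attributes, which is not shown here, so the call is left commented.
# All sizes (8 sequences, 100 frames, 6 input / 4 target features) are arbitrary.
#
#   import numpy as np
#   rng = np.random.default_rng(0)
#   dataset = Dataset(
#       input_feats=rng.normal(size=(8, 100, 6)).astype(np.float32),
#       target_feats=rng.normal(size=(8, 100, 4)).astype(np.float32),
#       ignore_frames=np.zeros((8, 100), dtype=bool),
#       hoa_bins=32,
#       hoa_window=30,
#       cache=False,
#   )
#   sample = dataset[0]  # keys: "input", "target_hist", "ignore_frames", "ignore_weights"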
# Path: bams/data/utils.py
def diff(vec, axis=-1, h=1, padding="edge"):
assert padding in [
"zero",
"edge",
], "Padding must be one of ['zero', 'edge'],"
" got {}.".format(padding)
# move the target axis to the end
vec = np.moveaxis(vec, axis, -1)
# compute diff
dvec = np.zeros_like(vec)
dvec[..., h:] = vec[..., h:] - vec[..., :-h]
# take care of padding the beginning
if padding == "edge":
for i in range(h):
dvec[..., i] = dvec[..., h + 1]
# move the axis back to its original position
dvec = np.moveaxis(dvec, -1, axis)
return dvec
# Path: bams/data/utils.py
def to_polar_coordinates(vec):
r = np.linalg.norm(vec, axis=-1)
theta = np.arctan2(vec[..., 1], vec[..., 0])
return r, theta
# Path: bams/data/utils.py
def angle_clip(theta):
return np.mod(theta + np.pi, 2 * np.pi) - np.pi
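# Illustrative sketch (added for clarity; not part of the repository): applying the
# three helpers above to a toy 2-D trajectory. The coordinate values are arbitrary.
import numpy as np
_xy = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])  # (time, 2) positions
_vel = diff(_xy, axis=0, h=1)                          # frame-to-frame displacement, edge-padded
_speed, _heading = to_polar_coordinates(_vel)          # magnitude and angle of each step
_heading = angle_clip(_heading + 2 * np.pi)            # wrap angles back into [-pi, pi)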
# Path: bams/models/bams.py
class BAMS(nn.Module):
r"""BAMS model.
Args:
input_size (int): Number of input features.
predictor (dict): Parameters for the predictor MLP.
        encoders (dict[dict]): A dictionary of encoders, where each key is the name of
            the encoder, and each value is a dictionary of parameters for the encoder.
Each encoder is a TemporalConvNet.
"""
def __init__(
self,
input_size,
*,
predictor=None,
**encoder_kwargs,
):
super().__init__()
self.input_size = input_size
self.representation_size = 0
encoders = dict()
for name, tcn_kwargs in encoder_kwargs.items():
assert "num_inputs" not in tcn_kwargs
encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)
self.representation_size += tcn_kwargs["num_channels"][-1]
self.encoders = torch.nn.ModuleDict(encoders)
# hoa predictor (first layer is a lazy linear layer)
self.predictor = MLP(**predictor)
# byol predictors
byol_predictors = dict()
for name, tcn_kwargs in encoder_kwargs.items():
emb_dim = tcn_kwargs["num_channels"][-1]
byol_predictors[name] = nn.Sequential(
nn.Linear(emb_dim, emb_dim * 4, bias=False),
nn.BatchNorm1d(emb_dim * 4, eps=1e-5, momentum=0.1),
nn.ReLU(inplace=True),
nn.Linear(emb_dim * 4, emb_dim, bias=True),
)
self.byol_predictors = torch.nn.ModuleDict(byol_predictors)
def forward(self, x):
# input shape: (B: batch_size, L:sequence_length, N: num_feats)
# forward through TCNs
embs = OrderedDict()
byol_preds = OrderedDict()
for name, encoder in self.encoders.items():
embs[name] = encoder(x) # (B, L, N)
flattened_emb = embs[name].flatten(0, 1) # (B*L, N)
pred_emb = self.byol_predictors[name](flattened_emb)
byol_preds[name] = pred_emb.reshape(embs[name].shape)
# concatenate embeddings
h = torch.cat(list(embs.values()), dim=2) # (B, L, N)
# concatenate input and embeddings
hx = torch.cat([h, x], dim=2)
# prediction
hoa_pred = self.predictor(hx)
return embs, hoa_pred, byol_preds
def __repr__(self) -> str:
args = [
f" {name}: {encoder.__class__.__name__}"
f" (receptive field: {encoder.receptive_field},"
f" feature dim: {encoder.feat_dim})"
for name, encoder in self.encoders.items()
]
args.append(
f" predictor: {self.predictor.__class__.__name__}"
f" (input size: {self.input_size},"
f" output size: {self.predictor.out_dim})"
)
return "{}([\n{}\n])".format(self.__class__.__name__, ",\n".join(args))
# Path: bams/hoa_loss.py
class HoALoss(nn.Module):
def __init__(self, hoa_bins=32, skip_frames=60):
super().__init__()
self.hoa_bins = hoa_bins
self.skip_frames = skip_frames
def forward(self, target, pred, ignore_weights=None):
r"""
target: (B, L, N)
pred: (B, L, N)
ignore_weights: (B, L)"""
n = target.size(2)
# reshape
target = target.reshape(-1, self.hoa_bins)
pred = pred.reshape(-1, self.hoa_bins)
# make each histogram sum to 1
pred = torch.softmax(pred, dim=1)
# compute EMD using Mallow's distance
loss = earth_mover_distance(target, pred)
# ignore first `self.skip_frames` frames
ignore_weights[:, :self.skip_frames] = 1.0
ignore_weights = ignore_weights.unsqueeze(2).repeat((1, 1, n, 1))
weights = 1 - ignore_weights.view(-1)
loss = torch.sum(loss * weights) / torch.sum(weights)
return loss
# Path: mouse_triplets.py
import os
import numpy as np
import argparse
import torch
import torch.nn.functional as F
from datetime import datetime
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from bams.data import Dataset
from bams.data.utils import diff, to_polar_coordinates, angle_clip
from bams.models import BAMS
from bams import HoALoss
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--weight_decay", type=float, default=4e-5)
parser.add_argument("--log_every_step", type=int, default=50)
parser.add_argument("--ckpt_path", type=str, default=None)
args = parser.parse_args()
if args.job == "train":
train(args)
elif args.job == "compute_representations":
compute_representations(args)
def train(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# dataset
if not Dataset.cache_is_available(args.cache_path, args.hoa_bins):
print("Processing data...")
keypoints, split_mask, batch = load_mice_triplet(args.data_root)
input_feats, target_feats, ignore_frames = mouse_feature_extractor(keypoints)
else:
print("No need to process data")
input_feats = target_feats = ignore_frames = None
dataset = Dataset(
input_feats=input_feats,
target_feats=target_feats,
ignore_frames=ignore_frames,
cache_path=args.cache_path,
cache=True,
hoa_bins=args.hoa_bins,
hoa_window=30,
)
print("Number of sequences:", len(dataset))
# prepare dataloaders
train_loader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_workers=args.num_workers,
pin_memory=True,
)
# build model
model = BAMS(
input_size=dataset.input_size,
short_term=dict(num_channels=(64, 64, 32, 32), kernel_size=3),
long_term=dict(num_channels=(64, 64, 64, 32, 32), kernel_size=3, dilation=4),
predictor=dict(
hidden_layers=(-1, 256, 512, 512, dataset.target_size * args.hoa_bins),
), # frame rate = 30, 6 steps = 200ms
).to(device)
model_name = f"bams-mouse-triplet-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
writer = SummaryWriter("runs/" + model_name)
main_params = [p for name, p in model.named_parameters() if "byol" not in name]
byol_params = list(model.byol_predictors.parameters())
optimizer = optim.AdamW(
[{"params": main_params}, {"params": byol_params, "lr": args.lr * 10}],
lr=args.lr,
weight_decay=args.weight_decay,
)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200], gamma=0.1)
criterion = HoALoss(hoa_bins=args.hoa_bins, skip_frames=60)
step = 0
for epoch in tqdm(range(1, args.epochs + 1)):
step = train_loop(
model,
device,
train_loader,
optimizer,
criterion,
writer,
step,
args.log_every_step,
)
scheduler.step()
if epoch % 100 == 0:
torch.save(model.state_dict(), model_name + ".pt")
def compute_representations(args):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
keypoints, split_mask, batch = load_mice_triplet(args.data_root)
# dataset
if not Dataset.cache_is_available(args.cache_path, args.hoa_bins):
print("Processing data...")
input_feats, target_feats, ignore_frames = mouse_feature_extractor(keypoints)
else:
print("No need to process data")
input_feats = target_feats = ignore_frames = None
# only use
dataset = Dataset(
input_feats=input_feats,
target_feats=target_feats,
ignore_frames=ignore_frames,
cache_path=args.cache_path,
hoa_bins=args.hoa_bins,
hoa_window=30,
)
print("Number of sequences:", len(dataset))
# build model
model = BAMS(
input_size=dataset.input_size,
| short_term=dict(num_channels=(64, 64, 32, 32), kernel_size=3), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: camenduru/MotionDirector-hf
# Path: models/unet_3d_blocks.py
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
):
super().__init__()
resnets = []
attentions = []
temp_attentions = []
temp_convs = []
self.gradient_checkpointing = False
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
attentions.append(
Transformer2DModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
)
)
temp_attentions.append(
TransformerTemporalModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
self.attentions = nn.ModuleList(attentions)
self.temp_attentions = nn.ModuleList(temp_attentions)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
num_frames=1,
cross_attention_kwargs=None,
):
# TODO(Patrick, William) - attention mask is not used
output_states = ()
for resnet, temp_conv, attn, temp_attn in zip(
self.resnets, self.temp_convs, self.attentions, self.temp_attentions
):
if self.gradient_checkpointing:
hidden_states = cross_attn_g_c(
attn,
temp_attn,
resnet,
temp_conv,
hidden_states,
encoder_hidden_states,
cross_attention_kwargs,
temb,
num_frames,
inverse_temp=True
)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if num_frames > 1:
hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: models/unet_3d_blocks.py
class CrossAttnUpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
add_upsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
):
super().__init__()
resnets = []
temp_convs = []
attentions = []
temp_attentions = []
self.gradient_checkpointing = False
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
attentions.append(
Transformer2DModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
)
)
temp_attentions.append(
TransformerTemporalModel(
out_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
self.attentions = nn.ModuleList(attentions)
self.temp_attentions = nn.ModuleList(temp_attentions)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(
self,
hidden_states,
res_hidden_states_tuple,
temb=None,
encoder_hidden_states=None,
upsample_size=None,
attention_mask=None,
num_frames=1,
cross_attention_kwargs=None,
):
# TODO(Patrick, William) - attention mask is not used
for resnet, temp_conv, attn, temp_attn in zip(
self.resnets, self.temp_convs, self.attentions, self.temp_attentions
):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.gradient_checkpointing:
hidden_states = cross_attn_g_c(
attn,
temp_attn,
resnet,
temp_conv,
hidden_states,
encoder_hidden_states,
cross_attention_kwargs,
temb,
num_frames,
inverse_temp=True
)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if num_frames > 1:
hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: models/unet_3d_blocks.py
class DownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
):
super().__init__()
resnets = []
temp_convs = []
self.gradient_checkpointing = False
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
def forward(self, hidden_states, temb=None, num_frames=1):
output_states = ()
for resnet, temp_conv in zip(self.resnets, self.temp_convs):
if self.gradient_checkpointing:
hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
# Path: models/unet_3d_blocks.py
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=True,
upcast_attention=False,
):
super().__init__()
self.gradient_checkpointing = False
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
temp_convs = [
TemporalConvLayer(
in_channels,
in_channels,
dropout=0.1
)
]
attentions = []
temp_attentions = []
for _ in range(num_layers):
attentions.append(
Transformer2DModel(
in_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
)
)
temp_attentions.append(
TransformerTemporalModel(
in_channels // attn_num_head_channels,
attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
)
)
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
in_channels,
in_channels,
dropout=0.1
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
self.attentions = nn.ModuleList(attentions)
self.temp_attentions = nn.ModuleList(temp_attentions)
def forward(
self,
hidden_states,
temb=None,
encoder_hidden_states=None,
attention_mask=None,
num_frames=1,
cross_attention_kwargs=None,
):
if self.gradient_checkpointing:
hidden_states = up_down_g_c(
self.resnets[0],
self.temp_convs[0],
hidden_states,
temb,
num_frames
)
else:
hidden_states = self.resnets[0](hidden_states, temb)
hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames)
for attn, temp_attn, resnet, temp_conv in zip(
self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:]
):
if self.gradient_checkpointing:
hidden_states = cross_attn_g_c(
attn,
temp_attn,
resnet,
temp_conv,
hidden_states,
encoder_hidden_states,
cross_attention_kwargs,
temb,
num_frames
)
else:
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
).sample
if num_frames > 1:
hidden_states = temp_attn(hidden_states, num_frames=num_frames).sample
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
return hidden_states
# Path: models/unet_3d_blocks.py
class UpBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
prev_output_channel: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_upsample=True,
):
super().__init__()
resnets = []
temp_convs = []
self.gradient_checkpointing = False
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
temp_convs.append(
TemporalConvLayer(
out_channels,
out_channels,
dropout=0.1
)
)
self.resnets = nn.ModuleList(resnets)
self.temp_convs = nn.ModuleList(temp_convs)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, num_frames=1):
for resnet, temp_conv in zip(self.resnets, self.temp_convs):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if self.gradient_checkpointing:
hidden_states = up_down_g_c(resnet, temp_conv, hidden_states, temb, num_frames)
else:
hidden_states = resnet(hidden_states, temb)
if num_frames > 1:
hidden_states = temp_conv(hidden_states, num_frames=num_frames)
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
# Path: models/unet_3d_blocks.py
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=True,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
):
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
)
raise ValueError(f"{down_block_type} does not exist.")
# Path: models/unet_3d_blocks.py
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=True,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
):
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
)
raise ValueError(f"{up_block_type} does not exist.")
# Path: models/unet_3d_blocks.py
def transformer_g_c(transformer, sample, num_frames):
sample = g_c(custom_checkpoint(transformer, mode='temp'),
sample, num_frames, use_reentrant=False
)['sample']
return sample
# Path: models/unet_3d_condition.py
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.transformer_temporal import TransformerTemporalModel
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
transformer_g_c
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
    UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
| out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Yingyue-L/Mamba-LLaVA
# Path: llava/constants.py
IMAGE_TOKEN_INDEX = -200
# Path: llava/constants.py
DEFAULT_IMAGE_TOKEN = "<image>"
# Path: llava/constants.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: llava/constants.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: llava/constants.py
IMAGE_PLACEHOLDER = "<image-placeholder>"
# Path: llava/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: llava/model/builder.py
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda", **kwargs):
kwargs = {"device_map": device_map, **kwargs}
if device != "cuda":
kwargs['device_map'] = {"": device}
if load_8bit:
kwargs['load_in_8bit'] = True
elif load_4bit:
kwargs['load_in_4bit'] = True
kwargs['quantization_config'] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4'
)
else:
kwargs['torch_dtype'] = torch.float16
if 'llava' in model_name.lower():
# Load LLaVA model
if 'lora' in model_name.lower() and model_base is None:
warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
if 'lora' in model_name.lower() and model_base is not None:
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading LLaVA from base model...')
model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional LLaVA weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
else:
# this is probably from HF Hub
from huggingface_hub import hf_hub_download
def load_from_hf(repo_id, filename, subfolder=None):
cache_file = hf_hub_download(
repo_id=repo_id,
filename=filename,
subfolder=subfolder)
return torch.load(cache_file, map_location='cpu')
non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
if any(k.startswith('model.model.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
model.load_state_dict(non_lora_trainables, strict=False)
from peft import PeftModel
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
print('Merging LoRA weights...')
model = model.merge_and_unload()
print('Model is loaded...')
elif model_base is not None:
# this may be mm projector only
print('Loading LLaVA from base model...')
if 'mpt' in model_name.lower():
if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):
shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)
cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
elif "mamba" in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained("/data/yingyueli/hub/gpt-neox-20b")
cfg_pretrained = AutoConfig.from_pretrained(model_path)
model = LlavaMambaForCausalLM.from_pretrained(model_base, dtype=torch.float16, config=cfg_pretrained, device=device)
else:
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
cfg_pretrained = AutoConfig.from_pretrained(model_path)
model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')
mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}
model.load_state_dict(mm_projector_weights, strict=False)
else:
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
else:
# Load language model
if model_base is not None:
# PEFT model
from peft import PeftModel
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)
print(f"Loading LoRA weights from {model_path}")
model = PeftModel.from_pretrained(model, model_path)
print(f"Merging weights")
model = model.merge_and_unload()
print('Convert to FP16...')
model.to(torch.float16)
else:
use_fast = False
if 'mpt' in model_name.lower():
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)
else:
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
image_processor = None
if 'llava' in model_name.lower():
mm_use_im_start_end = getattr(model.config, "mm_use_im_start_end", False)
mm_use_im_patch_token = getattr(model.config, "mm_use_im_patch_token", True)
if mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
if mm_use_im_start_end:
tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))
vision_tower = model.get_vision_tower()
if not vision_tower.is_loaded:
vision_tower.load_model()
vision_tower.to(device=device, dtype=torch.float16)
image_processor = vision_tower.image_processor
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
# Path: llava/utils.py
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
import torch
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
# Path: llava/mm_utils.py
def process_images(images, image_processor, model_cfg):
image_aspect_ratio = getattr(model_cfg, "image_aspect_ratio", None)
new_images = []
if image_aspect_ratio == 'pad':
for image in images:
image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))
image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
new_images.append(image)
else:
return image_processor(images, return_tensors='pt')['pixel_values']
if all(x.shape == new_images[0].shape for x in new_images):
new_images = torch.stack(new_images, dim=0)
return new_images
# Path: llava/mm_utils.py
def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == 'pt':
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f'Unsupported tensor type: {return_tensors}')
return input_ids
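# Illustrative note (added for clarity; not part of the repository): for a prompt
# such as "USER: <image>\nDescribe the scene.", the helper tokenizes the text on
# each side of "<image>" and splices a single IMAGE_TOKEN_INDEX (-200) entry in
# between, keeping the BOS token if the tokenizer emits one. `tokenizer` below
# stands for any Hugging Face tokenizer loaded elsewhere; since loading one needs
# a checkpoint download, the call is left commented:
#
#   ids = tokenizer_image_token("USER: <image>\nDescribe the scene.", tokenizer,
#                               return_tensors="pt")
#   assert (ids == IMAGE_TOKEN_INDEX).sum() == 1   # exactly one image placeholder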
# Path: llava/mm_utils.py
def get_model_name_from_path(model_path):
model_path = model_path.strip("/")
model_paths = model_path.split("/")
if model_paths[-1].startswith('checkpoint-'):
return model_paths[-2] + "_" + model_paths[-1]
else:
return model_paths[-1]
# Path: llava/mm_utils.py
class KeywordsStoppingCriteria(StoppingCriteria):
def __init__(self, keywords, tokenizer, input_ids):
self.keywords = keywords
self.keyword_ids = []
self.max_keyword_len = 0
for keyword in keywords:
cur_keyword_ids = tokenizer(keyword).input_ids
if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:
cur_keyword_ids = cur_keyword_ids[1:]
if len(cur_keyword_ids) > self.max_keyword_len:
self.max_keyword_len = len(cur_keyword_ids)
self.keyword_ids.append(torch.tensor(cur_keyword_ids))
self.tokenizer = tokenizer
self.start_len = input_ids.shape[1]
def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)
self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]
for keyword_id in self.keyword_ids:
if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():
return True
outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]
for keyword in self.keywords:
if keyword in outputs:
return True
return False
def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
outputs = []
for i in range(output_ids.shape[0]):
outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores))
return all(outputs)
# Path: llava/eval/run_llava.py
import argparse
import torch
import requests
import re
from llava.constants import (
IMAGE_TOKEN_INDEX,
DEFAULT_IMAGE_TOKEN,
DEFAULT_IM_START_TOKEN,
DEFAULT_IM_END_TOKEN,
IMAGE_PLACEHOLDER,
)
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import (
process_images,
tokenizer_image_token,
get_model_name_from_path,
KeywordsStoppingCriteria,
)
from PIL import Image
from PIL import Image
from io import BytesIO
def image_parser(args):
out = args.image_file.split(args.sep)
return out
def load_image(image_file):
if image_file.startswith("http") or image_file.startswith("https"):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert("RGB")
else:
image = Image.open(image_file).convert("RGB")
return image
def load_images(image_files):
out = []
for image_file in image_files:
image = load_image(image_file)
out.append(image)
return out
def eval_model(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(
args.model_path, args.model_base, model_name
)
qs = args.query
image_token_se = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
if IMAGE_PLACEHOLDER in qs:
if model.config.mm_use_im_start_end:
qs = re.sub(IMAGE_PLACEHOLDER, image_token_se, qs)
else:
qs = re.sub(IMAGE_PLACEHOLDER, DEFAULT_IMAGE_TOKEN, qs)
else:
if model.config.mm_use_im_start_end:
qs = image_token_se + "\n" + qs
else:
qs = DEFAULT_IMAGE_TOKEN + "\n" + qs
if "llama-2" in model_name.lower():
conv_mode = "llava_llama_2"
elif "v1" in model_name.lower():
conv_mode = "llava_v1"
elif "mpt" in model_name.lower():
conv_mode = "mpt"
else:
conv_mode = "llava_v0"
if args.conv_mode is not None and conv_mode != args.conv_mode:
print(
"[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}".format(
conv_mode, args.conv_mode, args.conv_mode
)
)
else:
| args.conv_mode = conv_mode |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: khwong-c/syn-magia
# Path: magia/core.py
class Input(Signal):
"""
Representing an input signal.
It has no driver, but it is driving other signals.
It is used by both the module declaration and the module instance.
"""
def __init__(
self,
name: str, width: int, signed: bool = False,
owner_instance: Optional["Instance"] = None,
**kwargs
):
"""
I/O ports must have name and width well-defined by designers.
"""
if name is None:
raise ValueError("Input name is not set")
if width == 0:
raise ValueError("Input width is not set")
super().__init__(name=name, width=width, signed=signed, **kwargs)
self._config.signal_type = SignalType.INPUT
self._config.owner_instance = owner_instance
@property
def net_name(self) -> str:
"""
        Net name of an I/O port must be the same as the name, even when it is within an IOBundle
"""
return self.name
def elaborate(self) -> str:
"""
Elaborate the input signal in the module declaration.
:return: input logic (signed) [...]PORT_NAME
"""
port_decl = self.signal_decl().rstrip(";")
return f"input {port_decl}"
def copy(self, owner_instance: Optional["Instance"] = None) -> "Input":
"""
Copy the input signal. Driver is discarded.
I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.
:return: A new input signal with the same configuration.
"""
return Input(
name=self.name,
width=len(self),
signed=self.signed,
description=self.description,
owner_instance=owner_instance,
)
# Path: magia/core.py
class Output(Signal):
"""
Representing an output signal.
They are the starting points when we elaborate the module.
It is used by both the module declaration and the module instance.
"""
def __init__(
self,
name: str, width: int, signed: bool = False,
owner_instance: Optional["Instance"] = None,
**kwargs
):
"""
I/O ports must have name and width well-defined by designers.
"""
if name is None:
raise ValueError("Output name is not set")
if width == 0:
raise ValueError("Output width is not set")
super().__init__(name=name, width=width, signed=signed, **kwargs)
self._config.signal_type = SignalType.OUTPUT
self._config.owner_instance = owner_instance
@property
def net_name(self) -> str:
"""
        Net name of an I/O port must be the same as the name, even when it is within an IOBundle
"""
return self.name
def elaborate(self) -> str:
"""
Elaborate the output signal in the module declaration.
:return: output logic (signed) [...]PORT_NAME
"""
port_decl = self.signal_decl().rstrip(";")
return f"output {port_decl}"
def copy(self, owner_instance: Optional["Instance"] = None, **kwargs) -> "Output":
"""
Copy the output signal. Driver is discarded.
I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.
:return: A new output signal with the same configuration.
"""
return Output(
name=self.name,
width=len(self),
signed=self.signed,
description=self.description,
owner_instance=owner_instance,
)
# Path: magia/memory.py
class Memory(Synthesizable):
"""
A memory object, storing an array of signals which can be accessed by another signal as an index.
The memory object represents only the memory itself, not the logic to access it.
This class is intended to be used to elaborate as a BRAM or Distributed RAM, especially on FPGA.
In case of ASIC, consider to adopt the SRAM with `Blackbox`,
especially when the memory module is created / compiled externally.
A write port can be added to the memory object with the `write_port` method.
A read port can be added to the memory object with the `read_port` method
A read/write port can be added to the memory object with the `rw_port` method
Args:
address_width (int): The width of the address bus.
data_width (int): The width of the data bus.
name (str, optional): The name of the memory object. Defaults to None.
r_port (int, optional): The number of read ports. Defaults to 0.
w_port (int, optional): The number of write ports. Defaults to 0.
rw_port (int, optional): The number of read/write ports. Defaults to 0.
rw_write_through (bool, optional): Whether the read/write port is write through. Defaults to True.
registered_read (bool, optional): Whether the reading output of the read port is registered. Defaults to False.
"""
new_mem_counter = count(0)
_MEM_DECL_TEMPLATE = string.Template("logic $width $name $size;")
def __init__(
self,
clk: Input, address_width: int, data_width: int,
name: Optional[str] = None,
r_port: int = 0,
w_port: int = 0,
rw_port: int = 0,
rw_write_through: bool = True,
registered_read: bool = True,
**kwargs
):
if not r_port and not w_port and not rw_port:
raise ValueError("Memory must have at least one port")
if rw_port > 2:
raise ValueError("Memory can have at most 2 read/write ports")
if rw_port == 2 and (r_port or w_port):
raise ValueError("Memory in True Dual Port mode cannot have read or write ports")
if rw_port and w_port:
raise ValueError("Memory with Read/Write port cannot have extra write port")
if not rw_port and w_port and not r_port:
raise ValueError("Memory with Write port must have at least one read port")
if rw_port and not registered_read:
raise ValueError("Memory with Read/Write port must have registered read port")
memory_size = 1 << address_width
if name is None:
name = f"{memory_size}_{data_width}_{next(Memory.new_mem_counter)}"
name = f"mem_{name}"
super().__init__(**kwargs)
self._config = MemoryConfig(
address_width=address_width,
data_width=data_width,
name=name,
)
self._clk = clk
self._read_ports = [MemReadPort(self, f"{i}", registered_read) for i in range(r_port)]
self._write_ports = [MemWritePort(self, f"{i}") for i in range(w_port)]
self._rw_ports = [MemRWPort(self, f"{i}", rw_write_through) for i in range(rw_port)]
@property
def port_count(self) -> tuple[int, int, int]:
return len(self._read_ports), len(self._write_ports), len(self._rw_ports)
def read_port(self, index: int = 0) -> MemReadPort:
return self._read_ports[index]
def write_port(self, index: int = 0) -> MemWritePort:
return self._write_ports[index]
def rw_port(self, index: int = 0) -> MemRWPort:
return self._rw_ports[index]
@property
def drivers(self) -> list[Signal]:
return [
signal
for port in self._read_ports + self._write_ports + self._rw_ports
for signal in port.signals
if not signal.drive_by_mem
]
@property
def clk(self) -> Input:
return self._clk
def elaborate(self) -> str:
mem_decl = self._MEM_DECL_TEMPLATE.substitute(
width=f"[{self.data_width - 1}:0]",
name=self.net_name,
size=f"[0:{self.size - 1}]",
)
port_impl = "\n".join(port.elaborate() for port in self._write_ports + self._rw_ports + self._read_ports)
return "\n".join((mem_decl, port_impl))
@property
def size(self) -> int:
return 1 << self._config.address_width
@property
def address_width(self) -> int:
return self._config.address_width
@property
def data_width(self) -> int:
return self._config.data_width
@property
def name(self) -> str:
return self._config.name
@property
def net_name(self) -> str:
"""
Memory does not belong to any bundle.
net_name is the same as name.
"""
return self._config.name
@classmethod
def sdp(cls, clk: Input, address_width: int, data_width: int, **kwargs) -> "Memory":
"""
Create a Simple Dual Port memory.
"""
return cls(clk, address_width, data_width, r_port=1, w_port=1, **kwargs)
@classmethod
def tdp(cls, clk: Input, address_width: int, data_width: int, **kwargs) -> "Memory":
"""
Create a True Dual Port memory.
"""
return cls(clk, address_width, data_width, rw_port=2, registered_read=True, **kwargs)
@classmethod
def sp(cls, clk: Input, address_width: int, data_width: int, **kwargs) -> "Memory":
"""
Create a Single Port memory.
"""
return cls(clk, address_width, data_width, rw_port=1, registered_read=True, **kwargs)
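# Illustrative sketch (added for clarity; not part of the repository): declaring a
# Simple Dual Port RAM. The clock signal name and the 8-bit address / 32-bit data
# widths are arbitrary choices for the example.
_example_clk = Input("clk", 1)
_example_ram = Memory.sdp(_example_clk, address_width=8, data_width=32)  # 256 x 32-bit
assert _example_ram.port_count == (1, 1, 0)  # one read port, one write port, no R/W port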
# Path: magia/module.py
class Module(Synthesizable):
"""
A module is a collection of signals and operations. It can also include other modules.
The module is the base class of specialized modules.
Developers can define the generic behavior of the module in a dynamic way,
while each `Module` objects is a specialized module initialized with specific parameters.
The SystemVerilog Keyword `parameters` is not used here.
It is because we can generate the code for the specialized module with parametrized values hard-coded.
The module can be instantiated with the `instance` method.
Designers shall implement the circuit logic in the `__init__` method.
    However, we highly recommend that designers extract the logic implementation into a separate method.
e.g.
def __init__(self, **kwargs):
self.io += Input("a", 8)
self.io += Output("q", 8)
self.implement()
def implement(self):
self.io.q <<= self.io.a + 1
"""
_MOD_DECL_TEMPLATE = Template("module $name (\n$io\n);")
_new_module_counter = count(0)
output_file: Optional[PathLike] = None
def __init__(self, name: Optional[str] = None, **kwargs):
super().__init__(**kwargs)
# Get the arguments passed to the __init__ method of the inherited class
# === DON'T REFACTOR BELOW. We are inspecting the stack and refactoring will affect the result ===
children_local = inspect.stack(0)[1].frame.f_locals
children_class = children_local.get("__class__")
func_signature = inspect.signature(children_class.__init__) if children_class else {}
self._mod_params = OrderedDict(**{
arg: children_local[arg]
for arg, param in func_signature.parameters.items()
if param.kind not in (param.VAR_KEYWORD, param.VAR_POSITIONAL) and arg != "self"
})
# === DON'T REFACTOR ABOVE ===
if name is None:
name = f"{self.__class__.__name__}_{next(self._new_module_counter)}"
self._config = ModuleConfig(
module_class=type(self),
name=name,
)
self.io = IOBundle()
def validate(self) -> list[Exception]:
undriven_outputs = [
output.net_name
for output in self.io.outputs
if output.driver() is None
]
if undriven_outputs:
return [
ValueError("Output not driven", output)
for output in undriven_outputs
]
return []
def mod_declaration(self) -> str:
mod_decl = self._MOD_DECL_TEMPLATE.substitute(
name=self.name,
io=",\n".join(
port.elaborate()
for port in self.io.inputs + self.io.outputs
),
)
return "\n".join((mod_decl, self._module_elab_doc))
def elaborate(self) -> tuple[str, set["Module"]]:
"""
Trace nets and operations from output ports
This method generates the SystemVerilog code for the module.
:return: The SystemVerilog code for the module, and the list of submodules of the instance in the module.
"""
violations = self.validate()
if violations:
raise ValueError(f"Module {self.name} is not valid.", violations)
mod_decl = self.mod_declaration()
signals, insts = self.trace()
mod_impl = [
inst.elaborate()
for inst in insts
]
mod_impl += [
signal.elaborate()
for signal in signals
]
mod_impl = "\n".join(mod_impl)
mod_output_assignment = "\n".join(
Signal._SIGNAL_ASSIGN_TEMPLATE.substitute(
name=output.net_name,
driver=output.driver().net_name,
)
for output in self.io.outputs
)
extra_code = self.post_elaborate()
mod_end = "endmodule"
sv_code = "\n".join((mod_decl, mod_impl, mod_output_assignment, extra_code, mod_end))
submodules = {inst.module for inst in insts}
return sv_code, submodules
def post_elaborate(self) -> str:
"""
Override this method to add extra code to the module.
The code will be added after the elaboration of the module.
Adding assertions to the module is a typical use case.
:return: The extra code to be added to the module.
"""
_ = self # Stub to avoid IDE/Lint warning
return ""
def trace(self) -> tuple[list[Union[Signal, Memory]], list["Instance"]]:
"""
Trace nets and instances from output ports
"""
traced_sig_id: set[int] = set()
traced_inst_id: set[int] = set()
traced_signal: list[Union[Signal, Memory]] = []
traced_inst: list[Instance] = []
sig_to_be_traced: dict[int, Signal] = {}
for output in self.io.outputs:
sig_to_be_traced |= {
id(sig): sig
for sig in output.drivers
}
while sig_to_be_traced:
next_trace = {}
for signal_id, signal in sig_to_be_traced.items():
# Tracing Instances with Output connected
if signal.type == SignalType.OUTPUT:
inst: Optional[Instance] = signal.owner_instance
if inst is not None and id(inst) not in traced_inst_id:
traced_inst_id.add(id(inst))
traced_inst.append(inst)
# The Input port of the instance is skipped
# We will go directly to the driver as it must be driven by another signal.
input_drivers = [i.driver() for i in inst.inputs.values()]
next_trace |= {
id_sig: sig
for sig in input_drivers
if (id_sig := id(sig)) not in traced_sig_id
}
elif signal.type != SignalType.INPUT and signal_id not in traced_sig_id:
traced_sig_id.add(signal_id)
traced_signal.append(signal)
next_trace |= {
id_sig: sig
for sig in signal.drivers
if sig.type not in (SignalType.INPUT,)
and (id_sig := id(sig)) not in traced_sig_id
}
if signal.type == SignalType.MEMORY:
signal: MemorySignal
if id(signal.memory) not in traced_sig_id:
traced_sig_id.add(id(signal.memory))
traced_signal.append(signal.memory)
next_trace |= {
id_sig: sig
for sig in signal.memory.drivers
if (id_sig := id(sig)) not in traced_sig_id
}
sig_to_be_traced = next_trace
traced_signal.reverse()
traced_inst.reverse()
# Check if we have name conflict on the signals and instances
sig_name_counter = Counter(sig.net_name for sig in traced_signal)
inst_name_counter = Counter(inst.name for inst in traced_inst)
sig_conflicts = [name for name, cnt in sig_name_counter.items() if cnt > 1]
inst_conflicts = [name for name, cnt in inst_name_counter.items() if cnt > 1]
if sig_conflicts:
raise ValueError(f"Signal name conflict: {sig_conflicts}")
if inst_conflicts:
raise ValueError(f"Instance name conflict: {inst_conflicts}")
return traced_signal, traced_inst
def instance(
self, name: Optional[str] = None,
io: Optional[dict[str, Signal]] = None
) -> "Instance":
"""
Create an instance of the module
:return: The created instance
"""
return Instance(
module=self,
name=name,
io=io,
)
@property
def name(self) -> str:
return self._config.name
@property
def params(self) -> dict[str, object]:
"""
Return the parameters used to specialize this module.
"""
return self._mod_params
@property
def _module_elab_doc(self) -> str:
"""
Generate the summary of a module and register it to the module.
It will be written into the SystemVerilog code during elaboration.
"""
doc = self._module_doc_str
if self.params:
doc += "\nModule Parameters:\n"
doc += "-----------------\n"
doc += "\n".join(
f"{k}: {v}"
for k, v in self.params.items()
) + "\n"
if doc:
doc = f"/*\n{doc}*/\n"
return doc
@property
def _module_doc_str(self) -> str:
doc = inspect.getdoc(self.__class__)
if doc is None or doc == inspect.getdoc(Module):
return ""
if not doc.endswith("\n"):
return doc + "\n"
return doc
@cached_property
def _module_init_param_doc(self) -> dict[str, str]:
params = [(k, f"{k}:") for k in self._mod_params]
doc = inspect.getdoc(self.__init__)
if doc is None:
return {}
result_doc = {}
possible_param = [line.strip() for line in doc.split("\n") if ":" in line]
for line in possible_param:
for param, sep in params:
if sep in line:
result_doc[param] = line.split(sep, 1)[-1].strip()
return result_doc
@property
def spec(self) -> dict[str, object]:
"""
Return the "Specification" of a specialized Module.
It is a dictionary which can be further processed.
"""
return {
"name": self.name,
"description": self._module_doc_str.strip(),
"parameters": [
{
"name": k,
"value": v,
"description": self._module_init_param_doc.get(k, ""),
}
for k, v in self.params.items()
],
"ports": [
{
"name": alias,
"direction": signal.type.name,
"width": len(signal),
"signed": signal.signed,
"description": signal.description,
}
for alias, signal in self.io.signals.items()
],
}
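# --- Editor's illustrative sketch (not part of the original repository) ---
# A small concrete Module following the pattern recommended in the class
# docstring above: declare the IO in __init__, keep the logic in a separate
# method, then call elaborate() to obtain the SystemVerilog code. `AddOne`
# is a hypothetical example, not a module defined in the repository.
class AddOne(Module):
    """Adds one to an 8-bit input."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.io += Input("a", 8)
        self.io += Output("q", 8)
        self.implement()

    def implement(self):
        self.io.q <<= self.io.a + 1

sv_code, submodules = AddOne(name="add_one").elaborate()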
# Path: tests/test_memory.py
from pathlib import Path
from cocotb.clock import Clock
from cocotb.triggers import FallingEdge
from cocotb_test.simulator import run as sim_run
from magia import Input, Memory, Module, Output
import cocotb
import tests.helper as helper
async def drive_spram(dut):
clock = Clock(dut.clk, 10, units="ns")
cocotb.start_soon(clock.start())
await FallingEdge(dut.clk) # Synchronize with the clock
dut.wen.value = 1
dut.addr.value = 0x12
| dut.din.value = 0xAB |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: batmanlab/DrasCLR
# Path: models/cnn3d.py
class Encoder(nn.Module):
def __init__(self, rep_dim, moco_dim, num_experts, num_coordinates):
super(Encoder, self).__init__()
self.rep_dim = rep_dim
self.moco_dim = moco_dim
self.num_experts = num_experts
self.num_coordinates = num_coordinates
self.conv1 = Conv3d(1, 8, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)
self.bn1 = nn.BatchNorm3d(8)
self.act = nn.ELU()
self.conv2 = Conv3d(8, 8, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)
self.bn2 = nn.BatchNorm3d(8)
self.downsample1 = Block(8, 16, self.num_experts, self.num_coordinates)
self.downsample2 = Block(16, 32, self.num_experts, self.num_coordinates)
self.downsample3 = Block(32, 64, self.num_experts, self.num_coordinates)
self.conv3 = Conv3d(64, 128, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)
self.bn3 = nn.BatchNorm3d(128)
self.conv4 = Conv3d(128, rep_dim, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)
self.bn4 = nn.BatchNorm3d(rep_dim)
self.fc = nn.Linear(rep_dim, moco_dim)
def forward(self, x, loc):
x = self.conv1(x, loc)
x = self.bn1(x)
x = self.act(x)
x = self.conv2(x, loc)
x = self.bn2(x)
x = self.act(x)
x = self.downsample1(x, loc)
x = self.downsample2(x, loc)
x = self.downsample3(x, loc)
x = self.conv3(x, loc)
x = self.bn3(x)
x = self.act(x)
x = self.conv4(x, loc)
x = self.bn4(x)
x = self.act(x)
h = torch.flatten(x, 1)
z = self.fc(h)
return z, h
# Path: models/builder.py
class DrasCLR(nn.Module):
def __init__(self, base_encoder, num_patch, rep_dim, moco_dim, num_experts, num_coordinates, K, m, T, mlp):
"""
dim: feature dimension (default: 128)
K: queue size; number of negative keys (default: 65536)
m: moco momentum of updating key encoder (default: 0.999)
T: softmax temperature (default: 0.07)
"""
super(DrasCLR, self).__init__()
self.K = K
self.m = m
self.T = T
self.num_locs = num_patch # add the new dimension of number of locations
# create the encoders
# num_classes is the output fc dimension
self.encoder_q = base_encoder(rep_dim=rep_dim, moco_dim=moco_dim, num_experts=num_experts, num_coordinates=num_coordinates)
self.encoder_k = base_encoder(rep_dim=rep_dim, moco_dim=moco_dim, num_experts=num_experts, num_coordinates=num_coordinates)
if mlp: # hack: brute-force replacement
dim_mlp = self.encoder_q.fc.weight.shape[1]
self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)
self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data) # initialize
param_k.requires_grad = False # not update by gradient
# create the queue
self.register_buffer("queue", torch.randn(moco_dim, K, self.num_locs)) # the queue should be the size of (dim of reps) * (number of negative pairs) * (number of total locations)
self.queue = nn.functional.normalize(self.queue, dim=0) # normalize patch representation
self.register_buffer("queue_ptr", torch.zeros(self.num_locs, dtype=torch.long)) # set pointer in buffer to 1 for each path location
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""
Momentum update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys, patch_idx):
# gather keys before updating queue
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = self.queue_ptr
assert self.K % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr[patch_idx]:ptr[patch_idx] + batch_size, patch_idx] = keys.T
ptr[patch_idx] = (ptr[patch_idx] + batch_size) % self.K # move pointer
self.queue_ptr = ptr
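# Editor's note (added for clarity, not in the original source): with K = 8 and
# batch_size = 4, the pointer for a given patch location cycles 0 -> 4 -> 0 -> ...,
# so each incoming key batch overwrites the oldest columns of
# queue[:, :, patch_idx]; the assert K % batch_size == 0 guarantees the pointer
# always lands on a batch boundary.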
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward(self, patch_idx, pch_q, pch_k, ngb_q):
"""
Input:
pch_q: a batch of query patches (with their locations)
pch_k: a batch of key patches (with their locations)
Output:
logits, targets
"""
# compute query patch features
q, h_q = self.encoder_q(pch_q[0], pch_q[1]) # queries: NxC, encoder needs to take both patches and their locations as inputs
q = nn.functional.normalize(q, dim=1)
# compute query neighbor features
ngb_flatten = ngb_q[0].reshape(-1, 32, 32, 32)
loc_flatten = ngb_q[1].reshape(-1, 3)
r, h_r = self.encoder_q(ngb_flatten[:, None, :, :, :], loc_flatten)
r = nn.functional.normalize(r, dim=1)
r = r.reshape(ngb_q[0].shape[0], ngb_q[0].shape[1], -1) # queries: N * R * C, samples * k-neighbors * channels
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
pch_k[0], idx_unshuffle = self._batch_shuffle_ddp(pch_k[0])
k, h_k = self.encoder_k(pch_k[0], pch_k[1]) # keys: N * C
k = nn.functional.normalize(k, dim=1)
# undo shuffle
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# patch InfoNCE logits
# Einstein sum is more intuitive
# positive logits: N * 1
l_pos_pch = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: N * K
negs = self.queue[:,:,patch_idx].clone().detach() # compute negative logits for each patch in the batch conditioned on its location
l_neg_pch = torch.einsum('nc,ck->nk', [q, negs])
# logits: N * (1+K)
logits_pch = torch.cat([l_pos_pch, l_neg_pch], dim=1)
# apply temperature
logits_pch /= self.T
# neighbor InfoNCE logits
# positive logits: N * 1
l_pos_ngb = torch.einsum('nrc, nc->n', [r, k]).unsqueeze(-1)
# negative logits: N * K
l_neg_ngb = torch.einsum('nrc, ck->nk', [r, negs])
# logits: N * (1+K)
logits_ngb = torch.cat([l_pos_ngb, l_neg_ngb], dim=1)
# apply temperature
logits_ngb /= self.T
# labels: positive key indicators
labels = torch.zeros(logits_pch.shape[0], dtype=torch.long).cuda()
# dequeue and enqueue
self._dequeue_and_enqueue(k, patch_idx) # consider location for each patch in the batch
return logits_pch, logits_ngb, labels
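# --- Editor's illustrative sketch (not part of the original repository) ---
# How the three outputs above are typically consumed: both logit tensors are
# scored with cross-entropy against the same "positive is index 0" labels, and
# the neighbor term is scaled by a weight (the training script exposes --beta
# for this). The exact combination used in train.py is not shown in this
# excerpt, so treat the weighting below as an assumption.
import torch
import torch.nn as nn

logits_pch = torch.randn(4, 1 + 16)        # hypothetical: batch of 4, 16 negatives
logits_ngb = torch.randn(4, 1 + 16)
labels = torch.zeros(4, dtype=torch.long)  # the positive key is always index 0
beta = 1.0                                 # mirrors the --beta CLI argument
criterion = nn.CrossEntropyLoss()
loss = criterion(logits_pch, labels) + beta * criterion(logits_ngb, labels)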
# Path: data/copd_patch.py
class COPD_dataset(Dataset):
def __init__(self, stage, args, patch_transforms=default_transform, neighbor_transforms=default_transform):
self.stage = stage
self.args = args
self.root_dir = args.root_dir
self.metric_dict = dict() # initialize metric dictionary
self.patch_transforms = patch_transforms
self.neighbor_transforms = neighbor_transforms
# atlas patch locations, our reference file can be found at ./preprocess/misc/atlas_patch_loc.npy
self.patch_loc = np.load(self.args.root_dir + "19676E_INSP_STD_JHU_COPD_BSpline_Iso1_patch_loc.npy")
# pairwise distance
self.dists = pairwise_distances(self.patch_loc, metric='euclidean')
# normalize patch locations
self.patch_loc = (self.patch_loc / self.patch_loc.max(0)) * 2 - 1 # normalize position to [-1, 1]
self.patch_idx = 0
self.patch_data = np.load(self.args.root_dir+"grouped_patch/patch_loc_"+str(self.patch_idx)+".npy")
# top k nearest patches
self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]
neighbor_lst = []
for k in range(self.args.k_neighbors):
neighbor_data = np.load(self.args.root_dir+"grouped_patch/patch_loc_"+str(self.k_neighbor_idx[k])+".npy")
neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32
self.neighbor_data = np.concatenate(neighbor_lst, axis=0)
del neighbor_lst
if stage == 'training':
# Specific to the COPDGene dataset; you can change this depending on your needs
FILE = open(DATA_DIR + "phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt", "r")
mylist = FILE.readline().strip("\n").split("\t")
metric_idx = [mylist.index(label) for label in self.args.label_name]
race_idx = mylist.index("race")
for line in FILE.readlines():
mylist = line.strip("\n").split("\t")
tmp = [mylist[idx] for idx in metric_idx]
if "" in tmp:
continue
if self.args.nhw_only and mylist[race_idx] != "1":
continue
metric_list = []
for i in range(len(metric_idx)):
metric_list.append(float(tmp[i]))
self.metric_dict[mylist[0]] = metric_list
FILE.close()
if stage == 'testing':
# Specific to the COPDGene dataset; you can change this depending on your needs
self.label_name = self.args.label_name + self.args.label_name_set2
FILE = open(DATA_DIR + "phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt", "r")
mylist = FILE.readline().strip("\n").split("\t")
metric_idx = [mylist.index(label) for label in self.label_name]
for line in FILE.readlines():
mylist = line.strip("\n").split("\t")
tmp = [mylist[idx] for idx in metric_idx]
if "" in tmp[:3]:
continue
metric_list = []
for i in range(len(metric_idx)):
if tmp[i] == "":
metric_list.append(-1024)
else:
metric_list.append(float(tmp[i]))
self.metric_dict[mylist[0]] = metric_list + [-1024, -1024, -1024]
FILE = open(DATA_DIR + "CT_scan_datasets/CT_visual_scoring/COPDGene_CT_Visual_20JUL17.txt", "r")
mylist = FILE.readline().strip("\n").split("\t")
metric_idx = [mylist.index(label) for label in self.args.visual_score]
for line in FILE.readlines():
mylist = line.strip("\n").split("\t")
if mylist[0] not in self.metric_dict:
continue
tmp = [mylist[idx] for idx in metric_idx]
metric_list = []
for i in range(len(metric_idx)):
metric_list.append(float(tmp[i]))
self.metric_dict[mylist[0]][
-len(self.args.visual_score) - len(self.args.P2_Pheno):-len(self.args.P2_Pheno)] = metric_list
FILE.close()
FILE = open(
DATA_DIR + 'P1-P2 First 5K Long Data/Subject-flattened- one row per subject/First5000_P1P2_Pheno_Flat24sep16.txt',
'r')
mylist = FILE.readline().strip("\n").split("\t")
metric_idx = [mylist.index(label) for label in self.args.P2_Pheno]
for line in FILE.readlines():
mylist = line.strip("\n").split("\t")
if mylist[0] not in self.metric_dict:
continue
tmp = [mylist[idx] for idx in metric_idx]
metric_list = []
for i in range(len(metric_idx)):
metric_list.append(float(tmp[i]))
self.metric_dict[mylist[0]][-len(self.args.P2_Pheno):] = metric_list
FILE.close()
self.sid_list = []
for item in glob.glob(self.args.root_dir+"patch/"+"*_patch.npy"):
if item.split('/')[-1][:6] not in self.metric_dict:
continue
self.sid_list.append(item.split('/')[-1][:-10])
self.sid_list.sort()
assert len(self.sid_list) == self.patch_data.shape[0]
print("Fold: full")
self.sid_list = np.asarray(self.sid_list)
self.sid_list_len = len(self.sid_list)
print(stage+" dataset size:", self.sid_list_len)
def set_patch_idx(self, patch_idx):
self.patch_idx = patch_idx
self.patch_data = np.load(self.args.root_dir+"grouped_patch/patch_loc_"+str(self.patch_idx)+".npy")
# top k nearest patches
self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]
neighbor_lst = []
for k in range(self.args.k_neighbors):
neighbor_data = np.load(self.args.root_dir+"grouped_patch/patch_loc_"+str(self.k_neighbor_idx[k])+".npy")
neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32
self.neighbor_data = np.concatenate(neighbor_lst, axis=0)
del neighbor_lst
def __len__(self):
if self.stage == 'training':
return self.sid_list_len * self.args.num_patch
if self.stage == 'testing':
return self.sid_list_len
def __getitem__(self, idx):
if self.stage == 'training':
idx = idx % self.sid_list_len
# patch data
pch = self.patch_data[idx, :, :, :]
pch = np.clip(pch, -1024, 240) # clip input intensity to [-1024, 240]
pch = pch + 1024.
pch = self.patch_transforms(pch[None, :, :, :])
pch[0] = pch[0]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2
pch[1] = pch[1]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2
# patch location
patch_loc_idx = self.patch_loc[self.patch_idx, :]
# neighbor data
ngb = self.neighbor_data[:, idx, :, :, :]
ngb = np.clip(ngb, -1024, 240) # clip input intensity to [-1024, 240]
ngb = ngb + 1024.
ngb = self.neighbor_transforms(ngb)
ngb = ngb/632.-1 # Normalize to [-1,1], 632=(1024+240)/2
# neighbor location
neighor_loc_idx = self.patch_loc[self.k_neighbor_idx, :]
# labels
key = self.sid_list[idx][:6]
label = np.asarray(self.metric_dict[key])
return key, pch, patch_loc_idx, ngb, neighor_loc_idx, label
if self.stage == 'testing':
sid = self.sid_list[idx]
# read the entire image including 581 patches
img = np.load(self.root_dir + "patch/" + sid + "_patch.npy")
img = np.clip(img, -1024, 240) # clip input intensity to [-1024, 240]
img = img + 1024.
img = img[:, None, :, :, :] / 632. - 1 # Normalize to [-1,1], 632=(1024+240)/2
# patch locations for all 581 patches
patch_loc_idx = self.patch_loc
# study id
key = self.sid_list[idx][:6]
# labels
label = np.asarray(self.metric_dict[key]) # extract sid from the first 6 letters
return sid, img, patch_loc_idx, label
# Path: train.py
import os
import argparse
import builtins
import math
import random
import shutil
import time
import warnings
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models.loader as DrasCLR_Loader
from tensorboard_logger import configure, log_value
from models.cnn3d import Encoder
from models.builder import DrasCLR
from data.copd_patch import COPD_dataset
from monai.transforms import Compose, RandGaussianNoise, RandAffine, Rand3DElastic, RandAdjustContrast
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.2, type=float,
help='softmax temperature (default: 0.2)')
# options for moco v2
parser.add_argument('--mlp', action='store_false',
help='use mlp head')
parser.add_argument('--cos', action='store_false',
help='use cosine lr schedule')
# experiment configs
parser.add_argument('--adj-thres', default=0.18, type=float,
help='patch adjacent threshold (default: 0.18)')
parser.add_argument('--k-neighbors', default=2, type=int,
help='top k nearest neighbors of the anchor patch in the atlas image.')
parser.add_argument('--beta', default=1.0, type=float,
help='scaling factor of neighbor InfoNCE loss. (default: 1.0)')
parser.add_argument('--warm-up', default=0, type=int,
help='number of warm-up epochs before training neighbor contrastive loss.')
parser.add_argument('--num-experts', default=8, type=int,
help='number of experts in CondConv layer.')
parser.add_argument('--num-coordinates', default=1, type=int,
help='number of input coordinates.')
parser.add_argument('--augmentation', default='agc',
help='initials of augmentation including: (f)lip, (a)ffine, (e)lastic, (g)aussian, (c)ontrast.')
parser.add_argument('--exp-name', default='debug_patch', type=str,
help='experiment name')
def main():
# read configurations
args = parser.parse_args()
# define and create the experiment directory
exp_dir = os.path.join('./ssl_exp', args.exp_name)
if not os.path.isdir(exp_dir):
os.makedirs(exp_dir, exist_ok=True)
# save configurations to a dictionary
with open(os.path.join(exp_dir, 'configs.json'), 'w') as f:
json.dump(vars(args), f, indent=2)
f.close()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
print("Distributed:", args.distributed)
#ngpus_per_node = torch.cuda.device_count()
ngpus_per_node = args.npgus_per_node
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
configure(os.path.join('./ssl_exp', args.exp_name))
# create patch-level encoder
model = DrasCLR(
Encoder,
args.num_patch, args.rep_dim, args.moco_dim, args.num_experts, \
args.num_coordinates, args.moco_k, args.moco_m, args.moco_t, args.mlp)
if args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.gpu])
else:
raise NotImplementedError("GPU number is unknown.")
| else: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CHDers/Traffic-Flow-Prediction-with-Graph-Neural-Networks
# Path: traffic_dataset.py
class LoadData(Dataset): # turns the raw data into the training and test samples the model needs, readable one sample at a time
def __init__(self, data_path, num_nodes, divide_days, time_interval, history_length, train_mode):
"""
:param data_path: list, ["graph file name" , "flow data file name"], path to save the data file names.
:param num_nodes: int, number of nodes.
:param divide_days: list, [ days of train data, days of test data], list to divide the original data.
:param time_interval: int, time interval between two traffic data records (mins).---5 mins
:param history_length: int, length of history data to be used.
:param train_mode: list, ["train", "test"].
"""
self.data_path = data_path
self.num_nodes = num_nodes
self.train_mode = train_mode
self.train_days = divide_days[0] # 59-14 = 45, train_data
self.test_days = divide_days[1] # 7*2 = 14 ,test_data
self.history_length = history_length # 30/5 = 6, history length is 6
self.time_interval = time_interval # 5 min
self.one_day_length = int(24 * 60 / self.time_interval) # number of records in one full day
self.graph = get_adjacent_matrix(distance_file=data_path[0], num_nodes=num_nodes)
self.flow_norm, self.flow_data = self.pre_process_data(data=get_flow_data(data_path[1]),
norm_dim=1) # self.flow_norm is the normalization base
def __len__(self): # length of the dataset
"""
:return: length of dataset (number of samples).
"""
if self.train_mode == "train":
return self.train_days * self.one_day_length - self.history_length # number of training samples = total training length - history length
elif self.train_mode == "test":
return self.test_days * self.one_day_length # every time step can be tested; number of test samples = total test length
else:
raise ValueError("train mode: [{}] is not defined".format(self.train_mode))
def __getitem__(self, index): # how to fetch each sample (x, y); index is in [0, L1 - 1], determined by the dataset length
"""
:param index: int, range between [0, length - 1].
:return:
graph: torch.tensor, [N, N].
data_x: torch.tensor, [N, H, D].
data_y: torch.tensor, [N, 1, D].
"""
if self.train_mode == "train":
index = index # training data starts at time 0; this indexes single flow records, which are distinct from the samples (x, y)
elif self.train_mode == "test":
index += self.train_days * self.one_day_length # shifted by an offset
else:
raise ValueError("train mode: [{}] is not defined".format(self.train_mode))
data_x, data_y = LoadData.slice_data(self.flow_data, self.history_length, index, self.train_mode) # this is the sample (x, y)
data_x = LoadData.to_tensor(data_x) # [N, H, D] # convert to tensor
data_y = LoadData.to_tensor(data_y).unsqueeze(1) # [N, 1, D] # convert to tensor and add a time dimension
return {"graph": LoadData.to_tensor(self.graph), "flow_x": data_x, "flow_y": data_y} # return as a dictionary
@staticmethod
def slice_data(data, history_length, index, train_mode): # split out a data sample based on the history length and index
"""
:param data: np.array, normalized traffic data.
:param history_length: int, length of history data to be used.
:param index: int, index on temporal axis.
:param train_mode: str, ["train", "test"].
:return:
data_x: np.array, [N, H, D].
data_y: np.array [N, D].
"""
if train_mode == "train":
start_index = index # the start index is the time index itself (inclusive)
end_index = index + history_length # end index (exclusive)
elif train_mode == "test":
start_index = index - history_length # start index; a figure at the end of the original post helps explain this
end_index = index # end index
else:
raise ValueError("train model {} is not defined".format(train_mode))
data_x = data[:, start_index: end_index] # slice along the second dimension, end_index excluded
data_y = data[:, end_index] # take the single step at end_index
return data_x, data_y
@staticmethod
def pre_process_data(data, norm_dim): # preprocessing: normalization
"""
:param data: np.array, the original traffic flow data
:param norm_dim: int, the dimension to normalize over; here dim=1, the time dimension
:return:
norm_base: list, [max_data, min_data], the normalization base.
norm_data: np.array, normalized traffic data.
"""
norm_base = LoadData.normalize_base(data, norm_dim) # compute the normalization base
norm_data = LoadData.normalize_data(norm_base[0], norm_base[1], data) # normalized flow data
return norm_base, norm_data # the base is returned so the data can be recovered later
@staticmethod
def normalize_base(data, norm_dim): # compute the normalization base
"""
:param data: np.array, the original traffic flow data
:param norm_dim: int, normalization dimension; the dimension to normalize over, here dim=1, the time dimension
:return:
max_data: np.array
min_data: np.array
"""
max_data = np.max(data, norm_dim, keepdims=True) # [N, T, D], norm_dim=1 -> [N, 1, D]; keepdims=True keeps the number of dimensions
min_data = np.min(data, norm_dim, keepdims=True)
return max_data, min_data # return the maximum and minimum
@staticmethod
def normalize_data(max_data, min_data, data): # normalize the flow data with min-max normalization
"""
:param max_data: np.array, max data.
:param min_data: np.array, min data.
:param data: np.array, original traffic data without normalization.
:return:
np.array, normalized traffic data.
"""
mid = min_data
base = max_data - min_data
normalized_data = (data - mid) / base
return normalized_data
@staticmethod
def recover_data(max_data, min_data, data): # used to recover the data, in preparation for visual comparison
"""
:param max_data: np.array, max data.
:param min_data: np.array, min data.
:param data: np.array, normalized data.
:return:
recovered_data: np.array, recovered data.
"""
mid = min_data
base = max_data - min_data
recovered_data = data * base + mid
return recovered_data # this is the data on its original scale
@staticmethod
def to_tensor(data):
return torch.tensor(data, dtype=torch.float)
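# --- Editor's illustrative sketch (not part of the original repository) ---
# A tiny numeric check of the min-max normalization round trip implemented by
# normalize_base / normalize_data / recover_data, on a toy array of shape
# [N=1 node, T=4 time steps, D=1 feature].
import numpy as np

toy = np.array([[[10.], [20.], [30.], [40.]]])            # [1, 4, 1]
max_d, min_d = LoadData.normalize_base(toy, norm_dim=1)   # max=40, min=10
normed = LoadData.normalize_data(max_d, min_d, toy)       # values 0, 1/3, 2/3, 1
assert np.allclose(LoadData.recover_data(max_d, min_d, normed), toy)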
# Path: utils.py
class Evaluation(object):
def __init__(self):
pass
@staticmethod
def mae_(target, output):
return np.mean(np.abs(target - output))
@staticmethod
def mape_(target, output):
return np.mean(np.abs(target - output) / (target + 5)) # add 5 because the target may be 0; any small constant works
@staticmethod
def rmse_(target, output):
return np.sqrt(np.mean(np.power(target - output, 2)))
@staticmethod
def total(target, output):
mae = Evaluation.mae_(target, output)
mape = Evaluation.mape_(target, output)
rmse = Evaluation.rmse_(target, output)
return mae, mape, rmse
# Path: utils.py
def visualize_result(h5_file, nodes_id, time_se, visualize_file):
file_obj = h5py.File(h5_file, "r") # get the file object, which has two keys: "predict" and "target"
prediction = file_obj["predict"][:][:, :, 0] # [N, T]; take column 0 of the last dim, so it becomes 2-D ([:, :, :1] would keep the dimension)
target = file_obj["target"][:][:, :, 0] # [N, T], same as above
file_obj.close()
plot_prediction = prediction[nodes_id][time_se[0]: time_se[1]] # [T1], take the data of the chosen node over the chosen time range
plot_target = target[nodes_id][time_se[0]: time_se[1]] # [T1], same as above
plt.figure()
plt.grid(True, linestyle="-.", linewidth=0.5)
plt.plot(np.array([t for t in range(time_se[1] - time_se[0])]), plot_prediction, ls="-", marker=" ", color="r")
plt.plot(np.array([t for t in range(time_se[1] - time_se[0])]), plot_target, ls="-", marker=" ", color="b")
plt.legend(["prediction", "target"], loc="upper right")
plt.axis([0, time_se[1] - time_se[0],
np.min(np.array([np.min(plot_prediction), np.min(plot_target)])),
np.max(np.array([np.max(plot_prediction), np.max(plot_target)]))])
plt.savefig(visualize_file + ".png")
# Path: gcnnet.py
class GCN(nn.Module): # GCN model, the first graph convolution in the spatial domain
def __init__(self, in_c, hid_c, out_c):
super(GCN, self).__init__() # inherit all attributes and methods of the parent class
self.linear_1 = nn.Linear(in_c, hid_c) # define a linear layer
self.linear_2 = nn.Linear(hid_c, out_c) # define a linear layer
self.act = nn.ReLU() # define the activation function
def forward(self, data, device):
graph_data = data["graph"].to(device)[0] # [N, N] 邻接矩阵,并且将数据送入设备
graph_data = GCN.process_graph(graph_data) # 变换邻接矩阵 \hat A = D_{-1/2}*A*D_{-1/2}
flow_x = data["flow_x"].to(device) # [B, N, H, D] 流量数据
B, N = flow_x.size(0), flow_x.size(1) # batch_size、节点数
flow_x = flow_x.view(B, N, -1) # [B, N, H*D] H = 6, D = 1把最后两维缩减到一起了,这个就是把历史时间的特征放一起
# 第一个图卷积层
output_1 = self.linear_1(flow_x) # [B, N, hid_C],这个就是 WX,其中W是可学习的参数,X是输入的流量数据(就是flow_x)
output_1 = self.act(torch.matmul(graph_data, output_1)) # [B, N, N] ,[B, N, hid_c],就是 \hat AWX
# second graph convolution layer
output_2 = self.linear_2(output_1) # WX
output_2 = self.act(torch.matmul(graph_data, output_2)) # [B, N, out_c], i.e. \hat A W X
return output_2.unsqueeze(2) # expand along dim 2
@staticmethod
def process_graph(graph_data): # transform the raw adjacency matrix once more, i.e. \hat A = D^{-1/2} * A * D^{-1/2}
N = graph_data.size(0) # number of nodes
matrix_i = torch.eye(N, dtype=torch.float, device=graph_data.device) # [N, N] identity matrix
graph_data += matrix_i # [N, N], i.e. A + I
degree_matrix = torch.sum(graph_data, dim=1, keepdim=False) # [N], degree matrix collapsed to a vector: the row sums of A + I
degree_matrix = degree_matrix.pow(-1) # inverse of the degrees; a degree of 0 would give infinity
degree_matrix[degree_matrix == float("inf")] = 0. # set infinities to 0
degree_matrix = torch.diag(degree_matrix) # convert to a diagonal matrix
return torch.mm(degree_matrix, graph_data) # return \hat A = D^(-1) * A, used here in place of \hat A = D^{-1/2} * A * D^{-1/2}
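# --- Editor's illustrative sketch (not part of the original repository) ---
# What process_graph computes on a tiny 3-node path graph 0-1-2: self-loops are
# added and each row is divided by its degree, i.e. D^(-1) (A + I).
import torch

A = torch.tensor([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
A_hat = GCN.process_graph(A.clone())  # clone because process_graph adds I in place
# row sums of A + I are [2, 3, 2], so A_hat equals
# [[0.5, 0.5, 0.0],
#  [1/3, 1/3, 1/3],
#  [0.0, 0.5, 0.5]]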
# Path: chebnet.py
class ChebNet(nn.Module): # the graph network class
def __init__(self, in_c, hid_c, out_c, K):
"""
:param in_c: int, number of input channels.
:param hid_c: int, number of hidden channels.
:param out_c: int, number of output channels.
:param K:
"""
super(ChebNet, self).__init__()
self.conv1 = ChebConv(in_c=in_c, out_c=hid_c, K=K) # first graph convolution layer
self.conv2 = ChebConv(in_c=hid_c, out_c=out_c, K=K) # second graph convolution layer
self.act = nn.ReLU() # activation function
def forward(self, data, device):
graph_data = data["graph"].to(device)[0] # [N, N]
flow_x = data["flow_x"].to(device) # [B, N, H, D] # B是batch size,N是节点数,H是历史数据长度,D是特征维度
B, N = flow_x.size(0), flow_x.size(1)
flow_x = flow_x.view(B, N, -1) # [B, N, H*D], H = 6, D = 1; merge the last two dims so the historical features sit together
output_1 = self.act(self.conv1(flow_x, graph_data))
output_2 = self.act(self.conv2(output_1, graph_data))
return output_2.unsqueeze(2) # expand along dim 2, the time dimension
# Path: gat.py
class GATNet(nn.Module):
def __init__(self, in_c, hid_c, out_c, n_heads):
super(GATNet, self).__init__()
self.subnet = GATSubNet(in_c, hid_c, out_c, n_heads)
def forward(self, data, device):
graph = data["graph"][0].to(device) # [N, N]
flow = data["flow_x"] # [B, N, T, C]
flow = flow.to(device) # move the flow data to the device
B, N = flow.size(0), flow.size(1)
flow = flow.view(B, N, -1) # [B, N, T * C]
"""
The code above flattens the features of the whole time window into one feature vector,
which in effect ignores the temporal continuity. It works, but it is rather crude. Alternatively:
flow[:, :, 0] ... flow[:, :, T-1] gives T tensors of shape [B, N, C], i.e. [B, N, C]*T.
Each tensor could get its own SubNet, so there would be T SubNets, defined in __init__ as self.subnet = [GATSubNet(...) for _ in range(T)]
and wrapped with nn.ModuleList so each SubNet is applied separately, analogous to multi-head attention.
"""
prediction = self.subnet(flow, graph).unsqueeze(2) # [B, N, 1, C]; the added 1 means a single future time step is predicted
return prediction
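# --- Editor's illustrative sketch (not part of the original repository) ---
# The docstring above suggests an alternative to flattening the time window:
# one subnet per time step, held in an nn.ModuleList. A rough sketch of that
# idea, assuming torch / torch.nn as nn and the GATSubNet(in_c, hid_c, out_c,
# n_heads) defined in gat.py are in scope; how the per-step outputs are merged
# (here: a mean) is a design choice, not something specified by the repository.
class GATNetPerStep(nn.Module):
    def __init__(self, in_c, hid_c, out_c, n_heads, T):
        super().__init__()
        self.subnets = nn.ModuleList(
            [GATSubNet(in_c, hid_c, out_c, n_heads) for _ in range(T)]
        )

    def forward(self, data, device):
        graph = data["graph"][0].to(device)
        flow = data["flow_x"].to(device)   # [B, N, T, C]
        outputs = [
            net(flow[:, :, t, :], graph)   # assumed shape [B, N, out_c] per step
            for t, net in enumerate(self.subnets)
        ]
        return torch.stack(outputs, dim=2).mean(dim=2, keepdim=True)  # [B, N, 1, out_c]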
# Path: traffic_prediction.py
import os
import time
import h5py
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import warnings
from torch.utils.data import DataLoader
from traffic_dataset import LoadData
from utils import Evaluation # the three evaluation metrics and the visualization class
from utils import visualize_result
from gcnnet import GCN
from chebnet import ChebNet
from gat import GATNet
from rich import print
from tqdm import tqdm
# @Time : 2020/8/25
# @Author : LeronQ
# @github : https://github.com/LeronQ
# PyTorch traffic flow prediction with GCN/GAT/ChebNet graph neural networks (with code): https://blog.csdn.net/yilulvxing/article/details/110306999
# traffic_prediction.py
# the data-processing class we wrote in the previous section, packaged in traffic_dataset.py
warnings.filterwarnings('ignore')
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # 配置GPU,因为可能有多个GPU,这里用了第0号GPU
# 第一步:准备数据(上一节已经准备好了,这里只是调用而已,链接在最开头)
train_data = LoadData(data_path=["PeMS_04/PeMS04.csv", "PeMS_04/PeMS04.npz"], num_nodes=307, divide_days=[45, 14],
time_interval=5, history_length=6,
train_mode="train")
# num_workers is the number of workers used to load the data (batches)
train_loader = DataLoader(
train_data, batch_size=32, shuffle=True, num_workers=4)
test_data = LoadData(data_path=["PeMS_04/PeMS04.csv", "PeMS_04/PeMS04.npz"], num_nodes=307, divide_days=[45, 14],
time_interval=5, history_length=6,
train_mode="test")
test_loader = DataLoader(test_data, batch_size=32,
shuffle=False, num_workers=4)
print("🚀🚀🚀 [italic bold green]数据加载完成!!!")
# SECTION: Step 2: define the model (here we only load it; the model definitions are written separately below, assume they are already done)
my_net = GCN(in_c=6, hid_c=6, out_c=1) # load the GCN model
# my_net = ChebNet(in_c=6, hid_c=6, out_c=1, K=2) # load the ChebNet model
# my_net = GATNet(in_c=6 * 1, hid_c=6, out_c=1, n_heads=2) # load the GAT model
print(my_net)
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu") # 定义设备
my_net = my_net.to(device) # 模型送入设备
# 第三步:定义损失函数和优化器
criterion = nn.MSELoss() # 均方损失函数
# 没写学习率,表示使用的是默认的,也就是lr=1e-3
optimizer = optim.Adam(params=my_net.parameters())
# Step 4: train + test
# Train model
Epoch = 20 # number of training epochs
my_net.train() # switch to training mode
for epoch in tqdm(range(Epoch), colour="green", desc="Train"):
| epoch_loss = 0.0 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nickruggeri/hypergraph-message-passing
# Path: src/data/representation/incidence_hypergraph.py
class IncidenceHypergraph(BinaryHypergraph):
"""Representation of a binary hypergraph via its incidence matrix.
The incidence matrix B is of size N x E, with N number of nodes in the hypergraph
and E number of hyperedges. For each hyperedge e, the column of B with index e
contains ones for the nodes belonging to the hyperedge e, zeros for all other nodes.
"""
def __init__(
self,
B: np.ndarray | sparse.spmatrix,
sort_indices: bool = True,
):
"""
Parameters
----------
B: incidence matrix, of shape (N, E).
sort_indices: sort the indices in the internal sparse matrix representation.
"""
self.B = self._check_and_convert_incidence(B, sort_indices)
self.N, self.E = self.B.shape
hye_lengths = self.B.sum(axis=0)
hye_counter = dict(Counter(hye_lengths))
self.hye_count = hye_counter
self.max_hye_size = max(hye_counter.keys())
def get_repr(self) -> TYPE_INCIDENCE:
return self.B
def get_binary_incidence_matrix(self) -> TYPE_INCIDENCE:
return self.B
def sub_hyg(
self,
hyperedge_idx: np.ndarray | None = None,
) -> IncidenceHypergraph:
"""Produce a sub-hypergraph where only the specified hyperedges are present.
Parameters
----------
hyperedge_idx: the list of the hyperedges to keep, specified by their indices.
Returns
-------
The sub-hypergraph instance.
"""
if hyperedge_idx is None:
return self
B = self.B[:, hyperedge_idx]
return IncidenceHypergraph(B)
def __iter__(self) -> Iterable[np.ndarray]:
return incidence_matrix_to_hye(self.B)
def __str__(self):
return f"{self.__class__.__name__} with N={self.N}, E={self.E}"
@classmethod
def load_from_txt(
cls,
hye_file: str | Path,
N: int | None = None,
) -> IncidenceHypergraph:
"""Load a IncidenceHypergraph instance from a txt file, containing the list of
hyperedges.
Parameters
----------
hye_file: text file containing the hyperedges.
N: number of nodes in the hypergraph.
Returns
-------
An instance of IncidenceHypergraph.
"""
with open(hye_file, "r") as file:
hye = (map(int, line.split(" ")) for line in file.readlines())
return cls.load_from_hye_list(hye, N)
@classmethod
def load_from_hye_list(
cls, hye_list: list[Iterable[int]], N: int | None
) -> IncidenceHypergraph:
hye = list(set(tuple(sorted(set(hyperedge))) for hyperedge in hye_list))
shape = (N, len(hye)) if N else None
B = hye_list_to_binary_incidence(hye, shape=shape)
return IncidenceHypergraph(B)
@staticmethod
def _check_and_convert_incidence(
incidence: np.ndarray | sparse.spmatrix, sort_indices: bool
) -> TYPE_INCIDENCE:
incidence = TYPE_INCIDENCE(incidence)
# When converting to other sparse types, repeated entries are summed. In such
# case, there could be entries different from 1. Set them to 1.
# Similarly, if a weighted matrix is provided as input, flatten all non-zero
# entries to 1.
if not np.all(incidence.data == 1):
warnings.warn(
"The configurations matrix contains elements different from 0 and 1. "
"All non-zero elements will be converted to 1."
)
incidence = incidence > 0
if not np.all(incidence.data == 1):
raise ValueError("The incidence matrix can only contain 1 and 0 values.")
if sort_indices:
incidence.sort_indices()
return incidence
# Path: src/model/kappa.py
def compute_C_prime(max_hye_size: int) -> float:
r"""Compute the :math::`C'` constant defined as
.. math::
C' := \sum_{d=2}^D \binom{N-2}{d-2} / \kappa_d
where D is the maximum hyperedge size, N the number of nodes in the hypergraph, and
:math::`\kappa_d` the normalizing constant.
"""
if max_hye_size in C_PRIME_VALS:
return C_PRIME_VALS[max_hye_size]
hye_dims = np.arange(2, max_hye_size + 1)
c_prime = 2 * (1 / (hye_dims * (hye_dims - 1))).sum()
C_PRIME_VALS[max_hye_size] = c_prime
return c_prime
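# Editor's note (added for clarity, not in the original source): for
# max_hye_size = 4 the implementation above returns
#   2 * (1/(2*1) + 1/(3*2) + 1/(4*3)) = 2 * 0.75 = 1.5,
# and the value is cached in C_PRIME_VALS for later calls.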
# Path: src/model/kappa.py
def compute_C_third(max_hye_size: int) -> float:
r"""Compute the :math::`C'` constant defined as
.. math::
C''' := \sum_{d=2}^D \frac{1-d}{\kappa_d} \binom{N-2}{d-2} /
where D is the maximum hyperedge size, N the number of nodes in the hypergraph, and
:math::`\kappa_d` the normalizing constant.
"""
hye_dims = np.arange(2, max_hye_size + 1)
return -2 * (1 / hye_dims).sum()
# Path: src/model/numerical.py
def hyperedge_pi(hye_comm_counts: list[int], p: np.ndarray) -> float:
r"""Compute the value of :math::`\pi_e` for a hyperedge :math::`e`.
The value is defined as:
.. math::
\pi_e := \sum_{i < j \in e} p_{t_i t_j}
where p is the affinity matrix and :math::`t_i` the community assignment of node i.
Parameters
----------
hye_comm_counts: a list of length K, where K is the number of communities. Every
entry a of the list contains the number of nodes in the hyperedge belonging to
community a.
p: symmetric affinity matrix of probabilities in [0, 1].
Returns
-------
The value of :math::`\pi_e`.
"""
prob = 0
for a, b in itertools.combinations(range(len(hye_comm_counts)), 2):
prob += p[a, b] * hye_comm_counts[a] * hye_comm_counts[b]
for a, count in enumerate(hye_comm_counts):
prob += p[a, a] * count * (count - 1) / 2
return prob
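# Editor's worked example (added for clarity, not in the original source): with
# two communities, p = [[0.5, 0.1], [0.1, 0.3]] and a hyperedge holding two
# nodes of community 0 and one node of community 1 (hye_comm_counts = [2, 1]):
#   cross-community pairs: p[0, 1] * 2 * 1           = 0.2
#   within community 0:    p[0, 0] * 2 * (2 - 1) / 2 = 0.5
#   within community 1:    p[1, 1] * 1 * (1 - 1) / 2 = 0.0
# so hyperedge_pi([2, 1], p) returns 0.7.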
# Path: src/model/numerical.py
def sparse_reduce_lse(
*args: sparse.csc_array | sparse.csr_array,
) -> sparse.csc_array | sparse.csr_array:
"""Perform the elementwise log-sum-exp operation on a sequence of sparse arrays.
The arrays are assumed to have all the same pattern of non-zero entries, and to have
sorted indices.
"""
data = np.stack([mat.data for mat in args], axis=1)
lse_vals = special.logsumexp(data, axis=1)
lse_mat = args[0].copy()
lse_mat.data = lse_vals
return lse_mat
# Path: src/model/dynamic_updates.py
def compute_psi_dynamic_programming(
hypergraph: IncidenceHypergraph,
model: "HypergraphBlockModel",
mask: np.ndarray | None = None,
) -> list[sparse.coo_array]:
"""Compute the psi quantities via dynamic programming.
"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order
Information", Ruggeri et al.
Parameters
----------
hypergraph: the input hypergraph.
model: the input stochastic block model.
mask: a boolean mask to compute the psi values only for specific (hyperedge, node)
pairs.
The mask needs to be a flattened boolean array with the same length as
hypergraphs.get_binary_incidence_matrix().data
Returns
-------
The psi values, results of the dynamic programming recursions.
"""
# The incidence matrix needs to be in CSC sparse format, the rest of the code
# doesn't work otherwise.
incidence: sparse.csc_array = hypergraph.get_binary_incidence_matrix()
assert isinstance(incidence, sparse.csc_array), "Incidence matrix is not CSC."
# For coherence with the returned COO array at the end, the incidence matrix needs
# to be in canonical sorted format. Otherwise, calling all_psi.tocsc() might result
# in a matrix where non-zero indices do not correspond.
assert incidence.has_sorted_indices, (
"The incidence matrix doesn't have a canonical sorted format. "
"To fix this, call the sort_indices() method of scipy CSC matrices.",
)
if mask is not None:
assert mask.shape == (len(incidence.data),), (
f"The mask has shape {mask.shape}, "
f"different from the incidence matrix data {incidence.data.shape}"
)
log_node_to_hye = [x.tocsc() for x in model.log_node_to_hye]
K = model.K
def hyperedge_psi_(hye: int):
nodes, psi = hyperedge_psi(
incidence,
hye,
model.p,
log_node_to_hye,
eta_tilde=False,
mask=mask,
)
return hye, nodes, psi
res = Parallel(n_jobs=N_JOBS)(
delayed(hyperedge_psi_)(hye) for hye in range(hypergraph.E)
)
nonzeros = mask.sum() if mask is not None else incidence.nnz
hye_idx = np.zeros(nonzeros)
node_idx = np.zeros(nonzeros)
psi_vals = np.zeros((nonzeros, K))
idx = itertools.count()
for hye, nodes, psi in res:
for i, node in enumerate(nodes):
idx_ = next(idx)
hye_idx[idx_] = hye
node_idx[idx_] = node
psi_vals[idx_, :] = psi[i, :]
all_psi = [
sparse.coo_array(
(psi_vals[:, a], (node_idx, hye_idx)),
shape=(hypergraph.N, hypergraph.E),
)
for a in range(K)
]
return all_psi
# Path: src/model/dynamic_updates.py
def compute_psi_tilde_dynamic_programming(
hypergraph: IncidenceHypergraph,
model: "HypergraphBlockModel",
) -> np.ndarray:
"""Compute the psi quantities via dynamic programming.
"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order
Information", Ruggeri et al.
Parameters
----------
hypergraph: the input hypergraph.
model: the input stochastic block model.
Returns
-------
The psi tilde values, results of the dynamic programming recursions.
"""
# Here we are assuming the incidence to be a CSC sparse array, the rest of the code
# doesn't work otherwise.
incidence: sparse.csc_array = hypergraph.get_binary_incidence_matrix()
assert isinstance(
incidence, sparse.csc_array
), "Incidence matrix is not is CSC sparse format."
log_node_to_hye = [x.tocsc() for x in model.log_node_to_hye]
def hyperedge_psi_(hye):
psi_tilde = hyperedge_psi(
incidence, hye, model.p, log_node_to_hye, eta_tilde=True
)
return hye, psi_tilde
res = Parallel(n_jobs=N_JOBS)(
delayed(hyperedge_psi_)(hye) for hye in range(hypergraph.E)
)
all_psi = np.zeros(hypergraph.E)
for hye, psi_val in res:
all_psi[hye] = psi_val
return all_psi
# Path: src/model/hypergraph_block_model.py
import logging
import numpy as np
from collections import Counter
from typing import Iterable
from scipy import sparse, special
from src.data.representation.incidence_hypergraph import IncidenceHypergraph
from src.model.kappa import compute_C_prime, compute_C_third
from src.model.numerical import hyperedge_pi, sparse_reduce_lse
from .dynamic_updates import (
compute_psi_dynamic_programming,
compute_psi_tilde_dynamic_programming,
)
absolute change in log-marginals below the mp_thresh, the message passing
procedure is stopped.
seed: random seed.
warm_start: whether to re-initialize the messages and marginal beliefs.
"""
logging.info("\tMessage passing...")
if seed is not None:
self.rng = np.random.default_rng(seed)
self._check_hypergraph_vs_model_params(hypergraph)
all_messages_init = (
self.log_hye_to_node is not None
and self.log_node_to_hye is not None
and self.log_marginals is not None
and self.external_field is not None
)
if not warm_start or not all_messages_init:
alpha = 10.0 * self.K if dirichlet_alpha is None else dirichlet_alpha
self._init_message_passing(hypergraph, dirichlet_alpha=alpha)
logging.debug(
f"\t\tInitialized hye to node:\n{self.log_hye_to_node[0].data[:5]}"
)
logging.debug(
f"\t\tInitialized node to hye:\n{self.log_node_to_hye[0].data[:5]}"
)
logging.debug(f"\t\tInitialized marginals:\n{self.log_marginals[:5]}")
logging.debug(f"\t\tInitialized external field:\n{self.external_field}")
self.log_marginal_diff.append(list())
patience_count = 0
for i in range(mp_iter):
old_log_marginals = self.log_marginals.copy()
self._parallel_message_passing_step(hypergraph, dropout)
self.log_marginal_diff[-1].append(
np.abs(old_log_marginals - self.log_marginals).sum()
)
logging.info(
f"\t\tMP step {i} - difference in log-marginals from previous iter: "
f"{self.log_marginal_diff[-1][-1]}"
)
if self.log_marginal_diff[-1][-1] <= mp_thresh:
patience_count += 1
else:
patience_count = 0
if patience_count == patience:
logging.info(
"\tMessage passing threshold passed. Message passing terminated."
)
break
def _parallel_message_passing_step(
self,
hypergraph: IncidenceHypergraph,
dropout: float = 0.99,
) -> None:
"""Perform one step of message passing, updating the messages from nodes to
factors, the messages from factors to nodes, the marginal probabilities and
external field."""
inc = hypergraph.get_binary_incidence_matrix()
# Update node to hye.
new_node_to_hye = [None] * self.K
for assignment in range(self.K):
col_sum = self.log_hye_to_node[assignment].sum(axis=1)
assert col_sum.shape == (self.N,)
col_sum += np.log(self.n[assignment]) - self.external_field[assignment]
col_sum = col_sum.reshape((self.N, 1))
new_node_to_hye[assignment] = (
TYPE_HYE_TO_NODE(inc * col_sum) - self.log_hye_to_node[assignment]
)
norm = sparse_reduce_lse(*new_node_to_hye)
for assignment in range(self.K):
new_node_to_hye[assignment].data -= norm.data
new_node_to_hye[assignment].data = np.clip(
new_node_to_hye[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX
)
# TODO dropout could be made more efficient here. Do it or not?
if dropout > 0:
non_dropout_mask = (
self.rng.random(len(self.log_node_to_hye[0].data)) >= dropout
)
for assignment in range(self.K):
self.log_node_to_hye[assignment].data[
non_dropout_mask
] = new_node_to_hye[assignment].data[non_dropout_mask]
else:
for assignment in range(self.K):
self.log_node_to_hye[assignment].data = new_node_to_hye[assignment].data
logging.debug(f"\t\tUpdated node to hye:\n{self.log_node_to_hye[0].data[:5]}")
# Update hye to node.
if dropout > 0:
non_dropout_mask = (
self.rng.random(len(self.log_hye_to_node[0].data)) >= dropout
)
else:
non_dropout_mask = None
new_hye_to_node = [
TYPE_HYE_TO_NODE(x)
for x in compute_psi_dynamic_programming(
hypergraph=hypergraph,
model=self,
mask=non_dropout_mask,
)
]
norm = sparse_reduce_lse(*new_hye_to_node)
for assignment in range(self.K):
new_hye_to_node[assignment].data -= norm.data
new_hye_to_node[assignment].data = np.clip(
new_hye_to_node[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX
)
for assignment in range(self.K):
| self.log_hye_to_node[assignment].data[non_dropout_mask] = new_hye_to_node[ |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: oVo-HxBots/URLUploadBot
# Path: Uploader/script.py
class Translation(object):
START_TEXT = """
Hi {}
I am a powerful URL Uploader Bot
"""
HELP_TEXT = """
# Send me the Google Drive | ytdl | direct links.
# Select the desired option.
# Then relax, your file will be uploaded soon..
"""
# give credit to developer
ABOUT_TEXT = """
<b>♻️ My Name</b> : Url Uploader Bot
<b>🌀 Channel</b> : <a href="https://t.me/TMWAD">@TMWAD</a>
<b>🌺 Heroku</b> : <a href="https://heroku.com/">Heroku</a>
<b>📑 Language :</b> <a href="https://www.python.org/">Python 3.10.5</a>
<b>🇵🇲 Framework :</b> <a href="https://docs.pyrogram.org/">Pyrogram 2.0.30</a>
<b>👲 Developer :</b> <a href="https://t.me/kinu6">@kinu6</a>
"""
PROGRESS = """
🔰 Speed : {3}/s\n\n
🌀 Done : {1}\n\n
🎥 Tᴏᴛᴀʟ sɪᴢᴇ : {2}\n\n
⏳ Tɪᴍᴇ ʟᴇғᴛ : {4}\n\n
"""
ID_TEXT = """
🆔 Your Telegram ID 𝐢𝐬 :- <code>{}</code>
"""
INFO_TEXT = """
🤹 First Name : <b>{}</b>
🚴♂️ Second Name : <b>{}</b>
🧑🏻🎓 Username : <b>@{}</b>
🆔 Telegram Id : <code>{}</code>
📇 Profile Link : <b>{}</b>
📡 Dc : <b>{}</b>
📑 Language : <b>{}</b>
👲 Status : <b>{}</b>
"""
START_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('❓ Help', callback_data='help'),
InlineKeyboardButton('🦊 About', callback_data='about')
], [
InlineKeyboardButton('📛 Close', callback_data='close')
]]
)
HELP_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏠 Home', callback_data='home'),
InlineKeyboardButton('🦊 About', callback_data='about')
], [
InlineKeyboardButton('📛 Close', callback_data='close')
]]
)
ABOUT_BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('🏠 Home', callback_data='home'),
InlineKeyboardButton('❓ Help', callback_data='help')
], [
InlineKeyboardButton('📛 Close', callback_data='close')
]]
)
BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('📛 Close', callback_data='close')
]]
)
FORMAT_SELECTION = "Now Select the desired formats"
SET_CUSTOM_USERNAME_PASSWORD = """"""
DOWNLOAD_START = "Trying to Download ⌛\n\n <i>{} </i>"
UPLOAD_START = "<i>{} </i>\n\n📤 Uploading Please Wait "
RCHD_TG_API_LIMIT = "Downloaded in {} seconds.\nDetected File Size: {}\nSorry. But, I cannot upload files greater than 2GB due to Telegram API limitations."
AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS = "Dᴏᴡɴʟᴏᴀᴅᴇᴅ ɪɴ {} sᴇᴄᴏɴᴅs.\n\nTʜᴀɴᴋs Fᴏʀ Usɪɴɢ Mᴇ\n\nUᴘʟᴏᴀᴅᴇᴅ ɪɴ {} sᴇᴄᴏɴᴅs"
FF_MPEG_DEL_ETED_CUSTOM_MEDIA = "✅ Media cleared successfully."
CUSTOM_CAPTION_UL_FILE = " "
NO_VOID_FORMAT_FOUND = "ERROR... <code>{}</code>"
SLOW_URL_DECED = "Gosh that seems to be a very slow URL. Since you were screwing my home, I am in no mood to download this file. Meanwhile, why don't you try this:==> https://shrtz.me/PtsVnf6 and get me a fast URL so that I can upload to Telegram, without me slowing down for other users."
# Path: Uploader/functions/ran_text.py
def random_char(y):
return ''.join(random.choice(string.ascii_letters) for _ in range(y))
# Path: Uploader/functions/display_progress.py
async def progress_for_pyrogram(
current,
total,
ud_type,
message,
start
):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
# if round(current / total * 100, 0) % 5 == 0:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
elapsed_time = TimeFormatter(milliseconds=elapsed_time)
estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)
progress = "[{0}{1}] \nP: {2}%\n".format(
''.join(["◾" for _ in range(math.floor(percentage / 5))]),
''.join(["◽" for _ in range(20 - math.floor(percentage / 5))]),
round(percentage, 2),
)
tmp = progress + "{0} of {1}\n\nSpeed: {2}/s\n\nETA: {3}\n\n".format(
humanbytes(current),
humanbytes(total),
humanbytes(speed),
# elapsed_time if elapsed_time != '' else "0 s",
estimated_total_time if estimated_total_time != '' else "0 s"
)
try:
await message.edit(text=f"{ud_type}\n {tmp}")
except Exception as e:
logger.info(f"Error {e}")
return
# Path: Uploader/functions/display_progress.py
def humanbytes(size):
# https://stackoverflow.com/a/49361727/4723940
# 2**10 = 1024
if not size:
return ""
power = 2**10
n = 0
Dic_powerN = {0: ' ', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}
while size > power:
size /= power
n += 1
return f"{str(round(size, 2))} {Dic_powerN[n]}B"
# Path: Uploader/button.py
import os
import json
import time
import shutil
import asyncio
import logging
import subprocess
from pyrogram.types import *
from datetime import datetime
from Uploader.utitles import *
from Uploader.config import Config
from sample_config import Config
from Uploader.script import Translation
from Uploader.functions.ran_text import random_char
from Uploader.functions.display_progress import progress_for_pyrogram, humanbytes
logger.info(t_response)
ad_string_to_replace = "please report this issue on https://github.com/kalanakt/All-Url-Uploader/issues"
if e_response and ad_string_to_replace in e_response:
error_message = e_response.replace(ad_string_to_replace, "")
await update.message.edit_caption(
text=error_message
)
return False
if t_response:
logger.info(t_response)
try:
os.remove(save_ytdl_json_path)
except FileNotFoundError as exc:
pass
end_one = datetime.now()
time_taken_for_download = (end_one - start).seconds
file_size = Config.TG_MAX_FILE_SIZE + 1
try:
file_size = os.stat(download_directory).st_size
except FileNotFoundError as exc:
download_directory = os.path.splitext(
download_directory)[0] + "." + "mkv"
# https://stackoverflow.com/a/678242/4723940
file_size = os.stat(download_directory).st_size
download_location = f"{Config.DOWNLOAD_LOCATION}/{update.from_user.id}.jpg"
thumb = download_location if os.path.isfile(
download_location) else None
if ((file_size > Config.TG_MAX_FILE_SIZE)):
await update.message.edit_caption(
caption=Translation.RCHD_TG_API_LIMIT.format(
time_taken_for_download, humanbytes(file_size))
)
else:
await update.message.edit_caption(
caption=Translation.UPLOAD_START.format(custom_file_name)
)
start_time = time.time()
if tg_send_type == "video":
width, height, duration = await Mdata01(download_directory)
await update.message.reply_video(
# chat_id=update.message.chat.id,
video=download_directory,
caption=description,
duration=duration,
width=width,
height=height,
supports_streaming=True,
thumb=thumb,
# reply_to_message_id=update.id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "audio":
duration = await Mdata03(download_directory)
await update.message.reply_audio(
# chat_id=update.message.chat.id,
audio=download_directory,
caption=description,
duration=duration,
thumb=thumb,
# reply_to_message_id=update.id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
elif tg_send_type == "vm":
width, duration = await Mdata02(download_directory)
await update.message.reply_video_note(
# chat_id=update.message.chat.id,
video_note=download_directory,
duration=duration,
length=width,
thumb=thumb,
# reply_to_message_id=update.id,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
else:
await update.message.reply_document(
# chat_id=update.message.chat.id,
document=download_directory,
caption=description,
# parse_mode=enums.ParseMode.HTML,
# reply_to_message_id=update.id,
thumb=thumb,
progress=progress_for_pyrogram,
progress_args=(
Translation.UPLOAD_START,
update.message,
start_time
)
)
end_two = datetime.now()
time_taken_for_upload = (end_two - end_one).seconds
try:
shutil.rmtree(tmp_directory_for_each_user)
except Exception:
pass
await update.message.edit_caption(
caption=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(
| time_taken_for_download, time_taken_for_upload) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: kramerlab/PeerLearning
# Path: dqn_peer.py
class DQNPeer(make_peer_class(DQN)):
"""
A DQN version to be used with peer learning. Therefore, it features
a critic function
"""
def critic(self, observations, actions):
q_values = self.q_net(observations).reshape(len(actions), -1, 1)
tmp = q_values[range(len(actions)), actions, :]
return tmp, tmp # SAC critic outputs multiple values, so this need
# to do the same
def get_action(self, *args, **kwargs):
action, _ = super().get_action(*args, **kwargs)
return action.reshape(-1), _
# Path: peer.py
class PeerGroup:
""" A group of peers who train together. """
def __init__(self, peers, use_agent_values=False, init_agent_values=200.,
lr=0.95, switch_ratio=0, use_advantage=False,
max_peer_epochs=1_000_000_000):
"""
:param peers: An iterable of peer agents
:param lr: The learning rate for trust and agent values
:param switch_ratio: switch_ratio == 0 means no switching
:param use_advantage: use advantage instead of value for AV updates
"""
self.peers = peers
self.lr = lr
self.switch_ratio = switch_ratio
self.active_peer = None # index of currently learning peer
self.solo_epoch = False
self.use_advantage = use_advantage
self.max_peer_epochs = max_peer_epochs
if use_agent_values:
self.agent_values = np.full(len(peers), init_agent_values,
dtype=np.float32)
key = "agent_values"
for peer in peers:
peer.n_peers = len(peers)
peer.group = self
# setup agent values
if use_agent_values:
peer.peer_values[key] = self.agent_values # noqa (Eq. 6)
peer.peer_value_functions[key] = self._update_agent_values
def _update_agent_values(self, batch_size=10):
""" Updates the agent values with samples from the peers' buffers"""
targets = np.zeros_like(self.peers, dtype=np.float32)
counts = np.zeros_like(self.peers, dtype=np.float32)
for peer in self.peers:
bs = batch_size // len(self.peers)
# reward, action, peer, new_obs, old_obs
if peer.buffer is not None:
batch = peer.buffer.sample(bs)
if batch is None: # buffer not sufficiently full
return
obs = np.array([b[3] for b in batch]).reshape(bs, -1)
v = peer.value(obs)
if self.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)
prev_v = peer.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no advantage (see Eq. 5)
for i in range(len(batch)): # Eq. 8
target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7
def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):
""" The outer peer learning routine. """
assert len(callbacks) == len(self.peers)
# more solo epochs
boost_single = 0 < self.switch_ratio < 1
if boost_single:
self.switch_ratio = 1 / self.switch_ratio
self.solo_epoch = False
peer_epochs = 0
for i in range(n_epochs):
# don't do peer learning forever
if peer_epochs < self.max_peer_epochs:
# ratio of 0 never performs a solo episode
if (i % (1 + self.switch_ratio) == 1) ^ boost_single:
self.solo_epoch = True
else:
peer_epochs += 1
else: # budget spent
self.solo_epoch = True
for p, peer, callback in zip(it.count(), self.peers, callbacks):
self.active_peer = p
peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,
callback=callback, tb_log_name=f"Peer{p}",
reset_num_timesteps=False,
log_interval=None, **kwargs)
# update epoch for temperature decay
peer.epoch += 1
self.active_peer = None
def __len__(self):
return len(self.peers)
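To make the agent-value update above concrete, here is a minimal NumPy sketch of the same bookkeeping: per-peer TD-style targets are averaged, peers without samples keep their old value, and the values then move toward the targets with learning rate lr. The numbers are invented, no buffers or critics are involved, and this is an illustration of the quoted logic rather than code from the repository.

import numpy as np

agent_values = np.array([200.0, 200.0, 200.0], dtype=np.float32)
lr = 0.95
# invented samples: (reward + gamma * V(s') - V(s), index of the suggesting peer)
samples = [(1.5, 0), (0.5, 0), (-2.0, 2)]
targets = np.zeros_like(agent_values)
counts = np.zeros_like(agent_values)
for td_target, peer_idx in samples:
    targets[peer_idx] += td_target
    counts[peer_idx] += 1
# peers that produced no samples keep their current value, as in the quoted code
targets[counts == 0] = agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
agent_values += lr * (targets - agent_values)
print(agent_values)  # peer 0 pulled toward 1.0, peer 1 unchanged, peer 2 toward -2.0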
# Path: peer.py
def make_peer_class(cls: Type[OffPolicyAlgorithm]):
""" Creates a mixin with the corresponding algorithm class.
:param cls: The learning algorithm (needs to have a callable critic).
:return: The mixed in peer agent class.
"""
class Peer(cls, ABC):
""" Abstract Peer class
needs to be mixed with a suitable algorithm. """
def __init__(self, temperature, temp_decay, algo_args, env,
use_trust=False, use_critic=False, init_trust_values=200,
buffer_size=1000, follow_steps=10, seed=None,
use_trust_buffer=True, solo_training=False,
peers_sample_with_noise=False,
sample_random_actions=False, sample_from_suggestions=True,
epsilon=0.0, env_args=None, only_follow_peers=False):
if env_args is None:
env_args = {}
super(Peer, self).__init__(**algo_args,
env=make_env(env, **env_args),
seed=seed)
# create noise matrix on the correct device
if hasattr(self.actor, "reset_noise"):
self.actor.reset_noise(self.env.num_envs)
self.solo_training = solo_training
self.init_values = dict()
# store all peer values, e.g., trust and agent values in a dict
self.peer_values = dict()
# store corresponding functions as well
self.peer_value_functions = dict()
self.buffer = SuggestionBuffer(buffer_size)
self.followed_peer = None
self.__n_peers = None
self.group = None
self.epoch = 0
if sample_random_actions:
epsilon = 1.0
if not solo_training:
# all peers suggest without noise
self.peers_sample_with_noise = peers_sample_with_noise
# actions are sampled instead of taken greedily
self.sample_actions = sample_from_suggestions
self.epsilon = epsilon
self.use_critic = use_critic
if use_trust:
self.trust_values = np.array([])
self.init_values["trust"] = init_trust_values
self.peer_value_functions["trust"] = self._update_trust
self.use_buffer_for_trust = use_trust_buffer
# sampling parameters
self.temperature = temperature
self.temp_decay = temp_decay
self.follow_steps = follow_steps
self.steps_followed = 0
self.only_follow_peers = only_follow_peers
@property
def n_peers(self):
return self.__n_peers
@n_peers.setter
def n_peers(self, n_peers):
self.__n_peers = n_peers
# Also reset the trust values
if "trust" in self.init_values.keys():
self.trust_values = np.full(self.__n_peers,
self.init_values["trust"],
dtype=np.float32)
self.peer_values["trust"] = self.trust_values
def critique(self, observations, actions) -> np.array:
""" Evaluates the actions with the critic. """
with torch.no_grad():
a = torch.as_tensor(actions, device=self.device)
o = torch.as_tensor(observations, device=self.device)
# Compute the next Q values: min over all critic targets
q_values = torch.cat(self.critic(o, a), dim=1) # noqa
q_values, _ = torch.min(q_values, dim=1, keepdim=True)
return q_values.cpu().numpy()
def get_action(self, obs, deterministic=False):
""" The core function of peer learning acquires the suggested
actions of the peers and chooses one based on the settings. """
# follow peer for defined number of steps
followed_steps = self.steps_followed
self.steps_followed += 1
self.steps_followed %= self.follow_steps
if 0 < followed_steps:
peer = self.group.peers[self.followed_peer]
det = (peer != self and not self.peers_sample_with_noise) or \
deterministic
action, _ = peer.policy.predict(obs, deterministic=det)
return action, None
# get actions
actions = []
for peer in self.group.peers:
# self always uses exploration; the suggestions of the other
# peers only do so if the critic method isn't used.
det = (peer != self and not self.peers_sample_with_noise) or \
deterministic
action, _ = peer.policy.predict(obs, deterministic=det)
actions.append(action)
actions = np.asarray(actions).squeeze(1)
# critic (Eq. 3)
if self.use_critic:
observations = np.tile(obs, (self.n_peers, 1))
q_values = self.critique(observations, actions).reshape(-1)
self.peer_values['critic'] = q_values # part of Eq. 9
# calculate peer values, e.g., trust and agent values
values = np.zeros(self.n_peers)
for key in self.peer_values.keys():
# part of Eq. 9 incl. Footnote 7
values += self.__normalize(self.peer_values[key])
if self.sample_actions:
# sample action from probability distribution (Eq. 2)
temp = self.temperature * np.exp(-self.temp_decay * self.epoch)
p = np.exp(values / temp)
p /= np.sum(p)
self.followed_peer = np.random.choice(self.n_peers, p=p)
elif self.only_follow_peers:
p = np.full(self.n_peers, 1 / (self.n_peers - 1))
p[self.group.peers.index(self)] = 0
self.followed_peer = np.random.choice(self.n_peers, p=p)
else:
# act (epsilon) greedily
if np.random.random(1) >= self.epsilon:
self.followed_peer = np.argmax(values)
else:
self.followed_peer = np.random.choice(self.n_peers)
action = actions[self.followed_peer].reshape(1, -1)
return action, None
@staticmethod
def __normalize(values):
""" Normalize the values based on their absolute maximum. """
return values / np.max(np.abs(values))
def value(self, observations) -> np.ndarray:
""" Calculates the value of the observations. """
actions, _ = self.policy.predict(observations, False)
return self.critique(observations, actions)
def _update_trust(self, batch_size=10):
""" Updates the trust values with samples from the buffer.
(Eq. 5 and 8)
"""
if self.use_buffer_for_trust:
batch = self.buffer.sample(batch_size)
else:
batch = self.buffer.latest()
batch_size = 1
if batch is None: # buffer not sufficiently full
return
# next observations
obs = np.array([b[3] for b in batch]).reshape(batch_size, -1)
v = self.value(obs)
if self.group.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(batch_size,
-1)
prev_v = self.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no comparison to own act (Eq. 5)
targets = np.zeros(self.n_peers)
counts = np.zeros(self.n_peers)
for i in range(batch_size):
target = (batch[i][0] + self.gamma * v[i]) - prev_v[i] # Eq. 8
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.trust_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
# Eq. 4
self.trust_values += self.group.lr * (targets - self.trust_values)
def _on_step(self):
""" Adds updates of the peer values, e.g., trust or agent
values. """
super(Peer, self)._on_step() # noqa
if not self.group.solo_epoch:
# update values, e.g., trust and agent values after ever step
for key in self.peer_value_functions.keys():
self.peer_value_functions[key]()
def _store_transition(self, replay_buffer, buffer_action, new_obs,
reward, dones, infos):
""" Adds suggestion buffer handling. """
# get previous observations
old_obs = self._last_obs
super(Peer, self)._store_transition(replay_buffer, # noqa
buffer_action, new_obs,
reward, dones, infos)
if not self.group.solo_epoch:
# store transition in suggestion buffer as well
self.buffer.add(reward, buffer_action, self.followed_peer,
new_obs, old_obs)
def _predict_train(self, observation, state=None,
episode_start=None, deterministic=False):
""" The action selection during training involves the peers. """
if deterministic:
return self.policy.predict(observation, state=state,
episode_start=episode_start,
deterministic=deterministic)
else:
return self.get_action(observation)
def learn(self, solo_episode=False, **kwargs):
""" Adds action selection with help of peers. """
predict = self.predict # safe for later
# use peer suggestions only when wanted
if not (self.solo_training or solo_episode):
self.predict = self._predict_train
else:
self.followed_peer = self.group.peers.index(self)
result = super(Peer, self).learn(**kwargs)
self.predict = predict # noqa
return result
def _excluded_save_params(self):
""" Excludes attributes that are functions. Otherwise, the save
method fails. """
ex_list = super(Peer, self)._excluded_save_params()
ex_list.extend(["peer_value_functions", "peer_values",
"group", "predict"])
return ex_list
return Peer
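DQNPeer later in this listing shows the intended use of this factory (class DQNPeer(make_peer_class(DQN))). As a hedged sketch of the same pattern with an algorithm that already exposes a critic, the snippet below mixes the Peer behaviour into stable-baselines3's SAC; the environment id and hyperparameters are placeholders, and it assumes the gym/stable-baselines3 versions the repository targets.

# Illustrative only -- mirrors the DQNPeer pattern for SAC; values are placeholders.
from stable_baselines3 import SAC
from peer import make_peer_class  # the factory quoted above (# Path: peer.py)

SACPeer = make_peer_class(SAC)    # SAC already provides a usable critic

peer = SACPeer(
    temperature=1.0,              # softmax temperature for the Eq. 2 sampling
    temp_decay=0.0,               # no decay across epochs in this sketch
    algo_args=dict(policy="MlpPolicy", learning_rate=3e-4),
    env="Pendulum-v1",            # any continuous-control Gym id
    use_trust=True,
    use_critic=True,
)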
# Path: callbacks.py
class PeerEvalCallback(EvalCallback):
"""
Callback to track collective measurements about peers.
.. warning::
When using multiple environments, each call to ``env.step()``
will effectively correspond to ``n_envs`` steps.
To account for that, you can use
``eval_freq = max(eval_freq // n_envs, 1)``
:param peer_group: The group of peers
:param eval_env: The environment used for initialization
:param n_eval_episodes: The number of episodes to test the agent
:param eval_freq: Evaluate the agent every ``eval_freq`` call of the
callback.
:param log_path: Path to a folder where the evaluations
(``evaluations.npz``) will be saved. It will be updated at each
evaluation.
:param deterministic: Whether the evaluation should
use a stochastic or deterministic actions.
:param render: Whether to render or not the environment during evaluation
:param verbose:
:param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has
not been wrapped with a Monitor wrapper)
"""
def __init__(
self,
peer_group: PeerGroup,
eval_envs: List[Union[gym.Env, VecEnv]],
n_samples=100,
**kwargs
):
self.peer_group = peer_group
self.eval_envs = eval_envs
self.n_samples = n_samples
self.last_logged_matrix = None
self.follow_matrix = np.zeros((len(peer_group), len(peer_group)))
self.start_time = time.time()
super().__init__(**kwargs)
def _on_step(self) -> bool:
self.accumulate_followed_peers() # needs to be done at every step
# log time for debugging etc.
self.logger.record("time/time_elapsed",
time.time() - self.start_time,
exclude="tensorboard")
super()._on_step()
if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
if 'agent_values' in self.peer_group.__dict__:
self.track_agent_values()
if 'trust_values' in self.peer_group.peers[0].__dict__:
self.track_trust_values()
self.track_followed_agent(self.peer_group.active_peer)
peer = self.peer_group.active_peer
eval_values = {
f"Peer{peer}_0/eval/mean_reward": self.last_mean_reward,
}
if peer == len(self.peer_group) - 1:
eval_values["global_step"] = self.n_calls
wandb.log(eval_values, commit=True)
else:
wandb.log(eval_values, commit=False)
return True
def track_agent_values(self):
n_agents = len(self.peer_group.peers)
for i in range(n_agents):
agent_value = self.peer_group.agent_values[i]
wandb.log({'Peer{}_0/eval/agent_value'.format(i): agent_value},
commit=False)
return True
def track_trust_values(self):
peer = self.peer_group.active_peer
trust_i = self.peer_group.peers[peer].trust_values
for j, el in np.ndenumerate(trust_i):
wandb.log({'Peer{}_0/eval/trust_{}'.format(peer, j[0]): el},
commit=False)
return True
def accumulate_followed_peers(self):
peer = self.peer_group.active_peer
followed_peer = self.peer_group.peers[peer].followed_peer
if followed_peer is not None:
self.follow_matrix[peer, followed_peer] += 1
def track_followed_agent(self, active_peer):
if self.last_logged_matrix is None:
diff = self.follow_matrix
else:
diff = self.follow_matrix - self.last_logged_matrix
for (followed_peer,), count in np.ndenumerate(
self.follow_matrix[active_peer]):
wandb.log({'Peer{}_0/eval/follow_count{}'.format(
active_peer, followed_peer): count}, commit=False)
# also log difference
wandb.log({'Peer{}_0/eval/follow_count_{}diff'.format(
active_peer, followed_peer): diff[active_peer, followed_peer]},
commit=False)
self.last_logged_matrix = np.copy(self.follow_matrix)
def commit_global_step(self, timesteps):
if self.peer_group.active_peer == len(self.peer_group) - 1:
eval_values = {"global_step": self.n_calls + self.eval_freq}
wandb.log(eval_values, commit=True)
self.n_calls += timesteps
# Path: utils.py
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
# Path: utils.py
def add_default_values_to_parser(parser):
parser.add_argument("--job_id", type=str,
default=wandb.util.generate_id())
parser.add_argument("--agent-count", type=int, help="Number of agents.",
default=4)
parser.add_argument("--device", type=str, default="auto",
choices=["cpu", "cuda", "auto"],
help="Device to use, either 'cpu', 'cuda' for GPU or "
"'auto'.")
parser.add_argument("--env", type=str, default="HalfCheetahBulletEnv-v0",
help="OpenAI Gym environment to perform algorithm on.")
parser.add_argument("--env_args", action=StoreDictKeyPair,
nargs='*', metavar="KEY=VAL", default={})
parser.add_argument("--seed", type=int, default=1,
help="Random seed in [0, 2 ** 32)")
parser.add_argument("--wandb", type=str, default='offline',
choices=["online", "offline", "disabled"])
parser.add_argument("--discrete-actions", type=str2bool, nargs="?",
const=False, default=False)
parser.add_argument("--save-dir", type=Path,
default=Path.cwd().joinpath("Experiments"))
# Agents
agent_parser = parser.add_argument_group("Agent")
agent_parser.add_argument("--mix-agents", type=str, nargs='*',
default=["SAC"])
agent_parser.add_argument("--net-arch", type=int, nargs='*',
action='append')
agent_parser.add_argument("--load_paths", type=str, nargs='*',
default=[])
agent_parser.add_argument("--agents_to_store", type=int, nargs='*',
default=[])
return parser
# Path: utils.py
def log_reward_avg_in_wandb(callbacks):
results = []
for callback in callbacks:
eval_callback = callback[-1]
result = eval_callback.evaluations_results
results.append(np.mean(result))
wandb.log({'reward_avg': np.mean(results)})
# Path: utils.py
def add_default_values_to_train_parser(training_parser):
training_parser.add_argument("--steps", type=int, default=3_000_000,
help="Total number of time steps to train "
"the agent.")
training_parser.add_argument("--eval-interval", type=int,
default=10_000,
help="Interval in time steps between "
"evaluations.")
training_parser.add_argument("--n-eval-episodes", type=int,
default=10,
help="Number of episodes for each "
"evaluation.")
training_parser.add_argument("--buffer-size", type=int,
default=1_000_000)
training_parser.add_argument("--buffer-start-size", type=int,
default=1_000,
help="Minimum replay buffer size before "
"performing gradient updates.")
training_parser.add_argument("--batch-size", type=int,
default=100,
help="Minibatch size")
training_parser.add_argument("--min-epoch-length", type=int,
default=10_000,
help="Minimal length of a training_parser "
"epoch.")
training_parser.add_argument("--learning_rate", type=str2func, nargs='*',
default=[3e-4],
help='Learning rate for adam optimizer, '
'the same learning rate will be used '
'for all networks (Q-Values, Actor and '
'Value function) it can be a function'
' of the current progress remaining '
'(from 1 to 0)')
training_parser.add_argument("--tau", type=float, default=0.005)
training_parser.add_argument("--gamma", type=float, default=0.99)
training_parser.add_argument("--gradient_steps", type=int,
default=1)
training_parser.add_argument("--train_freq", type=int,
default=1)
training_parser.add_argument("--target_update_interval", type=int,
default=1)
dqn_parser = training_parser.add_argument_group("DQN")
dqn_parser.add_argument("--exploration-fraction", type=float, default=0.1)
dqn_parser.add_argument("--exploration-final-eps", type=float,
default=0.05)
return training_parser
# Path: utils.py
def new_random_seed():
return np.random.randint(np.iinfo(np.int32).max)
# Path: utils.py
def make_env(env_str, n_envs=1, **env_args):
envs = []
for _ in range(n_envs):
def env_func():
env = Monitor(gym.make(env_str, **env_args))
env.seed(new_random_seed())
return env
envs.append(env_func)
return DummyVecEnv(envs)
# Path: utils.py
class ControllerArguments:
def __init__(self, number_agents):
self.number_agents = number_agents
def argument_for_every_agent(self, arguments, i):
if type(arguments) is list:
if len(arguments) == 1:
return arguments[0]
elif len(arguments) == self.number_agents:
return arguments[i]
else:
raise AssertionError(f'number of arguments ({len(arguments)}) '
f'has to be 1 or == number of agents '
f'({self.number_agents}); input is'
f' {arguments}')
else:
raise AssertionError(f'input is not a list; input is {arguments} '
f'{type(arguments)}')
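A small usage sketch of the broadcasting rule implemented above: a one-element list is shared by every agent, a list with one entry per agent is indexed, and anything else raises. The values are made up, and the sketch assumes the utils module quoted above is importable.

from utils import ControllerArguments  # the helper quoted above (# Path: utils.py)

ctrl = ControllerArguments(number_agents=3)
print(ctrl.argument_for_every_agent([3e-4], 2))                 # 0.0003 (broadcast)
print(ctrl.argument_for_every_agent(["SAC", "SAC", "DQN"], 2))  # 'DQN' (per agent)
# any other list length, or a non-list argument, raises AssertionError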
# Path: run_peer.py
import argparse
import datetime
import gym
import wandb
import predefined_agents # noqa: F401
import env as local_envs # noqa: F401
from pathlib import Path
from stable_baselines3 import SAC, TD3
from stable_baselines3.common.utils import set_random_seed, \
update_learning_rate
from wandb.integration.sb3 import WandbCallback
from dqn_peer import DQNPeer
from peer import PeerGroup, make_peer_class
from callbacks import PeerEvalCallback
from utils import str2bool, add_default_values_to_parser, \
log_reward_avg_in_wandb, add_default_values_to_train_parser, \
new_random_seed, make_env, ControllerArguments
def add_args():
# create arg parser
parser = argparse.ArgumentParser(description="Peer learning.")
# General
parser.add_argument("--save-name", type=str, default="delete_me")
parser = add_default_values_to_parser(parser)
# Training
training = parser.add_argument_group("Training")
add_default_values_to_train_parser(training)
# Peer Learning
peer_learning = parser.add_argument_group("Peer Learning")
peer_learning.add_argument("--follow-steps", type=int, default=10)
peer_learning.add_argument("--switch-ratio", type=float, default=1,
help="How many times peer training compared to "
"solo training. Ratio of peer learning "
"episodes to solo episodes; 0 -> only "
"peer learning episodes. "
"ratio 0 {'solo': 0, 'peer': 100}"
"ratio 0.2 {'solo': 83, 'peer': 17}"
"ratio 0.25 {'solo': 80, 'peer': 20}"
"ratio 0.333333 {'solo': 75, 'peer': 25}"
"ratio 0.5 {'solo': 67, 'peer': 33}"
"ratio 1 {'solo': 50, 'peer': 50}"
"ratio 2 {'solo': 33, 'peer': 67}"
"ratio 3 {'solo': 25, 'peer': 75}"
"ratio 4 {'solo': 20, 'peer': 80}"
"ratio 5 {'solo': 17, 'peer': 83}")
peer_learning.add_argument("--peer-learning", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--peers-sample-with-noise", type=str2bool,
nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-agent-value", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-trust", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--use-trust-buffer", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--trust-buffer-size", type=int, default=1000)
peer_learning.add_argument("--use-critic", type=str2bool, nargs="?",
const=True, default=True)
peer_learning.add_argument("--sample_random_actions", type=str2bool,
nargs="?", const=True, default=False)
| peer_learning.add_argument("--trust-lr", type=float, default=0.001) |
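One step of the peer-learning example above that benefits from a concrete illustration is the Boltzmann selection in Peer.get_action (Eq. 2 in the quoted comments): the combined peer values are normalised by their maximum absolute value and turned into a softmax whose temperature decays exponentially with the epoch. A standalone NumPy sketch with invented numbers:

import numpy as np

values = np.array([150.0, 210.0, 190.0])     # combined trust/agent-value/critic scores
values = values / np.max(np.abs(values))     # normalisation as in __normalize
temperature, temp_decay, epoch = 1.0, 0.1, 5
temp = temperature * np.exp(-temp_decay * epoch)
p = np.exp(values / temp)
p /= np.sum(p)
followed_peer = np.random.choice(len(values), p=p)
print(p.round(3), followed_peer)             # higher-valued peers are more likely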
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ZS-YANG/FemtoDet-v3
# Path: mmdet/models/dense_heads/atss_head.py
class ATSSHead(AnchorHead):
"""Detection Head of `ATSS <https://arxiv.org/abs/1912.02424>`_.
ATSS head structure is similar to FCOS; however, ATSS uses anchor boxes
and assigns labels by Adaptive Training Sample Selection instead of max-IoU.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
pred_kernel_size (int): Kernel size of ``nn.Conv2d``
stacked_convs (int): Number of stacking convs of the head.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Defaults to None.
norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
layer. Defaults to ``dict(type='GN', num_groups=32,
requires_grad=True)``.
reg_decoded_bbox (bool): If true, the regression loss would be
applied directly on decoded bounding boxes, converting both
the predicted boxes and regression targets to absolute
coordinates format. Defaults to False. It should be `True` when
using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
loss_centerness (:obj:`ConfigDict` or dict): Config of centerness loss.
Defaults to ``dict(type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0)``.
init_cfg (:obj:`ConfigDict` or dict or list[dict] or
list[:obj:`ConfigDict`]): Initialization config dict.
"""
def __init__(self,
num_classes: int,
in_channels: int,
pred_kernel_size: int = 3,
stacked_convs: int = 4,
conv_cfg: OptConfigType = None,
norm_cfg: ConfigType = dict(
type='GN', num_groups=32, requires_grad=True),
reg_decoded_bbox: bool = True,
loss_centerness: ConfigType = dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
init_cfg: MultiConfig = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='atss_cls',
std=0.01,
bias_prob=0.01)),
**kwargs) -> None:
self.pred_kernel_size = pred_kernel_size
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super().__init__(
num_classes=num_classes,
in_channels=in_channels,
reg_decoded_bbox=reg_decoded_bbox,
init_cfg=init_cfg,
**kwargs)
self.sampling = False
self.loss_centerness = MODELS.build(loss_centerness)
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
pred_pad_size = self.pred_kernel_size // 2
self.atss_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
self.pred_kernel_size,
padding=pred_pad_size)
self.atss_reg = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 4,
self.pred_kernel_size,
padding=pred_pad_size)
self.atss_centerness = nn.Conv2d(
self.feat_channels,
self.num_base_priors * 1,
self.pred_kernel_size,
padding=pred_pad_size)
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.prior_generator.strides])
def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor]]:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
return multi_apply(self.forward_single, x, self.scales)
def forward_single(self, x: Tensor, scale: Scale) -> Sequence[Tensor]:
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale
level, the channels number is num_anchors * 4.
centerness (Tensor): Centerness for a single scale level, the
channel number is (N, num_anchors * 1, H, W).
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.atss_cls(cls_feat)
# we just follow atss, not apply exp in bbox_pred
bbox_pred = scale(self.atss_reg(reg_feat)).float()
centerness = self.atss_centerness(reg_feat)
return cls_score, bbox_pred, centerness
def loss_by_feat_single(self, anchors: Tensor, cls_score: Tensor,
bbox_pred: Tensor, centerness: Tensor,
labels: Tensor, label_weights: Tensor,
bbox_targets: Tensor, avg_factor: float) -> dict:
"""Calculate the loss of a single scale level based on the features
extracted by the detection head.
Args:
cls_score (Tensor): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W).
bbox_pred (Tensor): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W).
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
bbox_targets (Tensor): BBox regression targets of each anchor with
shape (N, num_total_anchors, 4).
avg_factor (float): Average factor that is used to average
the loss. When using sampling method, avg_factor is usually
the sum of positive and negative priors. When using
`PseudoSampler`, `avg_factor` is usually equal to the number
of positive priors.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels).contiguous()
bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# classification loss
loss_cls = self.loss_cls(
cls_score, labels, label_weights, avg_factor=avg_factor)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_centerness = centerness[pos_inds]
centerness_targets = self.centerness_target(
pos_anchors, pos_bbox_targets)
pos_decode_bbox_pred = self.bbox_coder.decode(
pos_anchors, pos_bbox_pred)
# regression loss
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_bbox_targets,
weight=centerness_targets,
avg_factor=1.0)
# centerness loss
loss_centerness = self.loss_centerness(
pos_centerness, centerness_targets, avg_factor=avg_factor)
else:
loss_bbox = bbox_pred.sum() * 0
loss_centerness = centerness.sum() * 0
centerness_targets = bbox_targets.new_tensor(0.)
return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
def loss_by_feat(
self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
centernesses: List[Tensor],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None) -> dict:
"""Calculate the loss based on the features extracted by the detection
head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
centernesses (list[Tensor]): Centerness for each scale
level with shape (N, num_anchors * 1, H, W)
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, batch_img_metas, device=device)
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore=batch_gt_instances_ignore)
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, avg_factor) = cls_reg_targets
avg_factor = reduce_mean(
torch.tensor(avg_factor, dtype=torch.float, device=device)).item()
losses_cls, losses_bbox, loss_centerness, \
bbox_avg_factor = multi_apply(
self.loss_by_feat_single,
anchor_list,
cls_scores,
bbox_preds,
centernesses,
labels_list,
label_weights_list,
bbox_targets_list,
avg_factor=avg_factor)
bbox_avg_factor = sum(bbox_avg_factor)
bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
return dict(
loss_cls=losses_cls,
loss_bbox=losses_bbox,
loss_centerness=loss_centerness)
def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor:
"""Calculate the centerness between anchors and gts.
Only calculate pos centerness targets, otherwise there may be nan.
Args:
anchors (Tensor): Anchors with shape (N, 4), "xyxy" format.
gts (Tensor): Ground truth bboxes with shape (N, 4), "xyxy" format.
Returns:
Tensor: Centerness between anchors and gts.
"""
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
l_ = anchors_cx - gts[:, 0]
t_ = anchors_cy - gts[:, 1]
r_ = gts[:, 2] - anchors_cx
b_ = gts[:, 3] - anchors_cy
left_right = torch.stack([l_, r_], dim=1)
top_bottom = torch.stack([t_, b_], dim=1)
centerness = torch.sqrt(
(left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
(top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
assert not torch.isnan(centerness).any()
return centerness
def get_targets(self,
anchor_list: List[List[Tensor]],
valid_flag_list: List[List[Tensor]],
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
unmap_outputs: bool = True) -> tuple:
"""Get targets for ATSS head.
This method is almost the same as `AnchorHead.get_targets()`. Besides
returning the targets as the parent method does, it also returns the
anchors as the first element of the returned tuple.
"""
num_imgs = len(batch_img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if batch_gt_instances_ignore is None:
batch_gt_instances_ignore = [None] * num_imgs
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list,
sampling_results_list) = multi_apply(
self._get_targets_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore,
unmap_outputs=unmap_outputs)
# Get `avg_factor` of all images, which calculate in `SamplingResult`.
# When using sampling method, avg_factor is usually the sum of
# positive and negative priors. When using `PseudoSampler`,
# `avg_factor` is usually equal to the number of positive priors.
avg_factor = sum(
[results.avg_factor for results in sampling_results_list])
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, bbox_weights_list, avg_factor)
def _get_targets_single(self,
flat_anchors: Tensor,
valid_flags: Tensor,
num_level_anchors: List[int],
gt_instances: InstanceData,
img_meta: dict,
gt_instances_ignore: Optional[InstanceData] = None,
unmap_outputs: bool = True) -> tuple:
"""Compute regression, classification targets for anchors in a single
image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
num_level_anchors (List[int]): Number of anchors of each scale
level.
gt_instances (:obj:`InstanceData`): Ground truth of instance
annotations. It usually includes ``bboxes`` and ``labels``
attributes.
img_meta (dict): Meta information for current image.
gt_instances_ignore (:obj:`InstanceData`, optional): Instances
to be ignored during training. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
labels (Tensor): Labels of all anchors in the image with shape
(N,).
label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4)
pos_inds (Tensor): Indices of positive anchor with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).
sampling_result (:obj:`SamplingResult`): Sampling results.
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg['allowed_border'])
if not inside_flags.any():
raise ValueError(
'There is no valid anchor inside the image boundary. Please '
'check the image size and anchor sizes, or set '
'``allowed_border`` to -1 to skip the condition.')
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
num_level_anchors_inside = self.get_num_level_anchors_inside(
num_level_anchors, inside_flags)
pred_instances = InstanceData(priors=anchors)
assign_result = self.assigner.assign(pred_instances,
num_level_anchors_inside,
gt_instances, gt_instances_ignore)
sampling_result = self.sampler.sample(assign_result, pred_instances,
gt_instances)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
if self.reg_decoded_bbox:
pos_bbox_targets = sampling_result.pos_gt_bboxes
else:
pos_bbox_targets = self.bbox_coder.encode(
sampling_result.pos_priors, sampling_result.pos_gt_bboxes)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
labels[pos_inds] = sampling_result.pos_gt_labels
if self.train_cfg['pos_weight'] <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg['pos_weight']
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
pos_inds, neg_inds, sampling_result)
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
"""Get the number of valid anchors in every level."""
split_inside_flags = torch.split(inside_flags, num_level_anchors)
num_level_anchors_inside = [
int(flags.sum()) for flags in split_inside_flags
]
return num_level_anchors_inside
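The centerness_target method above scores an anchor by how central its centre lies in the matched ground-truth box, sqrt((min(l,r)/max(l,r)) * (min(t,b)/max(t,b))). A standalone numeric check of that formula on one invented anchor/box pair:

import torch

anchors = torch.tensor([[10., 10., 30., 30.]])  # xyxy anchor, centre at (20, 20)
gts = torch.tensor([[0., 0., 40., 80.]])        # xyxy ground-truth box
cx = (anchors[:, 2] + anchors[:, 0]) / 2
cy = (anchors[:, 3] + anchors[:, 1]) / 2
l_, t_ = cx - gts[:, 0], cy - gts[:, 1]         # distances to left/top edges
r_, b_ = gts[:, 2] - cx, gts[:, 3] - cy         # distances to right/bottom edges
lr_ = torch.stack([l_, r_], dim=1)
tb_ = torch.stack([t_, b_], dim=1)
centerness = torch.sqrt((lr_.min(dim=-1)[0] / lr_.max(dim=-1)[0])
                        * (tb_.min(dim=-1)[0] / tb_.max(dim=-1)[0]))
print(centerness)  # tensor([0.5774]): centred horizontally, off-centre vertically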
# Path: mmdet/models/utils/misc.py
def images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = stack_boxes(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
# level_targets.append(target[:, start:end].squeeze(0))
level_targets.append(target[:, start:end])
start = end
return level_targets
# Path: mmdet/models/utils/misc.py
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple lists, each of which contains \
one kind of result returned by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
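multi_apply above maps a function over zipped argument lists and transposes the per-call tuples into per-output lists, which is how loss_by_feat gathers per-level losses. A self-contained toy example follows; the helper body is copied here only so the snippet runs on its own.

from functools import partial

def multi_apply(func, *args, **kwargs):         # copy of the quoted utility
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results)))

def add_and_mul(a, b, scale=1):
    return a + b, a * b * scale

sums, products = multi_apply(add_and_mul, [1, 2, 3], [4, 5, 6], scale=2)
print(sums)      # [5, 7, 9]
print(products)  # [8, 20, 36]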
# Path: mmdet/registry.py
MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])
# Path: mmdet/utils/dist_utils.py
def reduce_mean(tensor):
""""Obtain the mean of tensor on different GPUs."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor
# Path: mmdet/utils/typing_utils.py
# Path: projects/CO-DETR/codetr/co_atss_head.py
from typing import List
from torch import Tensor
from mmdet.models.dense_heads import ATSSHead
from mmdet.models.utils import images_to_levels, multi_apply
from mmdet.registry import MODELS
from mmdet.utils import InstanceList, OptInstanceList, reduce_mean
import torch
@MODELS.register_module()
class CoATSSHead(ATSSHead):
def loss_by_feat(
| self, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mit-ll-ai-technology/maite
# Path: src/maite/_internals/interop/artifact_hub/deduction.py
def make_entrypoint_deduction_filter(
require_deduction: bool = True,
name: str | None = None,
filter_type: ArtifactName | None = None,
filter_task: TaskName | None = None,
filter_regex: str | Pattern[str] | None = None,
filter_str: str | List[str] | None = None,
):
"""Creates a filter for listed entrypoints on a module
Parameters
----------
require_deduction: bool, default True
Require that the entrypoint has return annotations and they are runtime compatible with the Artifact Protocols
name: str, default None
Optionally require an exact match on name
filter_type: ArtifactName | None
model - entrypoint return implements the `Model` protocol
dataset - entrypoint return implements the `Dataset` protocol
metric - entrypoint return implements the `Metric` protocol
None - entrypoint returns any of the above
filter_task: 'image-classification' | 'object-detection' | None
image-classification - entrypoint implements the specialization of the type protocol for image classification
object-detection - entrypoint implements the specialization of the type protocol for object detection
None: No additional restrictions placed on the entrypoint
filter_str: str | List[str], default None
Optionally filter entrypoints to those whose names begin with the provided name or partial names
"""
# Note: The composition of filters is very practical, but also redundant. If this
# becomes a bottleneck, we could make a smarter expression composition system which
# unpacks and/or compositions and simplifies them. However, it is very likely that would be more
# expensive than just double-checking some conditions as we are here. The
# expressions are already built such that more general ones are evaluated first,
# also `and` compositions will short-circuit.
type_filters = {
"model": is_model_entrypoint,
"dataset": is_dataset_entrypoint,
"metric": is_metric_entrypoint,
None: or_filters(
is_model_entrypoint, is_dataset_entrypoint, is_metric_entrypoint
),
}
task_filters = {
"object-detection": or_filters(
is_object_detection_dataset_entrypoint, is_object_detector_entrypoint
),
# NOTE: the lack of parity in naming b/t dataset type and model type for OD above, and IC below
"image-classification": or_filters(
is_vision_dataset_entrypoint, is_image_classifier_entrypoint
),
None: identity,
}
# Even without requiring deduction we can enforce that the entrypoint must be a callable
# This is general enough, any symbol for which <symbol>(...) is a valid expression will pass.
filter = callable
if require_deduction:
# if we require deduction, then lookup should return some filter for the
# "<xyz>_filter" passed. this is the key, and both tables define that entry
type_filter = type_filters.get(filter_type, None)
if type_filter is None:
raise InvalidArgument(
f"Invalid type filter: {filter_type}, expected one of {get_args(ArtifactName)} or None"
)
task_filter = task_filters.get(filter_task, None)
if task_filter is None:
raise InvalidArgument(
f"Invalid task filter: {filter_task} excepted one of {get_args(TaskName)} or None"
)
filter = and_filters(filter, task_filter, type_filter)
else:
if filter_type is not None or filter_task is not None:
raise InvalidArgument(
"Filtering on Artifact type or Task category is only possible with `require_deduction=True`"
)
if filter_regex is not None:
filter = and_filters(filter, make_name_regex_filter(filter_regex))
if filter_str is not None:
strings = [filter_str] if not isinstance(filter_str, list) else filter_str
name_filters = or_filters(*(make_name_startswith_filter(s) for s in strings))
filter = and_filters(filter, name_filters)
if name is not None:
# this is the most restrictive condition that can be applied so putting it first makes sense as it will eagerly short circuit
filter = and_filters(make_name_match_filter(name), filter)
return filter
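The factory above composes predicates with and_filters/or_filters, which are imported elsewhere in deduction.py and not shown in this excerpt. The sketch below is one plausible closure-based implementation of such combinators, written only to illustrate the short-circuiting composition the comment describes; the real maite helpers may differ.

from typing import Any, Callable

Predicate = Callable[[Any], bool]

def and_filters(*filters: Predicate) -> Predicate:
    # hypothetical combinator: all() short-circuits on the first False
    def combined(obj: Any) -> bool:
        return all(f(obj) for f in filters)
    return combined

def or_filters(*filters: Predicate) -> Predicate:
    # hypothetical combinator: any() short-circuits on the first True
    def combined(obj: Any) -> bool:
        return any(f(obj) for f in filters)
    return combined

def load_model_entrypoint():  # toy stand-in for a hubconf entrypoint
    pass

is_public = lambda ep: not getattr(ep, "__name__", "_").startswith("_")
looks_like_model = lambda ep: "model" in getattr(ep, "__name__", "")
print(and_filters(callable, is_public, looks_like_model)(load_model_entrypoint))  # True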
# Path: src/maite/_internals/interop/artifact_hub/module_utils.py
def import_hubconf(local_dir: str | os.PathLike[str], module_name: str) -> ModuleType:
_local_dir = Path(local_dir)
with _add_to_sys_path(str(_local_dir)):
hub_module = _try_import(module_name, _local_dir / module_name)
# Note: This check does not really work as intended. Unless the import of the
# missing dependency happens at a scope that is not evaluated on import (say inside
# a function). We must import the hubconf module to check the dependencies var, but
# can't do that without triggering the import error first above. The error message
# will still be informative to user, but it is not very likely this will actually be
# raised as the source of signal on missing dependencies.
# We may want to consider modifying the extended hubconf spec to place this list in
# a special location making it possible to pre-parse it before import if there is a
# lot of user confusion around this point
deps = getattr(hub_module, "dependencies", [])
missing_deps = [pkg for pkg in deps if not _check_module_exists(pkg)]
if len(missing_deps) != 0:
raise RuntimeError(
f"Missing dependencies for hub endpoint: {', '.join(missing_deps)}"
)
return hub_module
# Path: src/maite/_internals/interop/artifact_hub/registry.py
class HubEndpointRegistry:
"""The registry for ArtifactHubProvider endpoint types.
This class holds the mapping of key names -> endpoint implementations
Attributes
----------
registered_endpoints : ClassVar[Dict[str, ]]
Maps registered names -> provider implementation types
Methods
-------
register_impl(impl_type: Type[ArtifactHubEndpoint], spec_tag: str)
Register an Endpoint type with a spec prefix tag
get_impl(spec_tag: str) -> Type[ArtifactHubEndpoint]
Lookup the Endpoint type associated with the given spec tag
list_registered_specs() -> List[str]
List Endpoint spec tags registered
"""
registered_endpoints = {}
def __init__(self, *args, **kwargs):
# Error on __init__ no need to instantiate the class. Technically, this would be
# "safe" in that the instance would hold a reference to the dict defined above,
# but there is no need to allow/encourage doing that
raise InternalError(
"The ProviderRegistry functionality should be accessed "
"through the class itself, do not instantiate this type"
)
@classmethod
def register_impl(cls, impl_type: Type[ArtifactHubEndpoint], spec_tag: str):
"""Register an Endpoint implementation
Parameters
----------
impl_type : Type[ArtifactHubEndpoint]
Some type implementing the required protocol to act as a hub endpoint
spec_tag : str
The spec prefix that will indicate this Endpoint type is to be used
"""
if spec_tag in cls.registered_endpoints:
warnings.warn(
f"Attempting to register endpoint {impl_type.__name__} under "
f"spec tag {spec_tag} which will overwrite the existing endpoint "
f"{cls.registered_endpoints[spec_tag].__name__} registered under "
"that name"
)
if not isinstance(impl_type, ArtifactHubEndpoint):
raise InvalidArgument(
"Attempting to register a hub endpoint type which does not satisfy the protocol for a hub endpoint."
)
cls.registered_endpoints[spec_tag] = impl_type
@classmethod
def list_registered_specs(cls):
return list(cls.registered_endpoints.keys())
@classmethod
def get_endpoint_impl(cls, spec):
"""Get the type associated with the given spec
Parameters
----------
spec : str
The spec tag used to register the type
"""
spec_tag = spec
impl_type = cls.registered_endpoints.get(spec_tag, None)
if impl_type is None:
registered_msg = "\n\t".join(cls.registered_endpoints.keys())
raise InvalidArgument(
f"Unable to find interface for spec tag {spec_tag}, the following are registered: \n{registered_msg}\n"
"Register a custom endpoint interface by inheriting from 'HubEndpoint'."
)
return impl_type
# Path: src/maite/_internals/interop/artifact_hub/api.py
from types import ModuleType
from typing import (
Any,
Callable,
Iterable,
List,
Optional,
Pattern,
Type,
TypeVar,
cast,
overload,
)
from maite._internals.interop.provider import ArtifactName, register_provider
from maite._internals.protocols.typing import (
AnyEntrypoint,
DatasetEntrypoint,
MetricEntrypoint,
ModelEntrypoint,
)
from maite.errors import InvalidArgument
from maite.protocols import ArtifactHubEndpoint, Dataset, Metric, Model, TaskName
from .deduction import make_entrypoint_deduction_filter
from .module_utils import import_hubconf
from .registry import HubEndpointRegistry
task=task,
require_deduction=True,
filter_regex=filter_regex,
filter_str=filter_str,
)
def load_dataset(
self,
*,
dataset_name: str,
task: TaskName | None = None,
split: str | None = None,
**entrypoint_options: Any,
) -> Dataset[Any]:
ep = self.get_entrypoint(dataset_name)
filter = make_entrypoint_deduction_filter(
filter_type="dataset", filter_task=task, require_deduction=True
)
if not filter(ep):
raise InvalidArgument(
f"Entrypoint {dataset_name} does exist, but does not return a Dataset suitable for {task}."
)
# cast is required, type checker can't detect filter limiting the type of ep
ep = cast(DatasetEntrypoint, ep)
return ep(split=split, **entrypoint_options)
# Metric Provider protocol
def list_metrics(
self,
*,
metric_name: str | None = None,
task: TaskName | None = None,
filter_regex: str | Pattern[str] | None = None,
filter_str: str | List[str] | None = None,
) -> Iterable[str]:
return self.list(
artifact_type="metric",
name=metric_name,
task=task,
require_deduction=True,
filter_regex=filter_regex,
filter_str=filter_str,
)
def load_metric(
self,
*,
metric_name: str,
task: TaskName | None = None,
**entrypoint_options: Any,
) -> Metric[Any, Any]:
ep = self.get_entrypoint(metric_name)
filter = make_entrypoint_deduction_filter(
filter_type="metric", filter_task=task, require_deduction=True
)
if not filter(ep):
raise InvalidArgument(
f"Entrypoint {metric_name} does exist, but does not return a Metric suitable for {task}."
)
# cast is required, type checker can't detect filter limiting the type of ep
ep = cast(MetricEntrypoint, ep)
return ep(**entrypoint_options)
# Model Provider protocol
def list_models(
self,
*,
filter_str: str | List[str] | None = None,
model_name: str | None = None,
task: TaskName | None = None,
filter_regex: str | Pattern[str] | None = None,
) -> Iterable[str]:
return self.list(
artifact_type="model",
task=task,
name=model_name,
require_deduction=True,
filter_regex=filter_regex,
filter_str=filter_str,
)
def load_model(
self,
*,
model_name: str,
task: TaskName | None = None,
**entrypoint_options: Any,
) -> Model[Any, Any]:
ep = self.get_entrypoint(model_name)
filter = make_entrypoint_deduction_filter(
filter_type="model", filter_task=task, require_deduction=True
)
if not filter(ep):
raise InvalidArgument(
f"Entrypoint {model_name} does exist, but does not return a Model suitable for {task}."
)
# cast is required, type checker can't detect filter limiting the type of ep
ep = cast(ModelEntrypoint, ep)
return ep(**entrypoint_options)
#
# Endpoint Registration API
#
@overload
@staticmethod
def register_endpoint(
endpoint_type: Type[HubEP_T],
) -> Type[HubEP_T]:
...
@overload
@staticmethod
def register_endpoint(
endpoint_type: None,
) -> Callable[[Type[HubEP_T]], Type[HubEP_T]]:
...
| @overload |
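The pair of register_endpoint overloads in the maite excerpt above (one accepting a type, one accepting None and returning a callable) is the usual typing pattern for a decorator that works both bare and called with parentheses. A generic, self-contained sketch of that pattern follows; the registry dict and class names are invented and unrelated to maite's actual internals.

from typing import Callable, Optional, Type, TypeVar, overload

T = TypeVar("T")
_REGISTRY: dict = {}

@overload
def register(cls: Type[T]) -> Type[T]: ...
@overload
def register(cls: None = ...) -> Callable[[Type[T]], Type[T]]: ...

def register(cls: Optional[type] = None):
    def _do_register(c):
        _REGISTRY[c.__name__] = c   # stand-in for a real registry call
        return c
    if cls is not None:             # bare usage: @register
        return _do_register(cls)
    return _do_register             # called usage: @register()

@register
class EndpointA: ...

@register()
class EndpointB: ...

print(sorted(_REGISTRY))  # ['EndpointA', 'EndpointB']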
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Tps-F/rvc-onnx-test
# Path: onnxlib/attentions.py
class Encoder(nn.Module):
class Decoder(nn.Module):
class MultiHeadAttention(nn.Module):
class FFN(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
def forward(self, x, x_mask):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
proximal_bias=False,
proximal_init=True,
**kwargs
):
def forward(self, x, x_mask, h, h_mask):
def __init__(
self,
channels,
out_channels,
n_heads,
p_dropout=0.0,
window_size=None,
heads_share=True,
block_length=None,
proximal_bias=False,
proximal_init=False,
):
def forward(
self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None
):
def attention(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
def _matmul_with_relative_values(self, x, y):
def _matmul_with_relative_keys(self, x, y):
def _get_relative_embeddings(self, relative_embeddings, length: int):
def _relative_position_to_absolute_position(self, x):
def _absolute_position_to_relative_position(self, x):
def _attention_bias_proximal(self, length: int):
def __init__(
self,
in_channels,
out_channels,
filter_channels,
kernel_size,
p_dropout=0.0,
activation: str = None,
causal=False,
):
def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:
def forward(self, x: torch.Tensor, x_mask: torch.Tensor):
def _causal_padding(self, x):
def _same_padding(self, x):
# Path: onnxlib/commons.py
def init_weights(m, mean=0.0, std=0.01):
def get_padding(kernel_size, dilation=1):
def kl_divergence(m_p, logs_p, m_q, logs_q):
def rand_gumbel(shape):
def rand_gumbel_like(x):
def slice_segments(x, ids_str, segment_size=4):
def slice_segments2(x, ids_str, segment_size=4):
def rand_slice_segments(x, x_lengths=None, segment_size=4):
def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
def subsequent_mask(length):
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
def convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:
def shift_1d(x):
def sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):
def generate_path(duration, mask):
def clip_grad_value_(parameters, clip_value, norm_type=2):
# Path: onnxlib/modules.py
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
class ConvReluNorm(nn.Module):
class DDSConv(nn.Module):
class WN(torch.nn.Module):
class ResBlock1(torch.nn.Module):
class ResBlock2(torch.nn.Module):
class Log(nn.Module):
class Flip(nn.Module):
class ElementwiseAffine(nn.Module):
class ResidualCouplingLayer(nn.Module):
class ConvFlow(nn.Module):
def __init__(self, channels, eps=1e-5):
def forward(self, x):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
def forward(self, x, x_mask):
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
def forward(
self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None
):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
def forward(self, x, x_mask: Optional[torch.Tensor] = None):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
def __init__(self, channels):
def forward(self, x, x_mask, reverse=False, **kwargs):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse: bool = False,
):
def remove_weight_norm(self):
def __prepare_scriptable__(self):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
n_layers,
num_bins=10,
tail_bound=5.0,
):
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
g: Optional[torch.Tensor] = None,
reverse=False,
):
# Path: onnxlib/commons.py
def get_padding(kernel_size, dilation=1):
return int((kernel_size * dilation - dilation) / 2)
# Path: onnxlib/commons.py
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
# Path: onnxlib/models_onnx.py
import logging
import math
import numpy as np
import torch
from torch import nn
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from onnxlib import attentions, commons, modules
from onnxlib.commons import get_padding, init_weights
x, _ = flow(x, x_mask, g=g, reverse=reverse)
return x
def remove_weight_norm(self):
for i in range(self.n_flows):
self.flows[i * 2].remove_weight_norm()
class PosteriorEncoder(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
def remove_weight_norm(self):
self.enc.remove_weight_norm()
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(
initial_channel, upsample_initial_channel, 7, 1, padding=3
)
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2**i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)
):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
self.ups.apply(init_weights)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
def forward(self, x, g=None):
x = self.conv_pre(x)
if g is not None:
x = x + self.cond(g)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, modules.LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
| return x |
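The `Generator.forward` above is the familiar HiFi-GAN-style stack: each transposed convolution is followed by the average of its `num_kernels` residual blocks, and with `padding=(k - u) // 2` each upsampling stage multiplies the time axis by exactly its rate (for the usual even k - u configurations). A usage sketch with assumed VITS-like hyperparameters; the values are illustrative, not taken from this repository's configs, and the sketch assumes `onnxlib` is importable:

import torch
from onnxlib.models_onnx import Generator  # assumes the repository is on PYTHONPATH

gen = Generator(
    initial_channel=192,                     # assumed values, not this repo's configs
    resblock="1",
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[10, 6, 2, 2],
    upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
)
x = torch.randn(1, 192, 50)   # (batch, initial_channel, frames)
y = gen(x)                    # time axis grows by 10 * 6 * 2 * 2 = 240
print(y.shape)                # torch.Size([1, 1, 12000]), single waveform channel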
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zengydd/ProphDR
# Path: utils/optimizer.py
def load_config(path):
with open(path, 'r') as f:
return EasyDict(yaml.safe_load(f))
# Path: utils/optimizer.py
def get_optimizer(cfg, model):
if cfg.type == 'adam':
return torch.optim.Adam(
model.parameters(),
lr=cfg.lr,
weight_decay=cfg.weight_decay,
betas=(cfg.beta1, cfg.beta2, )
)
else:
raise NotImplementedError('Optimizer not supported: %s' % cfg.type)
# Path: utils/optimizer.py
def get_scheduler(cfg, optimizer):
if cfg.type == 'plateau':
return torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
factor=cfg.factor,
patience=cfg.patience,
min_lr=cfg.min_lr
)
else:
raise NotImplementedError('Scheduler not supported: %s' % cfg.type)
# Path: utils/load.py
def load_pickle(path):
f = open(path, "rb")
data = pickle.load(f)
f.close()
return data
# Path: utils/load.py
def save_pickle(data, path):
f = open(path, "wb")
pickle.dump(data, f)
f.close()
# Path: utils/load.py
def set_file(root_path, task, method, down_sample):
if task=='binary':
if method =='orio':
res_df = pd.read_csv(root_path + 'unify_thred_Iorio.csv')
elif method =='only2':
res_df = pd.read_csv(root_path + 'unify_thred_only2.csv')
else:
res_df = pd.read_csv(root_path + 'unify_thred_only2.csv')
return res_df
# Path: utils/load.py
def set_random_seed(seed=4):
"""Set random seed.
Parameters
----------
seed : int
Random seed to use
"""
random.seed(seed)
np.random.seed(seed)
# dgl.random.seed(seed)
# dgl.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
# Path: utils/load.py
def nested_dict_factory():
return defaultdict(nested_dict_factory)
# Path: utils/mydata.py
class mydata(data.Dataset):
def __init__(self, list_ID, label, res_df, drug_smiles_df, omic_encode_dict):
'Initialization'
self.list_ID = list_ID
self.label = label
self.res_df = res_df
self.drug_smiles_df = drug_smiles_df
self.omic_encode_dict = omic_encode_dict
def __len__(self):
'Denotes the total number of samples'
return len(self.list_ID)
def __getitem__(self, index):
label = self.label[index]
ID = self.list_ID[index]
drug_id = self.res_df.iloc[ID]['DRUG_ID']
cosmic_id = self.res_df.iloc[ID]['COSMIC_ID']
drug_f = self.drug_smiles_df.loc[drug_id]['smiles']
omic_f = self.omic_encode_dict[str(cosmic_id)]
return drug_id, cosmic_id, drug_f, omic_f, label
# Path: utils/mydata.py
def dataset_split(res_df, random=4, stratify=None):
if stratify == None:
train_set, val_test_set = train_test_split(res_df, test_size=0.2, random_state=random)
val_set, test_set = train_test_split(val_test_set, test_size=0.5, random_state=random)
else:
train_set, val_test_set = train_test_split(res_df, test_size=0.2, random_state=random, stratify=res_df[stratify])
# print('ct', val_test_set['binary'].tolist())
val_set, test_set = train_test_split(val_test_set, test_size=0.5, random_state=random, stratify=val_test_set[stratify])
print('Responses:{}'.format(res_df.shape[0]))
print('Train:{}'.format(train_set.shape[0]))
print('Val:{}'.format(val_set.shape[0]))
print('Test:{}'.format(test_set.shape[0]))
print('train_DRUG:{}, val_DRUG:{}, test_DRUG:{}'.format(len(train_set['DRUG_ID'].value_counts()), len(set(val_set['DRUG_ID'])), len(set(test_set['DRUG_ID']))))
print('train_cell:{}, val_cell:{}, test_cell:{}'.format(len(set(train_set['COSMIC_ID'])), len(set(val_set['COSMIC_ID'])), len(set(test_set['COSMIC_ID']))))
return train_set, val_set, test_set
# Path: Models/RCCA_ca.py
class CCNet(nn.Module):
def __init__(self, dim, recurrence=2):
super(CCNet, self).__init__()
self.ccnet = RCCAModule(dim, in_channels=1, out_channels=512, recurrence=recurrence)
def forward(self, x):
output, attn_list = self.ccnet(x)
return output, attn_list
# Path: Models/cross_attention_dual.py
class cross_EncoderBlock_G(nn.Module):
"""Transformer编码器块"""
def __init__(self, query_size, key_size, value_size, num_hiddens,
num_heads, norm_shape,
dropout=0.1, bias=False, **kwargs):
super(cross_EncoderBlock_G, self).__init__(**kwargs)
self.cross_attention = cross_MultiHeadAttention_G(
query_size, key_size, value_size, num_hiddens, num_heads, dropout, bias)
self.addnorm_q = AddNorm_Q(norm_shape, query_size, num_hiddens, dropout)
self.linear = nn.Linear(num_hiddens, num_hiddens)
def forward(self, q, k, v, valid_lens):
attn_output, attn_w = self.cross_attention(q, k, v, valid_lens)
out = self.addnorm_q(q, attn_output)
return out, attn_w
# Path: Models/cross_attention_dual.py
class cross_EncoderBlock_D(nn.Module):
"""Transformer编码器块"""
def __init__(self, query_size, key_size, value_size, num_hiddens,
num_heads, norm_shape,
dropout=0.1, bias=False, **kwargs):
super(cross_EncoderBlock_D, self).__init__(**kwargs)
# print('query_size', query_size)
self.cross_attention = cross_MultiHeadAttention_D(
query_size, key_size, value_size, num_hiddens, num_heads, dropout, bias)
# self.norm_shape = [self.len_q, self.h_dim]
self.addnorm_q = AddNorm_Q(norm_shape, query_size, num_hiddens, dropout)
# self.addnorm = AddNorm(norm_shape, dropout)
self.linear = nn.Linear(num_hiddens, num_hiddens)
def forward(self, q, k, v, valid_lens):
attn_output, attn_w = self.cross_attention(q, k, v, valid_lens)
# print('attn_output', attn_output.shape)
# print('attn_w', attn_w.shape)
out = self.addnorm_q(q, attn_output)
return out, attn_w
# Path: Models/Proph_DR.py
class gbz_main_cross(object):
def __init__(self, task, omic_dim, res_df, omic_encode_dict, model_dir):
# self.model_drug = bert_atom_embedding
self.task = task
self.model_dir = model_dir
self.model = Predictor(h_dim=128, num_heads=4, omic_dim=omic_dim)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.omic_encode_dict = omic_encode_dict
self.record_file = os.path.join(self.model_dir, "valid_markdowntable.txt")
self.pkl_file = os.path.join(self.model_dir, "loss_curve_iter.pkl")
self.res_df = res_df
if self.task=='binary':
self.label = 'binary'
self.loss_fct = FocalLoss(logits=True)
elif self.task=='IC50':
self.label = 'LN_IC50'
self.loss_fct = torch.nn.MSELoss()
elif self.task=='AUC':
self.label='AUC'
self.loss_fct = torch.nn.MSELoss()
def validate(self, generator, model):
torch.cuda.empty_cache()
loss_fct = self.loss_fct
model.eval()
y_label = []
y_pred = []
with torch.no_grad():
for i, (drug_id, cosmic_id, drug_fs, omic_f, label) in enumerate(generator):
torch.cuda.empty_cache()
label = Variable(torch.from_numpy(np.array(label)).float()).to(self.device)
# score = model(drug_id, omic_f, cosmic_id)
encode_D, valid_lens = encoder_D(drug_id)
score = model(drug_id, encode_D, omic_f, valid_lens, cosmic_id)
score_flatten = score.flatten().to(self.device)
loss = loss_fct(score_flatten, label).to(self.device)
y_label.append(label.view(-1,1))
y_pred.append(score_flatten.view(-1, 1))
y_label = torch.cat(y_label, dim=0).cpu().numpy().flatten()
y_pred = torch.cat(y_pred, dim=0).cpu().numpy().flatten()
# Metrics
if self.task=='binary':
metric = {}
y_pred = torch.sigmoid(torch.tensor(y_pred)).tolist()
# print('y_label:{},\ny_pred:{}'.format(y_label, y_pred))
metric['AUC'] = roc_auc_score(y_label, y_pred)
metric['pr_score'] = average_precision_score(y_label, y_pred)
false_positive_rate,true_positive_rate,thresholds = roc_curve(y_label, y_pred)
recall, precision, thresholds = precision_recall_curve(y_label, y_pred)
print('roc_curve data:', [false_positive_rate,true_positive_rate,thresholds])
print('PR_curve data:', [recall, precision])
to_binary = lambda x: 1 if x > 0.5 else 0
y_pred_cls = list(map(to_binary, y_pred))
metric['acc'] = accuracy_score(y_label, y_pred_cls)
metric['F1'] = f1_score(y_label, y_pred_cls, average='binary')
print('metric_resut_{}{}:'.format(self.task, metric))
else:
metric = {}
metric['r2'] = r2_score(y_label, y_pred)
metric['MAE'] = mean_absolute_error(y_label, y_pred)
metric['mse'] = mean_squared_error(y_label, y_pred)
metric['rmse'] = torch.sqrt(torch.tensor(metric['mse']))
metric['spearman'] = spearmanr(y_label, y_pred)[0]
metric['pearson'] = pearsonr(y_label, y_pred)[0]
metric['ci'] = concordance_index(y_label, y_pred)
print('metric_resut_{}{}:'.format(self.task, metric))
model.train()
return metric, loss
def train(self, train_set, val_set, **param):
torch.cuda.empty_cache()
self.model = self.model.to(self.device)
print(self.model)
label = self.label
loss_fct = self.loss_fct
BATCH_SIZE = param['bs']
train_epoch = param['te']
patience = param['pt']
opt = getattr(torch.optim, param['optimizer'])(self.model.parameters(),
lr=param['lr'],
weight_decay=param['decay'])
params = {'batch_size': BATCH_SIZE,
'shuffle': True,
'num_workers': 0,
'drop_last': False}
# loader
train_generator = data.DataLoader(
mydata(
train_set.index.values,
train_set[label].values,
self.res_df,
drug_smiles_df,
self.omic_encode_dict
),
**params)
val_generator = data.DataLoader(
mydata(
val_set.index.values,
val_set[label].values,
self.res_df,
drug_smiles_df,
self.omic_encode_dict
),
**params)
max_MSE = 10000
model_max = copy.deepcopy(self.model)
writer = SummaryWriter(self.model_dir)
table = PrettyTable()
table.title = 'valid'
t_start = time.time()
loss_history = []
early_stopping = EarlyStopping(patience=patience, verbose=False)
for epo in range(train_epoch):
torch.cuda.empty_cache()
for i, (drug_id, cosmic_id, drug_fs, omic_f, label) in enumerate(train_generator):
torch.backends.cudnn.enabled = False
# score = self.model(drug_id, omic_f, cosmic_id)
encode_D, valid_lens = encoder_D(drug_id)
score = self.model(drug_id, encode_D, omic_f, valid_lens, cosmic_id)
# print('score:'.format(type(score), score))
label = Variable(torch.from_numpy(np.array(label))).float().to(self.device)
n = torch.squeeze(score, 1).float()
n = n.squeeze(-1)
loss = loss_fct(n, label)
loss_history.append(loss.item())
writer.add_scalar("Loss/train", loss.item(), epo)
opt.zero_grad()
loss.backward()
opt.step()
if (i % 1000 == 0):
t_now = time.time()
print('Training at Epoch ' + str(epo + 1) +
' iteration ' + str(i) + \
' with loss ' + str(loss.cpu().detach().numpy())[:7] + \
' with lr ' + str(opt.param_groups[0]['lr']) + \
". Total time " + str(int(t_now - t_start) / 3600)[:7] + " hours")
metric_result, loss_val = self.validate(val_generator, self.model)
print('Validation at Epoch:{} \nMetric_result:{}'.format(str(epo + 1), metric_result))
# mark
table.field_names = ['# epoch'] + list(metric_result.keys()) + ['loss']
valid_content_lst = ['epo'+str(epo)]+list(map(float2str, metric_result.values()))+[str(loss_val)]
table.add_row(valid_content_lst)
# tensorboard
for k, v in metric_result.items():
writer.add_scalar("valid/{}".format(k), v, epo)
writer.add_scalar("Loss/valid", loss_val.item(), epo)
# early_stop
early_stopping(loss, self.model, self.model_dir)
if early_stopping.early_stop:
print("Early stopping at epoch{}".format(epo))
break
lowest_val = 1e9  # note: resets every epoch; move this above the epoch loop to track the best loss
if loss_val < lowest_val:
    lowest_val = loss_val
self.save_model(self.model, self.model_dir)
print(f'Val Loss: {loss_val}')
# self.model = model_max
with open(self.record_file, 'w') as fp:
fp.write(table.get_string())
with open(self.pkl_file, 'wb') as pck:
pickle.dump(loss_history, pck)
print('--- Training Finished ---')
writer.flush()
writer.close()
return metric_result, loss_val
def test(self, test_set):
self.model = self.model.to(self.device)
label = self.label
params = {'batch_size': 200,
'shuffle': True,
'num_workers': 0,
'drop_last': False}
# loader
test_generator = data.DataLoader(
mydata(
test_set.index.values,
test_set[label].values,
self.res_df,
drug_smiles_df,
self.omic_encode_dict
),
**params)
print("=====testing...")
self.model.load_state_dict(torch.load(self.model_dir + '/checkpoint.pt')['model_state_dict'])
metric_result, loss = self.validate(test_generator, self.model)
return metric_result, loss
def pred(self, smiles_list, cosmic_id_list, pt_path=os.path.join(root, 'ckpt/checkpoint.pt'), drug_id=0):
with torch.no_grad():
score_list = []
smi_list = []
cell_list = []
for smiles in smiles_list:
smi_list.append(smiles)
for cosmic_id in cosmic_id_list:
cell_list.append(str(cosmic_id))
self.model = self.model.to(self.device)
omic_f = self.omic_encode_dict[str(cosmic_id)]
omic_f = omic_f.unsqueeze(0)
self.model.load_state_dict(torch.load(pt_path, map_location='cpu')['model_state_dict'])
encode_D_pred, valid_lens = encoder_D_pred(smiles)
score = self.model(drug_id, encode_D_pred, omic_f, valid_lens, cosmic_id)
score = score.flatten().to(self.device).cpu().numpy().item()
score_list.append(score)
res = pd.DataFrame()
res['LN(IC50)'] = pd.Series(score_list)
res['smiles'] = smi_list
res['cosmic'] = cell_list
return res
def save_model(self, model, model_dir):
torch.save({'model_state_dict': model.state_dict()}, model_dir + '/checkpoint.pt')
print('model_saved:{}'.format(model_dir))
# Path: pred.py
import os, sys
import pandas as pd
import numpy as np
import random
import copy
import time
import datetime
import math
import pickle
import optuna
import yaml
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data
from torch.nn.parallel import DataParallel
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
from sklearn.model_selection import train_test_split, KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import SequentialSampler
from prettytable import PrettyTable
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, roc_curve, f1_score, precision_recall_curve
from lifelines.utils import concordance_index
from scipy.stats import pearsonr,spearmanr
from utils.optimizer import load_config, get_optimizer, get_scheduler
from easydict import EasyDict
from collections import defaultdict
from utils.load import load_pickle, save_pickle, set_file, set_random_seed, nested_dict_factory
from utils.mydata import mydata, dataset_split
from Models.RCCA_ca import CCNet
from Models.cross_attention_dual import cross_EncoderBlock_G, cross_EncoderBlock_D
from Models.Proph_DR import gbz_main_cross
os.environ['NUMEXPR_MAX_THREADS'] = '32'
sys.path.append("..")
torch.set_default_dtype(torch.float32)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
float2str = lambda x: '%0.4f' % x
# FILE path
root = os.getcwd()
data_dir = os.path.join(root, 'data_collect/')
unify_dir = os.path.join(root, 'data_collect/unify/')
# omics data
omic_encode = os.path.join(unify_dir, 'omics_std/omics_stk_dict.pkl')
mut_encode = os.path.join(unify_dir, 'omics_std/mut_dict.pkl')
cnv_encode = os.path.join(unify_dir, 'omics_std/cnv_dict.pkl')
exp_encode = os.path.join(unify_dir, 'omics_std/exp_dict.pkl')
mut_cnv = os.path.join(unify_dir, 'omics_std/omics_stk_dict_mut_cnv.pkl')
mut_exp = os.path.join(unify_dir, 'omics_std/omics_stk_dict_mut_exp.pkl')
exp_cnv = os.path.join(unify_dir, 'omics_std/omics_stk_dict_exp_cnv.pkl')
if __name__ == '__main__':
set_random_seed()
| model_dir = os.path.join(root, 'Models/') |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zhenqincn/FedKSeed
# Path: server.py
class Server(object):
def __init__(self, args, eval_loader, candidate_seeds, log_dir):
self.args = args
self.eval_loader = eval_loader
self.candidate_seeds = candidate_seeds
self.tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
self.log_dir = log_dir
self.tokenizer.model_max_length = self.args.max_length
special_tokens = dict()
if self.tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if self.tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if self.tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if self.tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
self.tokenizer.add_special_tokens(special_tokens)
self.model = AutoModelForCausalLM.from_pretrained(args.model, device_map='cpu', torch_dtype=torch.float16, trust_remote_code=True)
from copy import deepcopy
self.model_w0 = deepcopy(self.model)
self.seed_pool = {seed: 0.0 for seed in self.candidate_seeds}
self.device = torch.device(f'cuda:{self.args.device}')
if self.args.bias_sampling:
# initialize the probabilities of seeds
self.gradient_history = {seed: [self.args.grad_initial] for seed in self.candidate_seeds}
self.probabilities = [1.0 / float(len(self.candidate_seeds)) for _ in range(len(self.candidate_seeds))]
else:
self.gradient_history = None
self.probabilities = None
def create_model_by_seedpool(self, cur_round):
tmp_model = deepcopy(self.model_w0)
tmp_model.to(self.device)
lr = self.args.lr * math.pow(self.args.lr_decay, cur_round - 1)
if self.args.lr_decay != 1.0:
raise ValueError('currently seed pool only supports constant learning rate')
# replace local model with initial weights
framework = MeZOFramework(tmp_model, args=self.args, lr=lr, candidate_seeds=self.candidate_seeds)
progress_bar = tqdm(range(len(self.seed_pool)))
# pull the latest model via accumulated {seed, grad} pairs on the server
for seed, grad in self.seed_pool.items():
if grad != 0:
framework.zo_update(seed=seed, grad=grad)
progress_bar.update(1)
progress_bar.set_description(f'pull global model at round{cur_round}')
tmp_model = tmp_model.cpu()
return tmp_model
def aggregate_seed_pool(self, selected_client_list):
if self.args.equal_weight:
weight_array = np.array([1.0 for _ in selected_client_list], dtype=np.float64)
weight_array /= float(len(selected_client_list))
else:
weight_array = np.array([len(client.train_loader) for client in selected_client_list], dtype=np.float64)
weight_array /= float(np.sum(weight_array))
for client_idx in range(len(selected_client_list)):
local_seed_pool = selected_client_list[client_idx].local_seed_pool
for seed, grad in local_seed_pool.items():
self.seed_pool[seed] += grad * weight_array[client_idx]
for client in selected_client_list:
client.clear_model()
def update_global_model_by_seed_pool(self):
self.model = deepcopy(self.model_w0)
self.model.to(self.device)
framework = MeZOFramework(self.model, args=self.args, lr=self.args.lr, candidate_seeds=self.candidate_seeds)
progress_bar = tqdm(range(len(self.seed_pool)))
# pull the latest model via accumulated {seed, grad} pairs on the server
for seed, grad in self.seed_pool.items():
if grad != 0.0:
framework.zo_update(seed=seed, grad=grad)
progress_bar.update(1)
progress_bar.set_description(f'server update global model')
def prepare_aggregate(self):
self.model_for_aggregate = deepcopy(self.model)
for _, v in self.model_for_aggregate.named_parameters():
if v.requires_grad:
v.data.zero_()
def online_aggregate(self, client, selected_client_list):
if self.args.equal_weight:
weight_array = np.array([1.0 for _ in selected_client_list], dtype=np.float64)
weight_array /= float(len(selected_client_list))
else:
weight_array = np.array([len(client.train_loader) for client in selected_client_list], dtype=np.float64)
weight_array /= float(np.sum(weight_array))
cur_client_index = 0
for c in selected_client_list:
if client.idx == c.idx:
break
cur_client_index += 1
cur_weight = weight_array[cur_client_index]
for k, v in self.model_for_aggregate.named_parameters():
if v.requires_grad:
v.data += client.model.state_dict()[k].data * cur_weight
client.clear_model()
def finish_aggregate(self):
self.model = self.model_for_aggregate
def calculate_probabilities(self):
history_list = [self.gradient_history[seed] for seed in self.candidate_seeds]
mean_grad_history = np.array([np.mean(np.abs(np.clip(history_cur_seed, -self.args.bias_loss_clip, self.args.bias_loss_clip))) for history_cur_seed in history_list])
self.probabilities = softmax(min_max_norm(mean_grad_history))
sum_prob = np.sum(self.probabilities)
if sum_prob != 1.0:
self.probabilities /= sum_prob
return self.probabilities
def eval(self, cur_round, eval_avg_acc):
if self.args.eval_metric == 'loss':
eval_metric = self.eval_loss(cur_round)
else:
eval_metric = self.eval_generate(cur_round)
if self.args.save and cur_round > 0:
save_dir = self.log_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if (self.args.eval_metric == 'loss' and eval_metric < np.min(eval_avg_acc)) or (self.args.eval_metric != 'none' and eval_metric > np.max(eval_avg_acc)):
for file_name in os.listdir(save_dir):
if 'best' in file_name:
os.remove(os.path.join(save_dir, file_name))
torch.save(self.model.state_dict(), os.path.join(save_dir, f'model_state_dict_best_round{cur_round}.bin'))
for file_name in os.listdir(save_dir):
if 'final' in file_name:
os.remove(os.path.join(save_dir, file_name))
torch.save(self.model.state_dict(), os.path.join(save_dir, f'model_state_dict_final_round{cur_round}.bin'))
return eval_metric
def eval_loss(self, cur_round):
self.model = self.model.to(self.device)
self.model.eval()
progress_bar_eval = tqdm(range(len(self.eval_loader)))
loss_total_eval = 0.0
num_eval = 0
with torch.inference_mode():
for batch in self.eval_loader:
batch = {
'input_ids': batch['input_ids'].to(self.device),
'labels': batch['labels'].to(self.device),
'attention_mask': batch['attention_mask'].to(self.device)
}
outputs = self.model(**batch)
loss = outputs.loss
progress_bar_eval.update(1)
if torch.isnan(loss):
continue
loss_total_eval += loss
num_eval += len(batch['input_ids'])
if num_eval == 0:
num_eval = 1e-10
progress_bar_eval.set_description(f'eval at round {cur_round}, loss: {loss_total_eval / num_eval}')
print()
print()
self.model = self.model.cpu()
return (loss_total_eval / num_eval).item()
def eval_generate(self, cur_round):
self.model = self.model.to(self.device)
self.model.eval()
progress_bar_eval = tqdm(range(len(self.eval_loader)))
acc_total_eval = 0.0
num_eval = 0
with torch.inference_mode():
for batch in self.eval_loader:
input_ids = batch['input_ids'].to(self.device)
label_ids = batch['labels'].to(self.device)
output_ids = self.model.generate(
input_ids=input_ids,
max_new_tokens=128,
num_beams=1,
)
acc_total_eval += rouge_score(output_ids[0][len(input_ids[0]):], label_ids[0], self.tokenizer)
progress_bar_eval.update(1)
num_eval += len(batch['input_ids'])
if num_eval == 0:
num_eval = 1e-10
progress_bar_eval.set_description(f'eval at round {cur_round}, metric: {acc_total_eval / num_eval}')
print()
print()
self.model = self.model.cpu()
return acc_total_eval / num_eval
# Path: client.py
class Client(object):
def __init__(self, idx, args, candidate_seeds, train_loader):
self.idx = idx
self.args = args
self.train_loader = train_loader
self.train_iterator = iter(self.train_loader)
self.model = None
self.device = torch.device(f'cuda:{args.device}')
self.candidate_seeds = candidate_seeds
def local_train_with_seed_pool(self, pulled_model, cur_round, memory_record_dic=None, probabilities=None, gradient_history=None):
self.model = pulled_model
self.model.to(self.device)
if memory_record_dic is not None:
torch.cuda.empty_cache()
# initialize a seed pool
self.local_seed_pool = {seed: 0.0 for seed in self.candidate_seeds}
lr = self.args.lr
if self.args.batch_or_epoch == 'epoch':
iter_steps = self.args.local_step * len(self.train_loader)
else:
iter_steps = self.args.local_step
if self.args.bias_sampling:
assert probabilities is not None
framework = MeZOBiasOptimizer(self.model, args=self.args, lr=lr, candidate_seeds=self.candidate_seeds, probabilities=probabilities, gradient_history=gradient_history)
else:
framework = MeZOFramework(self.model, args=self.args, lr=lr, candidate_seeds=self.candidate_seeds)
self.model.eval()
with torch.inference_mode():
if self.args.batch_or_epoch == 'batch':
loss_total_train = 0.0
num_trained = 0
progress_bar = tqdm(range(iter_steps))
for cur_step in range(iter_steps):
# init epoch progress bar
if self.args.batch_or_epoch == 'epoch':
if cur_step % len(self.train_loader) == 0:
loss_total_train = 0.0
num_trained = 0
progress_bar = tqdm(range(len(self.train_loader)))
try:
batch = next(self.train_iterator)
except StopIteration:
self.train_iterator = iter(self.train_loader)
batch = next(self.train_iterator)
batch = {
'input_ids': batch['input_ids'].to(self.device),
'labels': batch['labels'].to(self.device),
'attention_mask': batch['attention_mask'].to(self.device)
}
logits, loss = framework.zo_step(batch, local_seed_pool=self.local_seed_pool)
progress_bar.update(1)
if (not torch.isnan(loss)) and (self.args.grad_clip <= 0 or loss != 0.0):
loss_total_train += loss
num_trained += len(batch['input_ids'])
if self.args.batch_or_epoch == 'epoch':
progress_bar.set_description(f'client {self.idx} train at epoch {int(cur_step / len(self.train_loader)) + 1}, loss: {loss_total_train / num_trained if num_trained != 0 else 0.0}')
else:
progress_bar.set_description(f'client {self.idx} train at step {cur_step}, loss: {loss_total_train / num_trained if num_trained != 0 else 0.0}')
# save both CPU and GPU memory
del framework
self.model = None
if memory_record_dic is not None:
memory_record_dic[self.device.index] = {}
memory_record_dic[self.device.index]['max_memory_allocated'] = torch.cuda.max_memory_allocated(self.device)
memory_record_dic[self.device.index]['max_memory_reserved'] = torch.cuda.max_memory_reserved(self.device)
def clear_model(self):
# clear model to save memory
self.model = None
def migrate(self, device):
"""
migrate a client to a new device
"""
self.device = device
def pull(self, forked_global_model):
"""
pull model from the server
"""
self.model = forked_global_model
# Path: utils_data/load_data.py
def get_loaders(args, only_eval=False):
"""
Return: list of train_loaders, eval_loader
"""
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
tokenizer.model_max_length = args.max_length
special_tokens = dict()
if tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
tokenizer.add_special_tokens(special_tokens)
# Generation task
if args.dataset == 'dolly':
from utils_data.llm_dataset import LLMDataset, LLMDataCollator
if args.eval_metric == 'loss':
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)
else:
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)
data_collator = LLMDataCollator(tokenizer=tokenizer)
# only use a subset of raw dataset
raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])
y_all = np.array([item['categories'] for item in raw_datasets])
index_eval = np.where(y_all == args.zerotask)[0]
# delete the indices of eval samples from the full set
index_train = np.delete(np.arange(len(y_all)), index_eval)
raw_datasets = np.array(raw_datasets)
train_set = raw_datasets[index_train]
eval_set = raw_datasets[index_eval]
y_train = np.array([item['categories'] for item in train_set])
counter = Counter(y_train)
noniid = args.iid
if 'dir' in noniid:
split_dic = partition_idx_labeldir(y_train, n_parties=args.num_clients, alpha=float(noniid[3:]), num_classes=len(counter))
split_trainsets = []
for _, sample_indices in split_dic.items():
split_trainsets.append(Subset(train_set, indices=sample_indices))
else:
n_parts = [int(len(train_set) / args.num_clients) for _ in range(args.num_clients - 1)]
n_parts.append(len(train_set) - sum(n_parts))
split_trainsets = torch.utils.data.dataset.random_split(train_set, n_parts)
list_train_loader = [
DataLoader(
subset, shuffle=True, batch_size=args.batch_size, collate_fn=data_collator
) for subset in split_trainsets
]
eval_loader = DataLoader(
eval_set, batch_size=args.batch_size, collate_fn=data_collator
)
elif args.dataset in ['instruct']:
from utils_data.natural_instruction_loader import get_instruction_dataset
list_train_loader, eval_loader = get_instruction_dataset(args, tokenizer, only_eval=only_eval)
else:
raise AttributeError(f'dataset {args.dataset} not implemented')
return list_train_loader, eval_loader, tokenizer
# Path: main.py
import argparse
import os
import time
import random
import numpy as np
import torch
import yaml
import json
from server import Server
from client import Client
from utils_data.load_data import get_loaders
from copy import deepcopy
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Federation
parser.add_argument('--num_clients', type=int, default=200, help='N in our paper')
parser.add_argument('-m', type=float, default=0.05, help='ratio of active clients in each round')
parser.add_argument('--rounds', type=int, default=40, help='the total number of rounds')
parser.add_argument('--local_step', type=int, default=200, help=r'$\tau$ in our paper')
parser.add_argument('--batch_or_epoch', type=str, default='batch', choices=['epoch', 'batch'])
parser.add_argument('--equal_weight', default=False, action='store_true', help='if `true`, the weights among clients for aggregation are the same')
# Data
## Arguments related to data on both datasets
parser.add_argument('--dataset', type=str, default='instruct', choices=['instruct', 'dolly'])
parser.add_argument('--batch_size', type=int, default=1, help='batch size > 1 may cause errors during running')
parser.add_argument('--max_length', type=int, default=1024, help='the max number of tokens of a data instance')
parser.add_argument('--use_prompts', default=True, help='if `true`, the prompt template from alpaca is adopted')
## Arguments related to data only for Dolly-15K
parser.add_argument('--iid', type=str, default='dir0.5', help=r'`dir{alpha}` means \alpha in the Dirichlet distribution is {alpha}, `0` means IID split')
parser.add_argument('--zerotask', default=7, type=int, help='the index of the task for evaluation in dolly-15K')
parser.add_argument('--dataset_subsample', type=float, default=1.0, help='used for sampling a subset from the original dataset, only effective for dolly-15K')
# Model
parser.add_argument('--model', type=str, default='datajuicer/LLaMA-1B-dj-refine-150B')
# Training
parser.add_argument('--lr', type=float, default=0.001, help=r'learning rate \eta')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay in MeZO')
parser.add_argument('--grad_clip', type=float, default=-100.0, help='clip overly large loss values; if < 0, this feature is disabled')
# Training args only for `FedKSeed`
parser.add_argument('-K', type=int, default=4096, help='number of candidate seeds, i.e., K in our paper')
parser.add_argument('--zo_eps', type=float, default=0.0005, help=r'\eps in MeZO')
# Training args only for `FedKSeed-Pro`
parser.add_argument('--bias_sampling', default=False, action='store_true', help='if `true`, the probabilities of candidate seeds to be sampled are not identical, i.e., FedKSeed-Pro')
parser.add_argument('--bias_loss_clip', default=1000.0, type=float, help='scalar gradients whose absolute value exceeds this value will be clipped')
parser.add_argument('--grad_initial', default=0.0, type=float, help='initial value of scalar gradient history corresponding to each candidate seed')
# Environment
parser.add_argument('--device', type=int, default=0, help='index of the targeted cuda device')
parser.add_argument('--log', default=False, action='store_true', help='if `true`, running logs will be recorded in files')
parser.add_argument('--log_root', default='logs', help='root path of log files')
parser.add_argument('--seed', default=42, type=int, help='global seed, for reproducibility')
# Evaluation
parser.add_argument('--eval_metric', default='rouge', type=str, choices=['rouge', 'loss'], help='metric to evaluate global model in the last round')
# Checkpoints
parser.add_argument('--save', default=False, action='store_true', help='if `true`, the checkpoint of tuned models will be stored')
time_stamp = str(time.time())
args = parser.parse_args()
eval_avg_acc = []
memory_record_dic = {}
previous_metric = args.eval_metric
args.eval_metric = 'loss'
# set CUDA visibility to targeted cuda device, to avoid the several hundred MB memory consumption of device 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
setup_seed(args.seed)
list_train_loader, eval_loader, _ = get_loaders(args)
if args.dataset == 'instruct':
args.iid = 'meta'
log_dir = time_stamp
if args.log_root != '':
log_dir = os.path.join(args.log_root, log_dir)
| if args.log: |
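The Server comments above ("pull the latest model via accumulated {seed, grad} pairs") describe the core trick: an update can be shipped as scalar gradients keyed by seeds and replayed by regenerating each seed's perturbation. A minimal sketch of that replay idea, assuming MeZO-style updates; the `replay_seed_pool` function below is illustrative, not the repository's `MeZOFramework.zo_update`:

import torch

def replay_seed_pool(model: torch.nn.Module, seed_pool: dict, lr: float) -> None:
    """Rebuild a model update from {seed: accumulated scalar grad} pairs (sketch)."""
    for seed, grad in seed_pool.items():
        if grad == 0.0:
            continue  # this seed was never sampled by any client
        gen = torch.Generator().manual_seed(int(seed))
        for p in model.parameters():
            if not p.requires_grad:
                continue
            # the same perturbation z is reproducible from the seed alone,
            # so only the scalar `grad` ever needs to be communicated
            z = torch.randn(p.shape, generator=gen).to(dtype=p.dtype, device=p.device)
            p.data.add_(z, alpha=-lr * float(grad))

Because only {seed: scalar} pairs are exchanged, clients and the server can reconstruct identical model states without transmitting full parameter deltas, which is what lets `create_model_by_seedpool` above rebuild the global model from `self.seed_pool` alone.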
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: merlresearch/PixPNet
# Path: pixpnet/protonets/models/layers.py
class GroupedLinear(nn.Linear):
"""
Equivalent to (but faster than):
>>> conv = nn.Conv1d(in_features, out_features, kernel_size=1,
>>> groups=groups, bias=bias, **kwargs)
>>> conv(input[:, :, None]).squeeze(dim=2)
"""
def __init__(self, in_features, out_features, groups, bias=True, **kwargs):
if in_features % groups != 0:
raise ValueError("in_features must be divisible by groups")
self.groups = groups
self.in_features_per_group = in_features // groups
super().__init__(in_features=self.in_features_per_group, out_features=out_features, bias=bias, **kwargs)
def forward(self, input: torch.Tensor) -> torch.Tensor:
if self.groups == 1:
return super().forward(input)
# Otherwise, group input
input_grouped = torch.reshape(input, (input.size()[0], self.groups, self.in_features_per_group))
# Batched matrix multiplications using einsum
out = torch.einsum("bji,ji->bj", input_grouped, self.weight)
# Add bias if using bias
if self.bias is not None:
out += self.bias[None, :]
return out
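# Editor's illustrative sketch (not part of this repository): the grouped forward
# path above contracts the weight's first index against the group index, so it
# assumes out_features == groups -- which is how ProtoNet's "sparse" readout uses
# this layer. Under that assumption the docstring's equivalence to a grouped 1x1
# Conv1d can be checked directly:
if __name__ == "__main__":  # hypothetical demo, never run by the library itself
    import torch
    from torch import nn
    torch.manual_seed(0)
    batch, groups, in_per_group = 4, 5, 3
    in_features, out_features = groups * in_per_group, groups
    gl = GroupedLinear(in_features, out_features, groups=groups, bias=True)
    conv = nn.Conv1d(in_features, out_features, kernel_size=1, groups=groups, bias=True)
    # map the GroupedLinear parameters onto the grouped 1x1 convolution
    conv.weight.data.copy_(gl.weight.data.unsqueeze(-1))  # (out, in/groups, 1)
    conv.bias.data.copy_(gl.bias.data)
    x = torch.randn(batch, in_features)
    assert torch.allclose(gl(x), conv(x[:, :, None]).squeeze(dim=2), atol=1e-6)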
# Path: pixpnet/protonets/models/protonet.py
class ProtoNet(nn.Module):
# Buffers
ones: torch.Tensor
corresponding_sample_idxs: torch.Tensor
min_fmap_idxs: torch.Tensor
prototype_class_identity: Optional[torch.Tensor]
# Parameters
prototype_vectors: torch.nn.Parameter
# Constants
prototype_layer_stride = 1
def __init__(
self,
features: nn.Module,
feature_layer: str,
rf_slices: Optional[SlicesType],
num_prototypes: int,
prototype_dim: int,
prototype_kernel_size: int,
num_classes: int,
init_weights: bool = True,
prototype_activation: Union[str, Callable] = "log",
add_on_layers_type: str = "regular",
class_specific: bool = True,
epsilon: float = 1e-6,
learn_prototypes: bool = True,
incorrect_strength: float = -0.5,
correct_strength: float = 1,
readout_type: str = "linear",
distance: str = "l2",
):
""""""
super().__init__()
self.prototype_shape = (num_prototypes, prototype_dim, prototype_kernel_size, prototype_kernel_size)
self.num_prototypes = num_prototypes
self.prototype_dim = prototype_dim
self.prototype_kernel_size = prototype_kernel_size
self.num_classes = num_classes
self.epsilon = epsilon
self.learn_prototypes = learn_prototypes
# prototype_activation could be 'log', 'linear',
# or a callable that converts distance to similarity score
self.prototype_activation = prototype_activation
self.distance = distance
self.feature_layer = feature_layer
self.rf_slices = rf_slices
self.rf_idxs = None
self.rf_sizes = None
if self.rf_slices is not None:
Hz = len(self.rf_slices)
Wz = len(self.rf_slices[0])
self.rf_sizes = torch.zeros((Hz, Wz, 2), dtype=torch.int)
self.rf_idxs = torch.zeros((Hz, Wz, 4), dtype=torch.int)
for h in range(Hz):
for w in range(Wz):
# for patch h,w
if len(self.rf_slices[h][w]) > 1:
raise NotImplementedError
for h_s, w_s in self.rf_slices[h][w]:
# Start weighting approach
h_size = h_s.stop - h_s.start
w_size = w_s.stop - w_s.start
self.rf_sizes[h, w] = torch.tensor([h_size, w_size], dtype=torch.int)
self.rf_idxs[h, w] = torch.tensor([h_s.start, h_s.stop, w_s.start, w_s.stop], dtype=torch.int)
self.incorrect_strength = incorrect_strength
self.correct_strength = correct_strength
self.class_specific = class_specific
if self.class_specific:
# Here we are initializing the class identities of the prototypes.
# Without domain specific knowledge we allocate the same number of
# prototypes for each class
assert self.num_prototypes % self.num_classes == 0
# a one-hot indication matrix for each prototype's class identity
self.register_buffer(
"prototype_class_identity", torch.zeros(self.num_prototypes, self.num_classes, dtype=torch.int)
)
num_prototypes_per_class = self.num_prototypes // self.num_classes
for j in range(self.num_prototypes):
self.prototype_class_identity[j, j // num_prototypes_per_class] = 1
# this has to be named features to allow the precise loading
self.features = features
self._init_add_on_layers(add_on_layers_type)
self.register_parameter(
"prototype_vectors", nn.Parameter(torch.rand(self.prototype_shape), requires_grad=learn_prototypes)
)
self.register_buffer("ones", torch.ones(self.prototype_shape))
self.register_buffer("corresponding_sample_idxs", torch.full((self.num_prototypes,), -1))
self.register_buffer("min_fmap_idxs", torch.full((self.num_prototypes, 4), -1))
self.readout_type = readout_type
self._init_last_layer()
if init_weights:
self._initialize_weights()
def _init_last_layer(self):
# do not use bias to aid interpretability
if self.readout_type == "linear": # standard linear
self.last_layer = nn.Linear(self.num_prototypes, self.num_classes, bias=False)
elif self.readout_type == "sparse": # sparse linear
if not self.class_specific:
raise ValueError('`readout_type` cannot be "sparse" if ' "`class_specific` is False")
self.last_layer = GroupedLinear(self.num_prototypes, self.num_classes, groups=self.num_classes, bias=False)
elif self.readout_type == "proto": # prototype sim sums as prediction
if not self.class_specific:
raise ValueError('`readout_type` cannot be "proto" if ' "`class_specific` is False")
# Note that this assumes that `prototype_class_identity` is still
# uniform across classes when class_specific is True
self.last_layer = GroupedSum(self.num_prototypes, self.num_classes)
else:
raise NotImplementedError(f"readout_type = {self.readout_type}")
def _init_add_on_layers(self, add_on_layers_type):
in_channels = self.features.out_channels
final_act, final_act_str = nn.Sigmoid(), "sigmoid"
if add_on_layers_type == "bottleneck":
add_on_layers = []
current_in_channels = in_channels
conv_idx = 1
while current_in_channels > self.prototype_dim or not len(add_on_layers):
current_out_channels = max(self.prototype_dim, (current_in_channels // 2))
if current_out_channels > self.prototype_dim:
conv2_str, act2, act2_str = (f"conv{conv_idx + 1}", nn.ReLU(), f"relu{conv_idx + 1}")
else:
assert current_out_channels == self.prototype_dim
conv2_str, act2, act2_str = ("conv_last", final_act, final_act_str)
add_on_layers.extend(
(
(
f"conv{conv_idx}",
nn.Conv2d(
in_channels=current_in_channels, out_channels=current_out_channels, kernel_size=1
),
),
(f"relu{conv_idx}", nn.ReLU()),
(
conv2_str,
nn.Conv2d(
in_channels=current_out_channels, out_channels=current_out_channels, kernel_size=1
),
),
(act2_str, act2),
)
)
current_in_channels = current_in_channels // 2
conv_idx += 2
elif add_on_layers_type == "regular":
add_on_layers = (
("conv1", nn.Conv2d(in_channels=in_channels, out_channels=self.prototype_dim, kernel_size=1)),
("relu1", nn.ReLU()),
(
"conv_last",
nn.Conv2d(in_channels=self.prototype_dim, out_channels=self.prototype_dim, kernel_size=1),
),
(final_act_str, final_act),
)
else:
raise ValueError(add_on_layers_type)
add_on_layers = OrderedDict(add_on_layers)
self.add_on_layers = nn.Sequential(add_on_layers)
def conv_features(self, x):
"""
the feature input to prototype layer
"""
x = self.features(x)
log_once(logger.info, f'features output shape: {("N", *x.size()[1:])}')
x = self.add_on_layers(x)
log_once(logger.info, f'add_on_layers output shape: {("N", *x.size()[1:])}')
return x
def compute_distances(self, x):
return compute_distances(self.distance, x, self.prototype_vectors, self.ones)
def prototype_distances(self, x):
"""
x is the raw input
"""
conv_features = self.conv_features(x)
distances = self.compute_distances(conv_features)
return conv_features, distances
def dist_2_sim(self, distances):
if self.prototype_activation == "log":
# equivalent:
# log((distances + 1) / (distances + epsilon)) # noqa: E800
# but this one is numerically more accurate
return torch.log(1 / (distances + self.epsilon) + 1)
elif self.prototype_activation == "linear":
if self.distance == "cosine":
# dists = 1 - sim --> sim = 1 - dists
return 1 - distances
else:
return -distances
else:
return self.prototype_activation(distances)
def forward(self, x, return_features=False):
result = self.prototype_distances(x)
conv_features, distances = result
outputs = self.classify_head(x, distances)
if return_features:
outputs["features"] = conv_features
return outputs
def classify_head(self, x, distances):
return self._classify_head_proto2patch(distances)
def pixel_space_map(self, x_i, proto_dists, sigma_factor=1.0):
# Note: one sample at a time! otherwise there will definitely be
# memory issues on most hardware and ProtoNets
dtype = proto_dists.dtype
device = proto_dists.device
# validate shape
if x_i.ndim == 4:
assert x_i.shape[0] == 1, x_i.shape
x_i = torch.squeeze(x_i, 0)
else:
assert x_i.ndim == 3, x_i.shape
if proto_dists.ndim == 4:
assert proto_dists.shape[0] == 1, proto_dists.shape
proto_dists = torch.squeeze(proto_dists, 0)
else:
assert proto_dists.ndim == 3, proto_dists.shape
C, H, W = x_i.shape
P, Hz, Wz = proto_dists.shape
# dists --> sims
proto_sims = self.dist_2_sim(proto_dists)
# Sim maps
heat_map_max = torch.zeros((P, H, W), dtype=dtype, device=device)
heat_map_avg = torch.zeros_like(heat_map_max)
heat_map_counts = torch.zeros_like(heat_map_avg, dtype=torch.int)
rf_h = self.rf_sizes[:, :, 0].max()
rf_w = self.rf_sizes[:, :, 1].max()
do_super_rfs = rf_h >= H or rf_w >= W
if do_super_rfs:
# increase true rf_h/w
where_big = torch.where((self.rf_sizes[:, :, 0] >= H) | (self.rf_sizes[:, :, 1] >= W))
do_super_rfs = len(where_big[0]) > 1
if do_super_rfs:
# linear stretching assumption for super-100% RF networks
naive_midpoints_h = torch.round((torch.arange(Hz) + 0.5) * H / Hz).int()
naive_midpoints_w = torch.round((torch.arange(Wz) + 0.5) * W / Wz).int()
im_midpoints = (H - 1) / 2, (W - 1) / 2
pad_h = torch.round((im_midpoints[0] - naive_midpoints_h[where_big[0]]).abs().max()).int()
pad_w = torch.round((im_midpoints[1] - naive_midpoints_w[where_big[1]]).abs().max()).int()
# increase the RFs by the discovered padding amount
rf_h = rf_h + 2 * pad_h
rf_w = rf_w + 2 * pad_w
k_size = max(rf_h, rf_w)
sigma = k_size * sigma_factor
g_kern = gaussian_kernel(k_size, sigma=sigma, device=device)
for h in range(Hz):
for w in range(Wz):
# for patch h,w
sims_hw = proto_sims[:, h, w][:, None, None] # P x 1 x 1
h_size, w_size = self.rf_sizes[h, w] # rf_sizes: Hz x Wz x 2
hs0, hs1, ws0, ws1 = self.rf_idxs[h, w]
if do_super_rfs:
mh, mw = naive_midpoints_h[h], naive_midpoints_w[w]
hs0_ = mh - rf_h // 2
hs1_ = mh + ceil(rf_h // 2)
ws0_ = mw - rf_w // 2
ws1_ = mw + ceil(rf_w // 2)
h_pad0 = max(-hs0_, 0)
h_pad1 = max(hs1_ - H - max(hs0_, 0), 0)
w_pad0 = max(-ws0_, 0)
w_pad1 = max(ws1_ - W - max(ws0_, 0), 0)
if h_size < H:
if hs0 != 0:
h_pad0 += H - h_size
else:
h_pad1 += H - h_size
if w_size < W:
if ws0 != 0:
w_pad0 += W - w_size
else:
w_pad1 += W - w_size
g_kern_hw = g_kern[int(h_pad0) : k_size - ceil(h_pad1), int(w_pad0) : k_size - ceil(w_pad1)]
else:
h_pad0 = h_pad1 = 0
w_pad0 = w_pad1 = 0
if h_size < rf_h:
if hs1 - rf_h < 0:
h_pad0 += rf_h - h_size
else:
h_pad1 += rf_h - h_size
if w_size < rf_w:
if ws1 - rf_w < 0:
w_pad0 += rf_w - w_size
else:
w_pad1 += rf_w - w_size
g_kern_hw = g_kern[int(h_pad0) : k_size - ceil(h_pad1), int(w_pad0) : k_size - ceil(w_pad1)]
sims_hw_full = sims_hw * g_kern_hw[None, :, :]
heat_map_avg[:, hs0:hs1, ws0:ws1] += sims_hw_full
heat_map_counts[:, hs0:hs1, ws0:ws1] += 1
heat_map_max[:, hs0:hs1, ws0:ws1] = torch.maximum(sims_hw_full, heat_map_max[:, hs0:hs1, ws0:ws1])
# take element-wise averages according to overlap tensor (counts)
heat_map_sum = heat_map_avg.clone()
heat_map_avg /= heat_map_counts
return heat_map_max, heat_map_avg, heat_map_sum # each is P x H x W
def pixel_space_upscale(self, x_i, proto_dists):
# validate shape
if x_i.ndim == 4:
assert x_i.shape[0] == 1, x_i.shape
x_i = torch.squeeze(x_i, 0)
else:
assert x_i.ndim == 3, x_i.shape
if proto_dists.ndim == 4:
assert proto_dists.shape[0] == 1, proto_dists.shape
proto_dists = torch.squeeze(proto_dists, 0)
else:
assert proto_dists.ndim == 3, proto_dists.shape
C, H, W = x_i.shape
# dists --> sims
proto_sims = self.dist_2_sim(proto_dists)
# Sim maps
heat_map = torch.nn.functional.interpolate(proto_sims[None], (H, W), mode="bicubic")
# 1 x P x H x W --> P x H x W
heat_map = heat_map.squeeze(dim=0)
return heat_map
def pixel_space_bboxes(self, min_dist_idxs, proto_dists):
if not (self.prototype_kernel_size == self.prototype_layer_stride == 1):
raise NotImplementedError((self.prototype_kernel_size, self.prototype_layer_stride))
N, P = min_dist_idxs.shape
# N x P, N x P
fmap_h_start, fmap_w_start = unravel_index(min_dist_idxs, proto_dists.shape[-2:])
bboxes = []
for i in range(N):
bboxes_i = []
for j in range(P):
h, w = fmap_h_start[i, j], fmap_w_start[i, j]
slices_hw = self.rf_slices[h][w]
assert len(slices_hw) == 1, "unsupported at the moment"
slice_h, slice_w = slices_hw[0]
x1, y1 = slice_w.start, slice_h.start
x2, y2 = slice_w.stop, slice_h.stop
bboxes_i.append([x1, y1, x2, y2])
bboxes.append(bboxes_i)
bboxes = torch.tensor(bboxes)
return bboxes # N x P x 4
def pixel_space_centers_upscale(self, x, min_dist_idxs, proto_dists):
if not (self.prototype_kernel_size == self.prototype_layer_stride == 1):
raise NotImplementedError((self.prototype_kernel_size, self.prototype_layer_stride))
_, _, H, W = x.shape
Hz, Wz = proto_dists.shape[-2:]
# N x P, N x P
fmap_h_start, fmap_w_start = unravel_index(min_dist_idxs, [Hz, Wz])
naive_midpoints_h = torch.round((torch.arange(Hz) + 0.5) * H / Hz).int()
naive_midpoints_w = torch.round((torch.arange(Wz) + 0.5) * W / Wz).int()
centers_x = naive_midpoints_w[fmap_w_start.cpu()]
centers_y = naive_midpoints_h[fmap_h_start.cpu()]
return centers_x, centers_y # NxP each
def _classify_head_proto2patch(self, distances):
# global min pooling (N x P x H x W --> N x P x 1 x 1)
# I.e., the KxK patch of the latent representations z of the input
# images that is most similar to each of the P prototypes. Output
# indicates how present each prototype is in the image.
min_distances, min_dist_idxs = self.global_min_pool(distances)
# Convert distances to similarity using the log/linear function
prototype_activations = self.dist_2_sim(min_distances)
# Compute logits (N x C)
logits = self.last_layer(prototype_activations)
return {
"logits": logits, # N x C
"min_distances": min_distances, # N x P
"min_dist_idxs": min_dist_idxs, # N x P
"distances": distances, # N x P x H x W
"max_similarities": prototype_activations, # N x P
}
@staticmethod
def global_min_pool(distances):
"""
To gather `min_distances` using `min_dist_idxs`:
```python
distances.flatten(start_dim=2).gather(
dim=2, index=min_dist_idxs.flatten(start_dim=2)
).view_as(min_dist_idxs)
```
:param distances:
:return:
"""
with warnings.catch_warnings():
# You'd think they would've checked for positionally passed args...
warnings.filterwarnings(
"ignore", ".*order of the arguments: ceil_mode and " "return_indices will change.*", UserWarning
)
min_distances, min_dist_idxs = F.max_pool2d(
-distances, kernel_size=(distances.size()[2], distances.size()[3]), return_indices=True
)
min_distances = -min_distances
# N x P x 1 x 1 --> N x P
min_distances = min_distances.view(min_distances.shape[0], min_distances.shape[1])
min_dist_idxs = min_dist_idxs.view(min_dist_idxs.shape[0], min_dist_idxs.shape[1])
return min_distances, min_dist_idxs
def push_forward(self, x):
"""this method is needed for the pushing operation"""
return self.prototype_distances(x)
def set_prototypes(self, new_prototype_vectors, corresponding_sample_idxs=None, min_fmap_idxs=None):
self.prototype_vectors.data.copy_(new_prototype_vectors)
err_msg = "both min_fmap_idxs and corresponding_sample_idxs should be" " None or not None"
if corresponding_sample_idxs is not None:
assert min_fmap_idxs is not None, err_msg
self.corresponding_sample_idxs = corresponding_sample_idxs
self.min_fmap_idxs = min_fmap_idxs
else:
assert min_fmap_idxs is None, err_msg
def prune_prototypes(self, prototypes_to_prune):
"""
prototypes_to_prune: a list of indices each in
[0, current number of prototypes - 1] that indicates the prototypes to
be removed
"""
prototypes_to_keep = [*({*range(self.num_prototypes)} - {*prototypes_to_prune})]
self.register_parameter(
"prototype_vectors",
nn.Parameter(self.prototype_vectors.data[prototypes_to_keep, ...], requires_grad=self.learn_prototypes),
)
self.corresponding_sample_idxs = self.corresponding_sample_idxs[prototypes_to_keep, ...]
self.min_fmap_idxs = self.min_fmap_idxs[prototypes_to_keep, ...]
self.prototype_shape = tuple(self.prototype_vectors.size())
self.num_prototypes = self.prototype_shape[0]
# changing self.last_layer in place
# changing in_features and out_features make sure the numbers are
# consistent
if self.readout_type != "linear":
raise NotImplementedError(
f"Removing prototypes for readout_type={self.readout_type}" f" is not implemented yet"
)
self.last_layer.in_features = self.num_prototypes
self.last_layer.out_features = self.num_classes
self.last_layer.weight.data = self.last_layer.weight.data[:, prototypes_to_keep]
# self.ones is a registered buffer
self.ones = self.ones[prototypes_to_keep, ...]
# self.prototype_class_identity is torch tensor
# so it does not need .data access for value update
if self.class_specific:
self.prototype_class_identity = self.prototype_class_identity[prototypes_to_keep, :]
def set_last_layer_incorrect_connection(self):
"""
Initialize weight of last_layer to correct_strength if
prototype_class_identity is 1 (i.e., the prototype is for that class),
and to incorrect_strength if prototype_class_identity is 0 (i.e., the
prototype is not for that class)
"""
positive_one_weights_locations = torch.t(self.prototype_class_identity)
negative_one_weights_locations = 1 - positive_one_weights_locations
self.last_layer.weight.data.copy_(
self.correct_strength * positive_one_weights_locations
+ self.incorrect_strength * negative_one_weights_locations
)
def _initialize_weights(self):
for name, m in self.add_on_layers.named_children():
if isinstance(m, nn.Conv2d):
if name == "conv_last":
# for the sigmoid activation
nn.init.xavier_normal_(m.weight, gain=1.0)
else:
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if self.class_specific and self.readout_type == "linear":
# This is not needed (or valid) for sparse linear or proto
self.set_last_layer_incorrect_connection()
elif self.class_specific and self.readout_type == "sparse":
nn.init.ones_(self.last_layer.weight)
# Path: pixpnet/protonets/loss.py
import torch
from torch import Tensor, nn
from pixpnet.protonets.models.layers import GroupedLinear
from pixpnet.protonets.models.protonet import ProtoNet
# Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
class ClusterLoss(nn.Module):
def __init__(self, class_specific=True):
super().__init__()
self.class_specific = class_specific
def forward(self, min_distances: Tensor, target: Tensor, model: ProtoNet) -> Tensor:
# min_distances: N x P
| if self.class_specific: |
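For readers unfamiliar with ProtoPNet-style objectives, the sketch below shows one common way a class-specific cluster cost can be computed from min_distances and a one-hot prototype_class_identity of shape P x C; the function name and the inf-masking are illustrative assumptions, not necessarily how this repository implements its loss.

import torch

def cluster_cost(min_distances, target, prototype_class_identity):
    # min_distances: N x P, target: N class ids, prototype_class_identity: P x C one-hot (assumed)
    own_class = prototype_class_identity[:, target].T.bool()      # N x P mask of same-class prototypes
    masked = min_distances.masked_fill(~own_class, float("inf"))  # ignore other classes' prototypes
    return masked.min(dim=1).values.mean()                        # mean over samples of the closest same-class prototype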
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Dinghow/UIM
# Path: util/dataset.py
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
BASE_DIR = 'Combined_Dataset'
class Composition1KMatting(Dataset):
class Combined4ClassesMatting(Dataset):
class RealWorldMatting(Dataset):
def __init__(self,
root='dataset',
split='train',
task='matting',
num_bgs=10,
transform=None,
preprocess=False,
retname=True):
def __getitem__(self, index):
def _composite_fg(self, fg, alpha, idx):
def _composite(self, fg, bg, a, w, h, trimap):
def __len__(self):
def __str__(self):
def __init__(self,
root='dataset',
split='all',
transform=None,
retname=True):
def __getitem__(self, index):
def __len__(self):
def __str__(self):
def __init__(self,
root='dataset',
transform=None,
retname=True):
def __getitem__(self, index):
def __len__(self):
def __str__(self):
# Path: util/util.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
# Path: util/util.py
def compute_mse(pred, alpha, trimap=None):
if trimap is not None:
num_pixels = float((trimap == 128).sum())
return ((pred - alpha) ** 2 * (trimap == 128) ).sum() / (num_pixels + 1e-8)
else:
num_pixels = float(np.prod(alpha.shape))
return ((pred - alpha) ** 2).sum() / (num_pixels + 1e-8)
# Path: util/util.py
def compute_sad(pred, alpha, trimap=None):
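# By matting convention, SAD is reported in thousands of absolute-difference units (hence the division by 1000) and is restricted to the unknown (trimap == 128) band when a trimap is supplied.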
diff = np.abs(pred - alpha)
if trimap is not None:
return np.sum(diff * (trimap == 128)) / 1000
else:
return np.sum(diff) / 1000
# Path: util/util.py
def compute_gradient(pred, target, trimap=None):
pred_x, pred_y = gaussgradient(pred, 1.4)
target_x, target_y = gaussgradient(target, 1.4)
pred_amp = np.sqrt(pred_x ** 2 + pred_y ** 2)
target_amp = np.sqrt(target_x ** 2 + target_y ** 2)
error_map = (pred_amp - target_amp) ** 2
if trimap is not None:
loss = np.sum(error_map[trimap == 128])
else:
loss = np.sum(error_map)
return loss / 1000.
# Path: util/util.py
def compute_connectivity(pred, target, trimap=None, step=0.1):
h, w = pred.shape
thresh_steps = list(np.arange(0, 1 + step, step))
l_map = np.ones_like(pred, dtype=float) * -1
for i in range(1, len(thresh_steps)):
pred_alpha_thresh = (pred >= thresh_steps[i]).astype(int)
target_alpha_thresh = (target >= thresh_steps[i]).astype(int)
omega = getLargestCC(pred_alpha_thresh * target_alpha_thresh).astype(int)
flag = ((l_map == -1) & (omega == 0)).astype(int)
l_map[flag == 1] = thresh_steps[i - 1]
l_map[l_map == -1] = 1
pred_d = pred - l_map
target_d = target - l_map
pred_phi = 1 - pred_d * (pred_d >= 0.15).astype(int)
target_phi = 1 - target_d * (target_d >= 0.15).astype(int)
if trimap is not None:
loss = np.sum(np.abs(pred_phi - target_phi)[trimap == 128])
else:
loss = np.sum(np.abs(pred_phi - target_phi))
return loss / 1000.
# Path: util/util.py
def get_cuda_devices():
os.system('nvidia-smi -q -d Memory | grep -A4 GPU | grep Free > tmp')
memory_gpu = np.array([int(x.split()[2]) for x in open('tmp', 'r').readlines()])
clen = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
devices = heapq.nlargest(clen, range(len(memory_gpu)), memory_gpu.take)
test_gpu = devices
os.environ['CUDA_VISIBLE_DEVICES']=str(devices)[1:-1]
os.system('rm tmp')
return devices
# Path: util/util.py
def get_unknown_tensor_from_pred(pred, rand_width=30, train_mode=True):
### pred: N, 1 ,H, W
N, C, H, W = pred.shape
pred = pred.data.cpu().numpy()
uncertain_area = np.ones_like(pred, dtype=np.uint8)
uncertain_area[pred<1.0/255.0] = 0
uncertain_area[pred>1-1.0/255.0] = 0
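# pixels that are confidently foreground or background are cleared here; the remaining band around the predicted alpha boundary is dilated below to form the unknown region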
for n in range(N):
uncertain_area_ = uncertain_area[n,0,:,:] # H, W
if train_mode:
width = np.random.randint(1, rand_width)
else:
width = rand_width // 2
uncertain_area_ = cv2.dilate(uncertain_area_, Kernels[width])
uncertain_area[n,0,:,:] = uncertain_area_
weight = np.zeros_like(uncertain_area)
weight[uncertain_area == 1] = 1
weight = torch.from_numpy(weight).cuda()
return weight
# Path: util/custom_transforms.py
class interactiveMattingTransform(object):
# modified from transform.py of dingrunyu
def __init__(self, channel, no_crop = False, diff_width = False,\
relax_crop = 50, zero_pad_crop = True, use_iogpoints = False,\
use_roimasking = False, use_trimap = False, use_bbox = False,\
use_in_point = False, use_iogdextr = False, use_extreme_points = False,\
use_scribble = False, rotate_degree = 30, scale = [0.8, 1.25], shear = 10,\
flip = 0.5, crop_size = 512, trimap_type = 'standard', mask_type = 'alpha',\
bbox_type = 'area', trimap_one_hot=True):
self.channel = channel
self.no_crop = no_crop
self.diff_width = diff_width
self.relax_crop = relax_crop
self.zero_pad_crop = zero_pad_crop
self.rotate_degree = rotate_degree
self.scale = scale
self.shear = shear
self.flip = flip
self.crop_size = crop_size
self.trimap_type = trimap_type
self.mask_type = mask_type
self.bbox_type = bbox_type
self.use_roimasking = use_roimasking
self.trimap_one_hot = trimap_one_hot
self.use_trimap = use_trimap
self.use_extreme_points = use_extreme_points
self.use_bbox = use_bbox
self.use_in_point = use_in_point
self.use_iogpoints = use_iogpoints
self.use_iogdextr = use_iogdextr
self.use_scribble = use_scribble
def getTrainTransform(self):
transform_tr = [
RandomAffine(degrees=self.rotate_degree, scale=self.scale, shear=self.shear, flip=self.flip),
GenTrimap(),
RandomCrop(output_size=(self.crop_size, self.crop_size)),
RandomJitter(),
Composite(),
GenMask(mask_type=self.mask_type),
MattingToTensor(phase="train", trimap_type=self.trimap_type, in_channels=self.channel, trimap_one_hot=self.trimap_one_hot)]
tr_ep = ExtremePoints(sigma=10, pert=5, elem='alpha')
tr_in = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='in')
tr_out = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='out')
tr_bbox = OutPoints(sigma=10, pert=5, elem='alpha')
tr_crop = CropFromMask(crop_elems=('alpha', 'fg'), \
relax=self.relax_crop, zero_pad=self.zero_pad_crop, \
crop=False if self.no_crop else True, use_roimasking = self.use_roimasking,\
is_matting=True)
tr_scribble = GenerateScribble(elem='alpha')
if not self.no_crop:
transform_tr.insert(0, tr_crop)
if self.channel == 5 and self.use_iogpoints:
print('Use foreground/background points')
transform_tr.insert(-3, tr_in)
transform_tr.insert(-3, tr_out)
transform_tr.insert(-3, ToImage(norm_elem=('in_points', 'out_points')))
elif self.channel == 6 and self.use_trimap:
print('Use trimap (one-hot)')
elif self.channel == 4 and self.use_trimap:
print('Use trimap')
elif self.channel == 4 and self.use_bbox:
print('Use bounding box')
if self.bbox_type == 'points':
transform_tr.insert(-3, tr_out)
elif self.bbox_type == 'area':
transform_tr.insert(-3, tr_bbox)
else:
raise RuntimeError('Wrong bbox type.')
transform_tr.insert(-3, ToImage(norm_elem=('out_points')))
elif self.channel == 4 and self.use_in_point:
print('Use inside point')
transform_tr.insert(-3, tr_in)
transform_tr.insert(-3, ToImage(norm_elem=('in_points')))
elif self.channel == 4 and self.use_extreme_points:
print('Use extreme points')
transform_tr.insert(-3, tr_ep)
transform_tr.insert(-3, ToImage(norm_elem='extreme_points'))
elif self.channel == 4 and self.use_scribble:
print('Use scribble')
transform_tr.insert(-3, tr_scribble)
transform_tr.insert(-3, ToImage(norm_elem='scribble'))
elif self.channel == 3:
print('Use no annotation')
else:
raise NotImplementedError('Wrong interactive method.')
print([str(tran) for tran in transform_tr])
return transforms.Compose(transform_tr)
def getTestTransform(self, reserveGT = False):
transform_ts = [
MattingToTensor(phase="test", in_channels=self.channel, trimap_one_hot=self.trimap_one_hot)]
tr_ep = ExtremePoints(sigma=10, pert=5, elem='alpha')
tr_in = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='in')
tr_out = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='out')
tr_bbox = OutPoints(sigma=10, pert=5, elem='alpha')
tr_crop = CropFromMask(crop_elems=('image', 'alpha', 'trimap'), \
relax=self.relax_crop, zero_pad=self.zero_pad_crop, \
crop=False if self.no_crop else True, use_roimasking = self.use_roimasking,\
is_matting=True)
tr_scribble = GenerateScribble(elem='alpha')
if not self.no_crop:
transform_ts.insert(0, tr_crop)
if self.channel == 5 and self.use_iogpoints:
print('Use foreground/background points')
transform_ts.insert(-1, tr_in)
transform_ts.insert(-1, tr_out)
transform_ts.insert(-1, ToImage(norm_elem=('in_points', 'out_points')))
elif self.channel == 6 and self.use_trimap:
print('Use trimap (one-hot)')
elif self.channel == 4 and self.use_trimap:
print('Use trimap')
elif self.channel == 4 and self.use_bbox:
print('Use bounding box')
if self.bbox_type == 'points':
transform_ts.insert(-1, tr_out)
elif self.bbox_type == 'area':
transform_ts.insert(-1, tr_bbox)
else:
raise RuntimeError('Wrong bbox type.')
transform_ts.insert(-1, ToImage(norm_elem=('out_points')))
elif self.channel == 4 and self.use_in_point:
print('Use inside point')
transform_ts.insert(-1, tr_in)
transform_ts.insert(-1, ToImage(norm_elem=('in_points')))
elif self.channel == 4 and self.use_extreme_points:
print('Use extreme points')
transform_ts.insert(-1, tr_ep)
transform_ts.insert(-1, ToImage(norm_elem='extreme_points'))
elif self.channel == 4 and self.use_scribble:
print('Use scribble')
transform_ts.insert(-1, tr_scribble)
transform_ts.insert(-1, ToImage(norm_elem='scribble'))
elif self.channel == 3:
print('Use no annotation')
else:
raise NotImplementedError('Wrong interactive method.')
print([str(tran) for tran in transform_ts])
return transforms.Compose(transform_ts)
# Path: util/dataset.py
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
BASE_DIR = 'Combined_Dataset'
class Composition1KMatting(Dataset):
class Combined4ClassesMatting(Dataset):
class RealWorldMatting(Dataset):
def __init__(self,
root='dataset',
split='train',
task='matting',
num_bgs=10,
transform=None,
preprocess=False,
retname=True):
def __getitem__(self, index):
def _composite_fg(self, fg, alpha, idx):
def _composite(self, fg, bg, a, w, h, trimap):
def __len__(self):
def __str__(self):
def __init__(self,
root='dataset',
split='all',
transform=None,
retname=True):
def __getitem__(self, index):
def __len__(self):
def __str__(self):
def __init__(self,
root='dataset',
transform=None,
retname=True):
def __getitem__(self, index):
def __len__(self):
def __str__(self):
# Path: util/config.py
class CfgNode(dict):
def __init__(self, init_dict=None, key_list=None, new_allowed=False):
def __getattr__(self, name):
def __setattr__(self, name, value):
def __str__(self):
def _indent(s_, num_spaces):
def __repr__(self):
def load_cfg_from_cfg_file(file):
def merge_cfg_from_list(cfg, cfg_list):
def _decode_cfg_value(v):
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
def conditional_cast(from_type, to_type):
def _assert_with_logging(cond, msg):
# Path: util/helpers.py
def dilate(im, kernel=20):
def tens2image(im):
def crop2fullmask(crop_mask, bbox, im=None, im_size=None, zero_pad=False, relax=0, mask_relax=True,
#interpolation=cv2.INTER_CUBIC, scikit=False):
interpolation=cv2.INTER_LINEAR, scikit=False):
def align2fullmask(crop_mask, im_size, points, relax=0):
def overlay_mask(im, ma, colors=None, alpha=0.5):
def overlay_masks(im, masks, alpha=0.5):
def extreme_points(mask, pert):
def find_point(id_x, id_y, ids):
def getPositon(distance_transform):
def in_points(mask, pert):
def out_points(mask, pert):
def out_points_mask(mask, pert):
def get_bbox(mask, points=None, pad=0, zero_pad=False, use_roimasking=False):
def crop_from_bbox(img, bbox, zero_pad=False, use_roimasking=False):
def fixed_resize(sample, resolution, flagval=None):
def crop_from_mask(img, mask, relax=0, zero_pad=False, use_roimasking = False):
def make_gaussian(size, sigma=10, center=None, d_type=np.float64):
def make_gt(img, labels, sigma=10, one_mask_per_point=False):
def make_gt_bbox(img, labels, sigma=10, one_mask_per_point=False):
def cstm_normalize(im, max_value):
def generate_param_report(logfile, param):
def color_map(N=256, normalized=False):
def bitget(byteval, idx):
def save_mask(results, mask_path):
def B_spline(control_points, num_i, s=0.5):
def generate_scribble_strictly(mask, num_c=3, num_i=50, coverage_area=0.1, width=10, best_out_of=5):
def generate_trimap_with_gaussian(mask):
def clamp(input, min=None, max=None):
def produce_trimap(mask):
def unified_trimap_transform(trimap, sample_name, split_dir):
H, W = mask.shape
# Path: seg_matting_tool/test_4classes.py
import numpy as np
import os.path
import logging
import argparse
import cv2
import torch.nn.parallel
import numpy as np
import util.helpers as helpers
import os
import random
import time
import cv2
import numpy as np
import logging
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.multiprocessing as mp
import torch.distributed as dist
from PIL import Image
from util import dataset
from util.util import AverageMeter, compute_mse, compute_sad, compute_gradient, compute_connectivity, get_cuda_devices, get_unknown_tensor_from_pred
from torch.nn.functional import upsample
from torchvision import transforms
from tensorboardX import SummaryWriter
from util.custom_transforms import interactiveMattingTransform
from util import dataset, config, helpers
from model.mattingnet import Unified_Interactive_Matting
from model.mattingnet import Unified_Interactive_Matting_trimap
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def get_relax_pad(relax_pad, extreme_points):
if relax_pad <= 0:
return 0
if relax_pad >= 1:
return int(relax_pad)
x_min, y_min = np.min(extreme_points, axis=0)
x_max, y_max = np.max(extreme_points, axis=0)
x_len = x_max - x_min + 1
y_len = y_max - y_min + 1
return max(20, int(relax_pad * max(x_len, y_len)))
def main():
global args, logger, writer
use_void_pixels=True
logger = get_logger()
args = get_parser()
# writer = SummaryWriter(args.save_folder)
if args.test_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
else:
args.test_gpu = get_cuda_devices()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(args)  # print the args to the console
if args.manual_seed is not None:
random.seed(args.manual_seed)
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
cudnn.benchmark = False
cudnn.deterministic = True
# transform and dataloader
_interactive_matting_transform = interactiveMattingTransform(channel=args.in_channels, no_crop=args.no_crop, relax_crop=args.relax_crop,\
use_iogpoints=args.use_iogpoints, use_roimasking=args.use_roimasking, use_trimap=args.use_trimap,\
use_in_point=args.use_in_point, use_bbox=args.use_bbox, use_iogdextr=args.use_iogdextr, use_extreme_points=args.use_extreme_points, use_scribble=args.use_scribble,\
rotate_degree=args.rotate_degree, scale=args.scale, shear=args.shear,\
flip=args.flip, crop_size=args.crop_size, mask_type=args.mask_type, bbox_type=args.bbox_type)
composed_transforms_ts = _interactive_matting_transform.getTestTransform()
val_data = dataset.Combined4ClassesMatting(root=args.data_root, split=args.test_split,transform=composed_transforms_ts)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_test, shuffle=False, num_workers=args.workers_test, pin_memory=True, sampler=None)
# model
if args.arch == 'uim':
model = Unified_Interactive_Matting(n_classes=args.classes, in_channels=args.in_channels, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, fusion_method=args.fusion_method)
elif args.arch == 'uim_trimap':
model = Unified_Interactive_Matting_trimap(n_classes=args.classes, in_channels=args.in_channels, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, fusion_method=args.fusion_method)
else:
raise RuntimeError('Wrong arch.')
logger.info(model)
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
model = model.to(device)
model.eval()
# checkpoint
model_path = args.model_path
if os.path.isfile(model_path):
logger.info("=> loading checkpoint '{}'".format(model_path))
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint)
logger.info("=> loaded checkpoint '{}'".format(model_path))
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(model_path))
# evaluate
print('evaluating Network')
eval_result = dict()
eval_result['all_mse_tri_free'] = AverageMeter()
eval_result['all_sad_tri_free'] = AverageMeter()
eval_result['all_grad_tri_free'] = AverageMeter()
eval_result['all_connectivity_tri_free'] = AverageMeter()
eval_result["per_categ_mse"] = dict()
eval_result["per_categ_sad"] = dict()
eval_result["per_categ_grad"] = dict()
eval_result["per_categ_conn"] = dict()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
with torch.no_grad():
# Main Testing Loop
for ii, sample in enumerate(val_loader):
if ii % 10 == 0:
print('Evaluating: {} of {} batches'.format(ii, len(val_loader)))
# predict result and gt
image = sample['image']
alpha = sample['alpha']
trimap_ori = sample['trimap_ori']
alpha_shape = sample['alpha_shape']
alpha_ori = sample['alpha_ori']
metas = sample["meta"]
cat = int(metas['category'][0])
if args.use_iogpoints:
interactive = torch.cat((sample['in_points'],sample['out_points']), dim=1)
elif args.use_trimap:
interactive = sample['trimap']
elif args.use_bbox:
interactive = sample['out_points']
elif args.use_in_point:
interactive = sample['in_points']
elif args.use_extreme_points:
interactive = sample['extreme_points']
elif args.use_scribble:
interactive = sample['scribble']
else:
interactive = None
if 'gca' in args.arch or 'aim' in args.arch or 'uim' in args.arch or 'interactive_matting' in args.arch:
pred = model.forward(image, interactive)
else:
pred = model.forward(input)
if not args.arch == 'mgmatting':
| pred = pred.to(torch.device('cpu')) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: SJTU-Quant/SUNNY-GNN
# Path: models/utils.py
def aug_mask(g, n_pos, n_neg, topk, k):
edge_mask = g.edata['e_att']
pos_masks = None
neg_masks = None
cts_idxs = torch.tensor([]).long()
e_d_mask = torch.zeros_like(edge_mask)
e_d_mask[:, 1] = distance_coef(1) * edge_mask[:, 1]
e_d_mask[:, 0] = distance_coef(2) * edge_mask[:, 0]
e_d_mask[e_d_mask > 1] = 1.
g.edata['e_att'] = e_d_mask
gs = dgl.unbatch(g)
for i in range(len(gs)):
e_h_mask = gs[i].edata['e_h_mask'].T.cpu()
relevant_edges = torch.nonzero(e_h_mask[0]).shape[0] + torch.nonzero(e_h_mask[1]).shape[0]
mask = gs[i].edata['e_att'].T.cpu()
mask = mask.view(-1)
pos_mask, neg_mask, cts_idxs = \
perturb_mask(mask, n_pos, n_neg, topk, k, cts_idxs, i, relevant_edges)
if pos_masks is None:
pos_masks = pos_mask.view(n_pos, 2, -1).transpose(0, 1)
neg_masks = neg_mask.view(n_neg, 2, -1).transpose(0, 1)
else:
pos_masks = torch.cat((pos_masks, pos_mask.view(n_pos, 2, -1).transpose(0, 1)), dim=2)
neg_masks = torch.cat((neg_masks, neg_mask.view(n_neg, 2, -1).transpose(0, 1)), dim=2)
del gs
return pos_masks, neg_masks, cts_idxs
# Path: models/utils.py
def aug_mask_hetero(g, n_pos, n_neg, topk, k):
edge_mask = g.edata['e_att']
pos_masks = {etp: [] for etp in g.etypes}
neg_masks = {etp: [] for etp in g.etypes}
cts_idxs = torch.tensor([]).long()
for etp in edge_mask.keys():
e_d_mask = torch.zeros_like(edge_mask[etp])
e_d_mask[:, 1] = distance_coef(1) * edge_mask[etp][:, 1]
e_d_mask[:, 0] = distance_coef(2) * edge_mask[etp][:, 0]
g.edata['e_att'][etp] = e_d_mask
gs = dgl.unbatch(g)
for i in range(len(gs)):
mask = torch.tensor([])
e_num = torch.tensor([], dtype=torch.uint8)
m = gs[i].edata['e_att']
e_h_m = gs[i].edata['e_h_mask']
relevant_edges = 0
for i, etp in enumerate(m):
mask = torch.cat((mask, m[etp].T.cpu().view(-1)))
relevant_edges += torch.nonzero(e_h_m[etp]).shape[0]
e_num = torch.cat((e_num, torch.tensor([i]*m[etp].shape[0]*m[etp].shape[1])))
pos_mask, neg_mask, cts_idxs = \
perturb_mask(mask, n_pos, n_neg, topk, k, cts_idxs, i, relevant_edges)
for i, etp in enumerate(pos_masks):
pos_masks[etp].append(pos_mask[:, e_num==i])
neg_masks[etp].append(neg_mask[:, e_num==i])
del gs
for etp in pos_masks:
pos_masks[etp] = torch.cat(pos_masks[etp], dim=1)
neg_masks[etp] = torch.cat(neg_masks[etp], dim=1)
return pos_masks, neg_masks, cts_idxs
# Path: models/snexgnn.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from models.utils import aug_mask, aug_mask_hetero
def loss(self, edge_mask, logits, labels):
pred_loss = F.cross_entropy(logits, labels)
sparsity_loss = self.sparsity(edge_mask)
return pred_loss + sparsity_loss
def batched_emb(self, g, x, batched_edge_mask, idx):
n_samples = batched_edge_mask.shape[1]
g.ndata['x'] = x
gs = dgl.batch([g] * n_samples)
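# replicate the (already batched) graph once per candidate edge mask so every perturbed mask is scored in a single forward pass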
x = gs.ndata.pop('x')
self.encoder.set_graph(gs)
h = self.encoder.get_emb(x, batched_edge_mask.view(2, -1, 1))
h = h.view(g.number_of_nodes(), n_samples, -1, h.shape[-1]).mean(2)
offset = torch.cat([torch.tensor([0], device=gs.device),
gs.batch_num_nodes().cumsum(dim=0)[:int(gs.batch_size / n_samples) - 1]])
proj = self.proj_head(h[offset])
logits = self.encoder(h, offset).view(g.batch_size, n_samples, -1)
logits = torch.softmax(logits, dim=2)
del gs
return proj[idx], logits[idx]
def get_edge_att(self, g, all_emb, e_batch, h_target):
def calc_att(mask, hop_batch, k=1):
n_map = g.ndata['_ID']
e = g.edges()[0][mask], g.edges()[1][mask]
emb = torch.cat([self.f[k - 1](all_emb[k - 1][n_map[e[0]]]),
self.f[k](all_emb[k][n_map[e[1]]]),
h_target[hop_batch]], dim=1)
att = self.extractor(emb)
return att
e_h_mask = g.edata['e_h_mask'].T
one_hop_mask = torch.nonzero(e_h_mask[0]).view(-1)
one_hop_att = calc_att(one_hop_mask, e_batch[one_hop_mask], k=2)
two_hop_mask = torch.nonzero(e_h_mask[1]).view(-1)
two_hop_att = calc_att(two_hop_mask, e_batch[two_hop_mask], k=1)
edge_att = torch.zeros((2, g.number_of_edges(), 1), device=g.device)
edge_att[:, :, :] = self.MIN_WEIGHT
edge_att[0][two_hop_mask] = two_hop_att
edge_att[1][one_hop_mask] = one_hop_att
return edge_att
def forward(self, g, all_emb, labels, training=False, explain=False, epoch=0):
x = all_emb[0][g.ndata['_ID']]
with g.local_scope():
offset_node = torch.cat([torch.tensor([0], device=g.device),
g.batch_num_nodes().cumsum(dim=0)[:-1]])
h_target = self.f[2](all_emb[2][g.ndata['_ID'][offset_node]])
e_batch = torch.repeat_interleave(torch.arange(g.batch_size, device=g.device),
g.batch_num_edges())
edge_att = self.get_edge_att(g, all_emb, e_batch, h_target)
if explain:
return edge_att
edge_att = self.sampling(edge_att, training)
self.encoder.set_graph(g)
enc_emb = self.encoder.get_emb(x, edge_att)
enc_logits = self.encoder(enc_emb, offset_node)
if training:
pred_loss = self.loss(edge_att, enc_logits, labels)
g.edata['e_att'] = edge_att.view(2, g.number_of_edges()).T
enc_proj = self.proj_head(enc_emb[offset_node])
topk = (self.max_topk - (self.max_topk - self.min_topk) * epoch / self.max_epoch)
pos_edge_att, neg_edge_att, cts_idxs = self.get_cts_mask(g, topk, self.k)
pos_enc_proj, pos_enc_logits = self.batched_emb(g, x, pos_edge_att.to(g.device), cts_idxs)
neg_enc_proj, neg_enc_logits = self.batched_emb(g, x, neg_edge_att.to(g.device), cts_idxs)
cts_loss = self.cts_loss(enc_proj[cts_idxs], pos_enc_proj, neg_enc_proj, pos_enc_logits,
neg_enc_logits, labels[cts_idxs])
return enc_logits, [pred_loss, cts_loss]
return enc_logits, None
class SNexHGN(SNexGNN):
def __init__(self, pret_encoder, encoder, extractor, in_dim, target_ntype, n_heads=1, dropout=0.5):
super(SNexHGN, self).__init__(pret_encoder, encoder, extractor, in_dim, target_ntype, n_heads, dropout)
self.sparsity_mask_coef = 1e-5
self.sparsity_ent_coef = 1e-5
def get_edge_att_hetero(self, g, all_emb, e_batch, h_target, etype, ntypes):
def calc_att(mask, hop_batch, k):
n_map_src = g.ndata['_ID'][ntypes[0]]
n_map_dst = g.ndata['_ID'][ntypes[1]]
e = g.edges(etype=etype)[0][mask], g.edges(etype=etype)[1][mask]
src_emb = self.f[k - 1][ntypes[0]](all_emb[k - 1][ntypes[0]][n_map_src[e[0]]])
dst_emb = self.f[k][ntypes[1]](all_emb[k][ntypes[1]][n_map_dst[e[1]]])
emb = torch.cat([src_emb, dst_emb, h_target[hop_batch]], dim=1)
att = self.extractor(emb)
return att
e_h_mask = g.edata['e_h_mask'][(ntypes[0], etype, ntypes[1])].T
one_hop_mask = torch.nonzero(e_h_mask[0]).view(-1)
if one_hop_mask.shape[0] > 0:
one_hop_att = calc_att(one_hop_mask, e_batch[one_hop_mask], k=2)
else:
one_hop_att = torch.tensor([]).view(0, 1).to(g.device)
two_hop_mask = torch.nonzero(e_h_mask[1]).view(-1)
if two_hop_mask.shape[0] > 0:
two_hop_att = calc_att(two_hop_mask, e_batch[two_hop_mask], k=1)
else:
two_hop_att = torch.tensor([]).view(0, 1).to(g.device)
edge_att = torch.zeros((2, g.num_edges(etype), 1), device=g.device)
edge_att[:, :, :] = self.MIN_WEIGHT
edge_att[0][two_hop_mask] = two_hop_att
edge_att[1][one_hop_mask] = one_hop_att
return edge_att
def batched_emb_hetero(self, g, x, batched_edge_mask, n_samples, idx):
g.ndata['x'] = x
gs = dgl.batch([g] * n_samples)
x = gs.ndata.pop('x')
self.encoder.set_graph(gs)
for etp in batched_edge_mask.keys():
| batched_edge_mask[etp] = batched_edge_mask[etp].view(2, -1, 1).to(gs.device) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dvmazur/mixtral-offloading
# Path: src/expert_cache.py
class ExpertCache:
def __init__(self, make_module: callable, main_size: int, offload_size: int, buffer_size: int):
"""Dynamically loads an array of modules with identical hyperparameters"""
self.module_type = self.module_size = self.device = None
self.active = False
self.registered_experts: Dict[ExpertUID, ExpertInfo] = dict()
self.main_modules = [self._check_module(make_module()) for i in range(main_size)]
self.main_infos: List[Optional[ExpertInfo]] = [None for _ in range(main_size)]
assert self.module_size is not None
self.offloaded_storages = [
torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(offload_size)]
self.offloaded_infos: List[Optional[ExpertInfo]] = [None for _ in range(offload_size)]
# temporary storage to shave off latency
self.device_expert_buffers = deque([self._check_module(make_module()) for _ in range(buffer_size)])
self.offloaded_storage_buffers = deque([
torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(buffer_size)])
self.group_infos: Dict[int, EvictionGroupInfo] = defaultdict(EvictionGroupInfo)
def _check_module(self, module: MixtralExpertWrapper):
assert isinstance(module.storage, torch.UntypedStorage)
if self.module_type is None:
self.module_type = type(module)
self.module_size = len(module.storage)
self.device = module.storage.device
else:
assert isinstance(module, self.module_type)
assert len(module.storage) == self.module_size
assert module.storage.device == self.device
return module
def add_expert(self, uid: ExpertUID, module: MixtralExpertWrapper, eviction_group: int = 0,
offload: Optional[bool] = None):
"""Register an expert to the cache and associate it with uid"""
assert self.module_type is not None
assert isinstance(module, self.module_type)
return self.add_expert_storage(uid, module.storage, eviction_group=eviction_group, offload=offload)
def add_expert_storage(self, uid: ExpertUID, storage: torch.UntypedStorage,
eviction_group: int = 0, offload: Optional[bool] = None):
assert uid not in self.registered_experts, f"expert {uid} already registered"
assert isinstance(storage, torch.UntypedStorage)
assert len(storage) == self.module_size
if offload is None or not offload: # False or None
for i in range(len(self.main_modules)):
if self.main_infos[i] is None:
self.main_modules[i].storage.copy_(storage)
info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=False, index=i)
self.registered_experts[uid] = self.main_infos[i] = info
self.group_infos[eviction_group].add(info)
return # done allocating; found spot on device
if offload is None or offload: # True or None
for i in range(len(self.offloaded_storages)):
if self.offloaded_infos[i] is None:
self.offloaded_storages[i].copy_(storage)
info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=True, index=i)
self.registered_experts[uid] = self.offloaded_infos[i] = info
self.group_infos[eviction_group].add(info)
return # done allocating; found an offloaded spot
raise ValueError("Cache is full")
def load_experts(
self, *uids: ExpertUID, unordered: bool = False) -> Iterator[Tuple[ExpertUID, MixtralExpertWrapper]]:
"""
:example:
>>> expert_iter = expert_cache.load_experts(*list_of_uids, unordered=True)
>>> for uid, expert in expert_iter:
>>>     result += expert(x) * get_moe_weight(uid)
:param uids: iterate over the specified expert uids. Same uids as in add_expert
:param unordered: if True, allows cache to iterate experts in arbitrary order
The order is chosen to minimize the total wait time.
:returns: an iterator that yields (uid, expert) pairs, only usable inside the for loop
"""
assert len(set(uids)) == len(uids)
assert not self.active, "already loading experts; buffers are busy"
if unordered: # yield non-offloaded experts first
uids = sorted(uids, key=lambda uid: self.registered_experts[uid].offloaded)
infos = [self.registered_experts[uid] for uid in uids]
assert len(set(info.eviction_group for info in infos)) == 1, "experts must be in the same eviction group"
eviction_group = self.group_infos[infos[0].eviction_group]
for info in infos:
eviction_group.mark_used(info)
try:
self.active = True
# save pre-loaded experts before they can be swapped
pre_loaded_infos = deque([info for info in infos if not info.offloaded])
pre_loaded_experts = deque([self.main_modules[info.index] for info in pre_loaded_infos])
# begin loading experts into free buffers in background (via non-blocking copy)
infos_to_load = deque([info for info in infos if info.offloaded])
infos_in_loading = deque([])
experts_in_loading = deque([])
window_size = min(len(self.device_expert_buffers) - 1,
len(eviction_group.main_infos),
len(infos_to_load))
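# prefetch at most window_size offloaded experts, bounded by the number of spare device buffers and by how many on-device experts in this eviction group can be swapped out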
for _ in range(window_size):
info_to_load = infos_to_load.popleft()
infos_in_loading.append(info_to_load)
experts_in_loading.append(
self._swap(info_to_load, eviction_group.choose_expert_to_evict()))
for info in infos:
if len(pre_loaded_infos) > 0 and info is pre_loaded_infos[0]:
pre_loaded_infos.popleft()
yield (info.uid, pre_loaded_experts.popleft())
elif len(infos_in_loading) > 0 and info is infos_in_loading[0]:
infos_in_loading.popleft()
yield (info.uid, experts_in_loading.popleft())
if len(infos_to_load) > 0:
info_to_load = infos_to_load.popleft()
infos_in_loading.append(info_to_load)
experts_in_loading.append(
self._swap(info_to_load, eviction_group.choose_expert_to_evict()))
else:
raise RuntimeError("internal error: caching algorithm failed")
finally:
self.active = False
def _swap(self, info_to_load: ExpertInfo, info_to_evict: ExpertInfo) -> nn.Module:
"""Swap an offloaded expert (info_to_load) with an on-device expert (info_to_evict) return the loaded expert"""
assert info_to_load.offloaded and not info_to_evict.offloaded
assert info_to_load.eviction_group == info_to_evict.eviction_group
# swap a single on-device expert with a single offloaded expert using buffers for parallelism
offloaded_storage_buffer = self.offloaded_storage_buffers.popleft()
device_expert_buffer = self.device_expert_buffers.popleft()
device_expert_buffer.storage.copy_(self.offloaded_storages[info_to_load.index], non_blocking=True)
offloaded_storage_buffer.copy_(self.main_modules[info_to_evict.index].storage, non_blocking=True)
self.device_expert_buffers.append(self.main_modules[info_to_evict.index])
self.main_modules[info_to_evict.index] = device_expert_buffer
self.offloaded_storage_buffers.append(self.offloaded_storages[info_to_load.index])
self.offloaded_storages[info_to_load.index] = offloaded_storage_buffer
self.main_infos[info_to_evict.index] = info_to_load
self.offloaded_infos[info_to_load.index] = info_to_evict
info_to_evict.offloaded, info_to_load.offloaded = info_to_load.offloaded, info_to_evict.offloaded
info_to_evict.index, info_to_load.index = info_to_load.index, info_to_evict.index
self.group_infos[info_to_load.eviction_group].swap(info_to_load, info_to_evict)
return device_expert_buffer
# Path: src/expert_wrapper.py
class MixtralExpertWrapper(nn.Module):
def __init__(
self,
expert_module: tp.Any,
device: torch.device,
):
super().__init__()
expert_module, self.storage = self.replace_layer_storage(expert_module, device)
self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)
self._register_state_dict_hook(self._add_storage_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)
@staticmethod
def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)
return state_dict
def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())
del state_dict[prefix + 'storage']
def forward(self, *args, **kwargs):
return self.expert_module(*args, **kwargs)
@staticmethod
def replace_layer_storage(
layer: tp.Any,
device: torch.device,
):
state_dict = {
f"w{i}": {
"W_q": getattr(layer, f"w{i}").W_q,
"meta": getattr(layer, f"w{i}").meta,
"bias": getattr(layer, f"w{i}").bias,
}
for i in range(1, 4)
}
storage_size = 0
offsets = [0]
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
continue
storage_size += x.nbytes
offsets.append(storage_size)
storage = torch.UntypedStorage(storage_size, device=device)
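# re-create every tensor as a view into this single contiguous storage so the whole expert can be moved on or off the GPU with one storage copy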
i = 0
new_flattened_states = list()
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
new_flattened_states.append(x)
continue
start = offsets[i]
end = offsets[i + 1]
a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)
a_view[...] = x
assert a_view.data_ptr() == storage.data_ptr() + start
i += 1
new_flattened_states.append(a_view)
state_dict = nested_pack(new_flattened_states, state_dict)
for layer_id, states in state_dict.items():
patched = getattr(layer, layer_id)
patched.W_q = states["W_q"]
patched.meta = states["meta"]
patched.bias = states["bias"]
setattr(layer, layer_id, patched)
return layer, storage
# Path: src/custom_layers.py
class HQQLinearTritonSavable(HQQLinear):
def __init__(self, layer, quant_config, meta=None, **kwargs):
"""
Example of how to get meta:
>>> meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
>>> meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
"""
assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4]
super().__init__(layer, quant_config, **kwargs)
if not hasattr(self, 'meta'):
assert meta is not None
self.meta = copy.deepcopy(meta)
self._register_state_dict_hook(self._add_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook)
def quantize(self, *args, **kwargs):
super().quantize(*args, **kwargs)
# repacking
self.repack()
def repack(self):
if self.W_q.shape != self.meta['shape']:
W_q = Quantizer.unpack[self.meta['packing']](self.W_q)
sh = self.meta['shape']
W_q = W_q.reshape((-1,) + sh[1:])
W_q = W_q[:sh[0], ...]
self.W_q = Quantizer.pack[self.meta['packing']](W_q)
def forward(self, x):
return self.forward_triton(x)
def set_backend(self, backend):
pass
@torch.inference_mode()
def forward_triton(self, x):
assert self.ready, "model was not quantized"
assert self.meta['axis'] == 0
W_q, meta = self.W_q, self.meta
del_keys = []
if 'quant_scale' in meta and meta['quant_scale']:
meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')
if 'quant_zero' in meta and meta['quant_zero']:
meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')
K = meta['shape'][1]
N = meta['shape'][0]
if self.meta['nbits'] == 4:
fn = triton_matmul4_transpose
elif self.meta['nbits'] == 3:
fn = functools.partial(triton_matmul3_transpose, N=N)
elif self.meta['nbits'] == 2:
fn = triton_matmul2_transpose
else:
raise RuntimeError(f"nbits == {self.meta['nbits']} isn't yet supported")
output = fn(
meta['group_size'], x,
W_q.view(-1, K),
meta['scale'].view(-1, K),
meta['zero'].view(-1, K),
bias=self.bias if hasattr(self, 'bias') else None,
)
#Cleanup
for key in del_keys:
del meta[key]
return output
# to support .forward_pytorch(...) - backward compatibility
@torch.inference_mode()
def dequantize(self):
assert self.ready, "model was not quantized"
W_q, meta = self.W_q, self.meta
del_keys = []
if(meta['quant_scale']):
meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')
if(meta['quant_zero']):
meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')
W_q_p = Quantizer.unpack[meta['packing']](W_q).half()
W_q_p = W_q_p[:meta['shape'][0], ...]
W_q_p = W_q_p.reshape((meta['group_size'], -1))
if((meta['group_size'] is not None) and (meta['nbits']==3)):
W_q_p = W_q_p[:meta['group_size']] if (meta['axis']==0) else W_q_p[:,:meta['group_size']]
W_est = ((W_q_p - meta['zero'])*meta['scale']).reshape(meta['shape'])
#Cleanup
del W_q_p
for key in del_keys: del meta[key]
return W_est
@classmethod
def get_hqq_meta(cls, linear_shape, quant_config):
layer = HQQLinear(nn.Linear(*linear_shape, bias=False), quant_config)
meta = layer.meta
def _remove_tensors_recursive(d):
keys = list(d.keys())
for k in keys:
if isinstance(d[k], torch.Tensor):
del d[k]
elif isinstance(d[k], dict):
_remove_tensors_recursive(d[k])
_remove_tensors_recursive(meta)
return meta
@staticmethod
def _add_to_state_dict_hook(self, state_dict, prefix, local_metadata):
tensor_paths = self._get_tensor_paths(self.meta)
assert set(tensor_paths).issubset(
{'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',
'scale', 'zero'}
)
def _add(name, value):
state_dict[prefix + name] = value
_add('W_q', self.W_q)
if self.bias is not None:
_add('bias', self.bias)
if 'meta_scale' in self.meta:
_add('meta.scale_q', self.meta['scale_q'])
_add('meta.meta_scale.scale', self.meta['meta_scale']['scale'])
_add('meta.meta_scale.zero', self.meta['meta_scale']['zero'])
else:
_add('meta.scale', self.meta['scale'])
if 'meta_zero' in self.meta:
_add('meta.zero_q', self.meta['zero_q'])
_add('meta.meta_zero.scale', self.meta['meta_zero']['scale'])
_add('meta.meta_zero.zero', self.meta['meta_zero']['zero'])
else:
_add('meta.zero', self.meta['zero'])
return state_dict
def _load_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
tensor_paths = [k[len(prefix + 'meta.'):] for k in state_dict.keys() if k.startswith(prefix + 'meta.')]
assert set(tensor_paths).issubset(
{'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',
'scale', 'zero'}
)
def _del(name):
del state_dict[prefix + name]
def _set(name):
setattr(self, name, state_dict[prefix + name])
_del(name)
def _get(name):
v = state_dict[prefix + name]
_del(name)
return v
_set('W_q')
if 'bias' in state_dict:
_set('bias')
else:
self.bias = None
if not hasattr(self, 'meta'):
self.meta = {}
if (prefix + 'meta.meta_scale.scale') in state_dict:
self.meta['scale_q'] = _get('meta.scale_q')
self.meta['quant_scale'] = True
if not 'meta_scale' in self.meta:
self.meta['meta_scale'] = {}
self.meta['meta_scale'] |= {
'scale': _get('meta.meta_scale.scale'),
'zero': _get('meta.meta_scale.zero')
}
else:
self.meta['scale'] = _get('meta.scale')
if (prefix + 'meta.meta_zero.scale') in state_dict:
self.meta['zero_q'] = _get('meta.zero_q')
self.meta['quant_zero'] = True
if not 'meta_zero' in self.meta:
self.meta['meta_zero'] = {}
self.meta['meta_zero'] |= {
'scale': _get('meta.meta_zero.scale'),
'zero': _get('meta.meta_zero.zero')
}
else:
self.meta['zero'] = _get('meta.zero')
self.ready = True
# self.cuda()
# self.in_gpu = self.W_q.device.type == 'cuda'
# assert self.in_gpu
self.repack()
@classmethod
def _get_tensor_paths(cls, state: Dict[str, Any], prefix=''):
paths = []
for k, v in state.items():
if isinstance(v, dict):
paths += cls._get_tensor_paths(v, prefix=k + '.')
elif isinstance(v, torch.Tensor):
paths.append(prefix + k)
return paths
def state_dict(self, *args, **kwargs):
return nn.Module.state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
nn.Module.load_state_dict(self, *args, **kwargs)
# Path: src/custom_layers.py
class MixtralBLockSparseTop2MLP_HQQ(nn.Module):
def __init__(self, config: MixtralConfig, quant_config: Dict[str, Any], meta1, meta2):
super().__init__()
self.w1 = HQQLinearTritonSavable(None, quant_config, meta1)
self.w2 = HQQLinearTritonSavable(None, quant_config, meta2)
self.w3 = HQQLinearTritonSavable(None, quant_config, meta1)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
current_hidden_states = self.w2(current_hidden_states)
return current_hidden_states
# Path: src/custom_layers.py
class SparseMoeWrapper(nn.Module):
def __init__(self, config, layer_id, gate, expert_cache):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
self.layer_id = layer_id
self.gate = gate
self.experts = expert_cache
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
# we cast back to the input dtype
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros(
(batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
)
# One hot encode the selected experts to create an expert mask
# this will be used to easily index which expert is going to be solicited
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
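# expert_mask has shape (num_experts, top_k, batch * sequence_length); expert_mask[e] marks which (slot, token) pairs were routed to expert e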
active_experts = selected_experts.flatten().unique().tolist()
# Loop over all available experts in the model and perform the computation on each expert
for (_layer_index, expert_idx), expert_layer in self.experts.load_experts(
*((self.layer_id, expert_idx) for expert_idx in active_experts), unordered=True):
idx, top_x = torch.where(expert_mask[expert_idx])
assert top_x.shape[0] > 0
# in torch it is faster to index using lists than torch tensors
top_x_list = top_x.tolist()
idx_list = idx.tolist()
# Index the correct hidden states and compute the expert hidden state for
# the current expert. We need to make sure to multiply the output hidden
# states by `routing_weights` on the corresponding tokens (top-1 and top-2)
current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]
# However `index_add_` only supports torch tensors for indexing so we'll use
# the `top_x` tensor here.
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return final_hidden_states, router_logits
# Path: src/utils.py
@contextmanager
def with_default_dtype(dtype):
_dtype_original = torch.get_default_dtype()
try:
torch.set_default_dtype(dtype)
yield
finally:
torch.set_default_dtype(_dtype_original)
# Path: src/build_model.py
import os
import json
import typing as tp
import torch
from functools import cache
from dataclasses import dataclass
from torch import nn
from transformers import AutoConfig
from transformers.models.mixtral import MixtralForCausalLM, MixtralConfig
from safetensors.torch import load_file
from torch import nn
from tqdm.auto import trange
from hqq.core.quantize import BaseQuantizeConfig
from .expert_cache import ExpertCache
from .expert_wrapper import MixtralExpertWrapper
from .custom_layers import (
HQQLinearTritonSavable,
MixtralBLockSparseTop2MLP_HQQ,
SparseMoeWrapper,
)
from .utils import with_default_dtype
@dataclass(frozen=True)
class OffloadConfig:
main_size: int
offload_size: int
buffer_size: int
offload_per_layer: int
class QuantConfig:
def __init__(
self,
ffn_config: BaseQuantizeConfig,
attn_config: BaseQuantizeConfig,
):
self.ffn_config = ffn_config
self.attn_config = attn_config
@cache
def get_ffn_metas(self, hidden_dim: int, ffn_dim: int) -> tuple[tp.Any, tp.Any]:
return (
HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), self.ffn_config),
HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), self.ffn_config),
)
def replace_attn_layers(
model: MixtralForCausalLM,
config: MixtralConfig,
quant_config: QuantConfig,
device: torch.device,
) -> None:
attn_quant_config = quant_config.attn_config
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_dim = hidden_size // num_heads
num_key_value_heads = config.num_key_value_heads
shapes = [
(hidden_size, num_heads * head_dim),
| (hidden_size, num_key_value_heads * head_dim), |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CircleRadon/Osprey
# Path: osprey/constants.py
IGNORE_INDEX = -100
# Path: osprey/datasets/stage2_data.py
class COCODataset(CustomDataset):
def __init__(self,
tokenizer=None,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=20,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)
self.begin_str = '<image>\nIn the conversation below, you simply answer the category name based on what you see ' \
'in the imagery inside a particular region. I will give you only one region each time.\n'
# Path: osprey/datasets/stage2_data.py
class RefCOCO(CustomDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=15,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)
self.begin_str = '<image>\nI will provide you with only one region ' \
'containing only one object, although there may be other ' \
'objects present in the image. It is recommended that you ' \
"describe the object's relative position with respect to other " \
'objects in the image, as well as its position within ' \
'the image and its basic attributes.'
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.img_ids = self.coco.getImgIds()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name'].split('_')[-1]
info['height'] = int(info['height'])
info['width'] = int(info['width'])
ann_ids = self.coco.getAnnIds(imgIds=[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info)==0:
continue
data_infos.append(info)
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_data_item(self, idx):
data_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
img_path =os.path.join(self.img_prefix, data_info['filename'])
image = self.read_process_image(img_path)
gt_masks = []
gt_labels = []
for ann in ann_info:
mask = self.annToMask(ann['segmentation'], data_info['height'], data_info['width'])
gt_masks.append(mask)
cat = self.coco.loadCats(ann['category_id'])
gt_labels.append(data_info['caption'])
data_item = dict(
img = image,
gt_masks = gt_masks,
gt_labels = gt_labels
)
return data_item
# Path: osprey/datasets/stage2_data.py
class RefCOCOP(RefCOCO):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=15,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)
self.begin_str = '<image>\nI will provide you with only one region ' \
'containing only one object, although there may be other ' \
'objects present in the image. It is recommended that you ' \
"describe the object's relative position with respect to other " \
'objects in the image and its basic attributes, you should not ' \
'give its position within the image.'
# Path: osprey/datasets/vcr.py
class VCRDataset(Dataset):
CLASSES = ('object',)
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
):
super(VCRDataset, self).__init__()
self.img_prefix = img_prefix
self.tokenizer = tokenizer
self.data_args = data_args
self.begin_str = """<image>.\nThis provides an overview of the picture.\n"""
self.data_infos = self.load_annotations(ann_file)
print('normal_vcr', len(self.data_infos))
def load_annotations(self, ann_file):
with open(ann_file, 'r') as f:
ann_list = [json.loads(line) for line in f]
data_infos = []
import re
def replace_numbers_with_tags(s, class_names):
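# rewrites bare object indices in the VCR text as '<class name> at region<idx>' so they refer to the numbered regions; returns None if an index cannot be mapped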
pattern = r'\b(\d+)\b'
try:
result = re.sub(pattern, lambda match: f'{class_names[int(match.group(1))]} at region{match.group(1)}', s)
except:
# the text contains a number that does not refer to an object instance
return None
return result
for ann in ann_list:
metadata_fn_path = ann['metadata_fn']
img_fn = ann['img_fn']
img_path = os.path.join(self.img_prefix,img_fn)
annotations = json.load(open(os.path.join(self.img_prefix, metadata_fn_path)))
masks = annotations['segms']
bboxes = np.array(annotations['boxes'])
class_names = ann['objects']
num_objects = len(class_names)
ref_string = ''
for i in range(num_objects):
ref_string = ref_string + f'region{i+1} <mask><pos>' + ','
ref_string = ref_string[:-1]
ref_prefix = random.choice(Ref_WAY)
begion_string = ref_prefix.replace('<region>', ref_string)
qa_s = []
q = ann['question_orig']
q = replace_numbers_with_tags(q, class_names)
a = ann['answer_orig']
a = replace_numbers_with_tags(a, class_names)
why = ann['rationale_orig']
why = replace_numbers_with_tags(why, class_names)
if (q is None) or (a is None) or (why) is None:
continue
qa_s.append({'from': 'human', 'value': begion_string + q})
qa_s.append({'from': 'gpt', 'value': a})
qa_s.append({'from': 'human', 'value': random.choice(WHY_QUESTIONS)})
qa_s.append({'from': 'gpt', 'value': why})
data_infos.append(dict(
img_path = img_path,
bboxes = bboxes,
masks = masks,
labels= class_names,
qas = qa_s)
)
return data_infos
def __len__(self):
return len(self.data_infos)
def __getitem__(self, i):
data_info = self.data_infos[i]
img_path = data_info['img_path']
masks = data_info['masks']
bboxes = data_info['bboxes']
qas = data_info['qas']
processor = self.data_args.image_processor
image = Image.open(img_path).convert('RGB')
w, h = image.size
# TODO ablation this
image_file = img_path
pred_masks = np.zeros((len(masks), h, w))
for i,mask in enumerate(masks):
int_box = [round(box) for box in bboxes[i][:-1]]
height_ = int(int_box[3]-int_box[1])
width_ = int(int_box[2]-int_box[0])
box_mask = make_mask(height_, width_, bboxes[i], mask)
pred_masks[i, int_box[1]:int_box[3], int_box[0]:int_box[2]] = box_mask
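# rasterize each region's segmentation at its box size and paste it into a full-image binary mask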
image = processor.preprocess(image,
do_center_crop=False,
return_tensors='pt')['pixel_values'][0]
image = torch.nn.functional.interpolate(image.unsqueeze(0),
size=(512, 512),
mode='bilinear',
align_corners=False).squeeze(0)
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16) # FIXME: 16 is hardcoded patch size
qas = copy.deepcopy(qas)
qas[0]['value'] = self.begin_str + qas[0]['value']
sources = preprocess_multimodal(
copy.deepcopy([qas]),
self.data_args, cur_token_len)
data_dict = preprocess(
sources,
self.tokenizer,
has_image=True)
if isinstance(i, int):
data_dict = dict(input_ids=data_dict['input_ids'][0],
labels=data_dict['labels'][0])
data_dict['image'] = image
data_dict['masks'] = torch.Tensor(pred_masks)
return data_dict
# Path: osprey/datasets/vg.py
class VGDATA(CustomDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=3,
):
self.data_args = data_args
self.tokenizer = tokenizer
self.ann_file = ann_file
self.img_prefix = img_prefix
self.max_gt_per_img = max_gt_per_img
super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)
self.begin_str = """<image>\nThis provides an overview of the picture.\n"""
def get_data_item(self, idx):
data_info = self.data_infos[idx]
ann_info = self.get_ann_info(idx)
img_path = os.path.join(self.img_prefix, data_info['filename'])
image = self.read_process_image(img_path)
gt_labels = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
mask = self.annToMask(ann['segmentation'], data_info['height'], data_info['width'])
gt_labels.append(ann['caption'])
gt_masks_ann.append(mask)
data_item = dict(
img = image,
gt_labels=gt_labels,
gt_masks=gt_masks_ann
)
return data_item
def process_text(self, data_item):
image = data_item['img']
ori_labels = data_item['gt_labels']
ori_masks = np.array(data_item['gt_masks'])
ori_masks = torch.from_numpy(ori_masks)
shuffle_ids = torch.randperm(len(ori_labels))
if len(shuffle_ids) > self.max_gt_per_img:
shuffle_ids = shuffle_ids[:self.max_gt_per_img]
ori_masks = ori_masks[shuffle_ids]
ori_labels = [ori_labels[i] for i in shuffle_ids]
sources = dict()
sources['conversations'] = []
for i in range(len(ori_labels)):
question = random.choice(QUESTIONS).strip()
question = question.replace('<region>', '<mask><pos>')
if i == 0:
question = self.begin_str + question
question += LIMIT
answer = ori_labels[i]
sources['conversations'].append(
{'from': 'human', 'value': question})
sources['conversations'].append({'from': 'gpt', 'value': answer})
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)
sources = preprocess_multimodal(
copy.deepcopy([sources['conversations']]),
self.data_args,
cur_token_len)
# print(sources)
data_dict = preprocess(
sources,
self.tokenizer,
has_image=True
)
# get single
if isinstance(i, int):
data_dict = dict(input_ids=data_dict['input_ids'][0],
labels=data_dict['labels'][0])
data_dict['image'] = image
data_dict['masks'] = ori_masks
return data_dict
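# Illustrative sketch, not part of the repository: the shuffle-and-truncate step in
# process_text keeps at most max_gt_per_img regions, in a random order. Values are dummies.
import torch
max_gt_per_img = 3
labels = ['tree', 'car', 'dog', 'sky', 'road']
masks = torch.zeros(len(labels), 4, 4)
ids = torch.randperm(len(labels))
if len(ids) > max_gt_per_img:
    ids = ids[:max_gt_per_img]
masks, labels = masks[ids], [labels[j] for j in ids]
assert masks.shape[0] == len(labels) <= max_gt_per_img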
# Path: osprey/datasets/stage2_data.py
class PascalPart(CustomDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=15,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)
CAT_CLASSES = ('potted plant', 'aeroplane', 'cow', 'cat', 'bus', 'horse', 'car',
'dog', 'bicycle', 'person', 'bird', 'bottle', 'sheep', 'motorbike')
SUB_CLASSES = ('eye', 'window', 'cap', 'headlight', 'hand', 'mirror', 'arm', 'plant',
'wheel', 'ear', 'pot', 'foot', 'leg', 'nose', 'body', 'horn', 'handlebar',
'neck', 'license plate', 'paw', 'saddle', 'head', 'muzzle', 'tail', 'wing',
'beak', 'hair', 'torso', 'door', 'mouth')
        begin_str = '<image>\n In the conversation below, you simply answer the category and subcategory name based on what you see ' \
                    'in the image inside a particular region. It may be a subpart of an object. ' \
                    'I will give you only one region each time. Your answer should be in the format of ' \
                    'category:subcategory. '
class_str = 'Categories Containing '+', '.join(CAT_CLASSES)+ '. '
subclass_str = 'Subcategories Containing ' + ','.join(SUB_CLASSES)
self.begin_str = begin_str + class_str + subclass_str + '.\n'
# Path: osprey/datasets/stage2_data.py
class PartImagenet(CustomDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=15,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)
CAT_CLASSES = (
'Bottle', 'Biped', 'Quadruped', 'Fish', 'Reptile', 'Bicycle', 'Bird', 'Car', 'Boat', 'Snake', 'Aeroplane'
)
SUB_CLASSES = (
'Tier', 'Hand', 'Wing', 'Mouth', 'Tail', 'Side', 'Fin', 'Engine', 'Foot', 'Head', 'Body', 'Sail', 'Seat'
)
        begin_str = '<image>\nIn the conversation below, you simply answer the category and subcategory name based on what you see ' \
                    'in the image inside a particular region. It may be a subpart of an object. ' \
                    'I will give you only one region each time. Your answer should be in the format of ' \
                    'category subcategory. '
class_str = 'Categories Containing '+', '.join(CAT_CLASSES)+ '. '
subclass_str = 'Subcategories Containing ' + ','.join(SUB_CLASSES)
self.begin_str = begin_str + class_str + subclass_str + '.\n'
# Path: osprey/datasets/osprey_724k.py
class OspreyDetailedDescription(ConversationDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix)
def load_annotations(self, ann_file):
data_infos = []
ann_list = json.load(open(ann_file))
for ann in ann_list:
masks = []
qa_s = []
filename = ann['file_name'].split('_')[-1]
img_path = os.path.join(self.img_prefix, filename)
region_num = len(ann['annotation'])
h, w = ann['height'], ann['width']
for i in range(region_num):
mask = ann['annotation'][i]['segmentation']
masks.append(mask)
question = random.choice(DETAILED_QUESTIONS)
question = question.replace('<region>', '<mask><pos>')
if i==0:
qa_s.append({'from': 'human', 'value': self.begin_str+question})
else:
qa_s.append({'from': 'human', 'value': question})
answer = re.findall(r"<.*>:\ (.*)", ann['description'][i])[0]
qa_s.append({'from': 'gpt', 'value': answer})
data_infos.append(dict(
img_path = img_path,
masks = masks,
height = h,
width = w,
qas = qa_s
))
return data_infos
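# Illustrative sketch, not part of the repository: the regex above strips the
# "<regionN>:" prefix from a description entry and keeps only the caption text.
import re
example = '<region1>: A man riding a horse on the beach.'
answer = re.findall(r'<.*>:\ (.*)', example)[0]
assert answer == 'A man riding a horse on the beach.'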
# Path: osprey/datasets/osprey_724k.py
class OspreyConversations(ConversationDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
):
self.limit = ""
super().__init__(tokenizer, data_args, ann_file, img_prefix)
# Path: osprey/datasets/osprey_724k.py
class OspreyShortForm(ConversationDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
):
self.limit = ' Answer the question using a single word or phrase.'
super().__init__(tokenizer, data_args, ann_file, img_prefix)
# Path: osprey/datasets/osprey_724k.py
class OspreyPartLevel(ConversationDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
):
self.limit = ' Answer the question using a single word or phrase.'
super().__init__(tokenizer, data_args, ann_file, img_prefix)
# Path: osprey/datasets/osprey_724k.py
class OspreyLVISPosNeg(ConversationDataset):
def __init__(self,
tokenizer,
data_args=None,
ann_file=None,
img_prefix=None,
):
super().__init__(tokenizer, data_args, ann_file, img_prefix)
def load_annotations(self, ann_file):
data_infos = []
ann_list = json.load(open(ann_file))
for ann in ann_list:
if len(ann['conversations'])//2 ==0:
continue
masks = []
qa_s = []
filename = ann['file_name']
img_path = os.path.join(self.img_prefix, filename)
region_num = len(ann['annotation'])
h, w = ann['height'], ann['width']
for i in range(region_num):
mask = ann['annotation'][i]['segmentation']
masks.append(mask)
for i in range(len(ann['conversations'])//2):
question = ann['conversations'][i*2]['value']
question = re.sub(r'<region\d+>', '<mask><pos>', question)
if i==0:
question = self.begin_str+question
qa_s.append({'from': 'human', 'value': question})
answer = ann['conversations'][i*2+1]['value']
qa_s.append({'from': 'gpt', 'value': answer})
data_infos.append(dict(
img_path = img_path,
masks = masks,
height = h,
width = w,
qas = qa_s
))
# print(qa_s)
return data_infos
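# Illustrative sketch, not part of the repository: the substitution above rewrites
# every "<regionN>" placeholder into the mask/position tokens consumed by the model.
import re
q = 'Is <region1> overlapping with <region12>?'
assert re.sub(r'<region\d+>', '<mask><pos>', q) == 'Is <mask><pos> overlapping with <mask><pos>?'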
# Path: osprey/datasets/data_modules.py
from dataclasses import dataclass
from torch.utils.data import ConcatDataset
from osprey.constants import IGNORE_INDEX
from .stage2_data import COCODataset, RefCOCO, RefCOCOP
from .vcr import VCRDataset
from .vg import VGDATA
from .stage2_data import PascalPart
from .stage2_data import PartImagenet
from .osprey_724k import OspreyDetailedDescription, OspreyConversations, OspreyShortForm, OspreyPartLevel, OspreyLVISPosNeg
import torch
import transformers
import json
@dataclass
class DataCollatorForDetDataset(object):
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances):
input_ids, labels, img_metas, masks = tuple([instance.get(key,None) for instance in instances]
for key in ('input_ids',
'labels',
'img_metas',
'masks'))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
img_metas=img_metas,
masks = masks
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
return batch
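# Illustrative sketch, not part of the repository: the padding and attention-mask logic
# used by the collator above, with a placeholder pad id instead of a real tokenizer.
import torch
pad_token_id = 0
input_ids = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
padded = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True,
                                         padding_value=pad_token_id)
attention_mask = padded.ne(pad_token_id)
# padded         -> [[5, 6, 7], [8, 9, 0]]
# attention_mask -> [[True, True, True], [True, True, False]]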
| def make_multitask_data_module(tokenizer, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: open-mmlab/PIA
# Path: animatediff/models/attention.py
class Transformer3DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
):
super().__init__()
self.use_linear_projection = use_linear_projection
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
# Define input layers
self.in_channels = in_channels
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
if use_linear_projection:
self.proj_in = nn.Linear(in_channels, inner_dim)
else:
self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
# Define transformers blocks
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
for d in range(num_layers)
]
)
        # Define output layers
if use_linear_projection:
self.proj_out = nn.Linear(in_channels, inner_dim)
else:
self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):
# Input
assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
batch, channel, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
hidden_states = self.proj_in(hidden_states)
# Blocks
for block in self.transformer_blocks:
hidden_states = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output,)
return Transformer3DModelOutput(sample=output)
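# Illustrative sketch, not part of the repository: the rearrange patterns above fold the
# frame axis into the batch axis so image-space layers run per frame, then unfold it again.
import torch
from einops import rearrange
b, c, f, h, w = 1, 4, 8, 16, 16
video = torch.randn(b, c, f, h, w)
flat = rearrange(video, 'b c f h w -> (b f) c h w')         # (8, 4, 16, 16)
restored = rearrange(flat, '(b f) c h w -> b c f h w', f=f)
assert torch.equal(video, restored)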
# Path: animatediff/models/resnet.py
class Downsample3D(nn.Module):
def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
else:
raise NotImplementedError
def forward(self, hidden_states):
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: animatediff/models/resnet.py
class ResnetBlock3D(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout=0.0,
temb_channels=512,
groups=32,
groups_out=None,
pre_norm=True,
eps=1e-6,
non_linearity="swish",
time_embedding_norm="default",
output_scale_factor=1.0,
use_in_shortcut=None,
):
super().__init__()
self.pre_norm = pre_norm
self.pre_norm = True
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.time_embedding_norm = time_embedding_norm
self.output_scale_factor = output_scale_factor
if groups_out is None:
groups_out = groups
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
if self.time_embedding_norm == "default":
time_emb_proj_out_channels = out_channels
elif self.time_embedding_norm == "scale_shift":
time_emb_proj_out_channels = out_channels * 2
else:
raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
else:
self.time_emb_proj = None
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if non_linearity == "swish":
self.nonlinearity = lambda x: F.silu(x)
elif non_linearity == "mish":
self.nonlinearity = Mish()
elif non_linearity == "silu":
self.nonlinearity = nn.SiLU()
self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
self.conv_shortcut = None
if self.use_in_shortcut:
self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_tensor, temb):
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
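# Illustrative sketch, not part of the repository: in the 'scale_shift' branch above the
# projected time embedding is split in two and applied as an affine modulation. Dummy shapes.
import torch
temb = torch.randn(2, 8, 1, 1, 1)            # (b, 2 * out_channels, 1, 1, 1)
scale, shift = torch.chunk(temb, 2, dim=1)   # each (2, 4, 1, 1, 1)
hidden = torch.randn(2, 4, 3, 8, 8)          # (b, out_channels, f, h, w)
hidden = hidden * (1 + scale) + shift        # broadcasts over frames and space
assert hidden.shape == (2, 4, 3, 8, 8)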
# Path: animatediff/models/resnet.py
class Upsample3D(nn.Module):
def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_conv_transpose = use_conv_transpose
self.name = name
conv = None
if use_conv_transpose:
raise NotImplementedError
elif use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
def forward(self, hidden_states, output_size=None):
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
        # Cast to float32, as the 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
else:
hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# if self.use_conv:
# if self.name == "conv":
# hidden_states = self.conv(hidden_states)
# else:
# hidden_states = self.Conv2d_0(hidden_states)
hidden_states = self.conv(hidden_states)
return hidden_states
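# Illustrative sketch, not part of the repository: nearest interpolation with scale
# factors (1, 2, 2) doubles the spatial resolution while leaving the frame count alone.
import torch
import torch.nn.functional as F
x = torch.randn(1, 4, 8, 16, 16)             # (b, c, f, h, w)
y = F.interpolate(x, scale_factor=[1.0, 2.0, 2.0], mode='nearest')
assert y.shape == (1, 4, 8, 32, 32)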
# Path: animatediff/models/motion_module.py
def get_motion_module(
in_channels,
motion_module_type: str,
motion_module_kwargs: dict
):
if motion_module_type == "Vanilla":
return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)
else:
raise ValueError
# Path: animatediff/models/unet_blocks.py
import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module
# Adapted from https://github.com/guoyww/AnimateDiff
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
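# Illustrative sketch, not part of the repository: the prefix handling above accepts
# both plain and 'UNetRes'-prefixed block names coming from a config.
name = 'UNetResCrossAttnDownBlock3D'
name = name[7:] if name.startswith('UNetRes') else name
assert name == 'CrossAttnDownBlock3D'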
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
| resnet_time_scale_shift=resnet_time_scale_shift, |