prompt (string, 1.74k-34.3k chars) | ref (string, 4-432 chars)
---|---
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: 3DTopia/OpenLRM
# Path: lrm/models/generator.py
class LRMGenerator(nn.Module):
"""
Full model of the large reconstruction model.
"""
def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,
transformer_dim: int, transformer_layers: int, transformer_heads: int,
triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):
super().__init__()
# attributes
self.encoder_feat_dim = encoder_feat_dim
self.camera_embed_dim = camera_embed_dim
# modules
self.encoder = DinoWrapper(
model_name=encoder_model_name,
freeze=encoder_freeze,
)
self.camera_embedder = CameraEmbedder(
raw_dim=12+4, embed_dim=camera_embed_dim,
)
self.transformer = TriplaneTransformer(
inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,
image_feat_dim=encoder_feat_dim,
camera_embed_dim=camera_embed_dim,
triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,
)
self.synthesizer = TriplaneSynthesizer(
triplane_dim=triplane_dim, samples_per_ray=rendering_samples_per_ray,
)
def forward_planes(self, image, camera):
# image: [N, C_img, H_img, W_img]
# camera: [N, D_cam_raw]
assert image.shape[0] == camera.shape[0], "Batch size mismatch for image and camera"
N = image.shape[0]
# encode image
image_feats = self.encoder(image)
assert image_feats.shape[-1] == self.encoder_feat_dim, \
f"Feature dimension mismatch: {image_feats.shape[-1]} vs {self.encoder_feat_dim}"
# embed camera
camera_embeddings = self.camera_embedder(camera)
assert camera_embeddings.shape[-1] == self.camera_embed_dim, \
f"Feature dimension mismatch: {camera_embeddings.shape[-1]} vs {self.camera_embed_dim}"
# transformer generating planes
planes = self.transformer(image_feats, camera_embeddings)
assert planes.shape[0] == N, "Batch size mismatch for planes"
assert planes.shape[1] == 3, "Planes should have 3 channels"
return planes
def forward(self, image, source_camera, render_cameras, render_size: int):
# image: [N, C_img, H_img, W_img]
# source_camera: [N, D_cam_raw]
# render_cameras: [N, M, D_cam_render]
# render_size: int
assert image.shape[0] == source_camera.shape[0], "Batch size mismatch for image and source_camera"
assert image.shape[0] == render_cameras.shape[0], "Batch size mismatch for image and render_cameras"
N, M = render_cameras.shape[:2]
planes = self.forward_planes(image, source_camera)
# render target views
render_results = self.synthesizer(planes, render_cameras, render_size)
assert render_results['images_rgb'].shape[0] == N, "Batch size mismatch for render_results"
assert render_results['images_rgb'].shape[1] == M, "Number of rendered views should be consistent with render_cameras"
return {
'planes': planes,
**render_results,
}
# Path: lrm/cam_utils.py
def build_camera_principle(RT: torch.Tensor, intrinsics: torch.Tensor):
"""
RT: (N, 3, 4)
intrinsics: (N, 3, 2), [[fx, fy], [cx, cy], [width, height]]
"""
fx, fy, cx, cy = get_normalized_camera_intrinsics(intrinsics)
return torch.cat([
RT.reshape(-1, 12),
fx.unsqueeze(-1), fy.unsqueeze(-1), cx.unsqueeze(-1), cy.unsqueeze(-1),
], dim=-1)
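A quick shape check may help here (hedged: it assumes the quoted OpenLRM repo is importable, and the intrinsics values are made up). The principle-camera encoding is 12 flattened RT entries plus 4 normalized intrinsics, matching the raw_dim=12+4 consumed by CameraEmbedder above.
# Illustration only, not part of the quoted repo.
import torch
from lrm.cam_utils import build_camera_principle  # assumes OpenLRM is on PYTHONPATH

RT = torch.eye(3, 4).unsqueeze(0)                   # (N=1, 3, 4) extrinsics
intrinsics = torch.tensor([[[500.0, 500.0],         # [fx, fy]
                            [256.0, 256.0],         # [cx, cy]
                            [512.0, 512.0]]])       # [width, height]
camera = build_camera_principle(RT, intrinsics)
print(camera.shape)  # torch.Size([1, 16]): 12 RT values + 4 normalized intrinsics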
# Path: lrm/cam_utils.py
def build_camera_standard(RT: torch.Tensor, intrinsics: torch.Tensor):
"""
RT: (N, 3, 4)
intrinsics: (N, 3, 2), [[fx, fy], [cx, cy], [width, height]]
"""
E = compose_extrinsic_RT(RT)
fx, fy, cx, cy = get_normalized_camera_intrinsics(intrinsics)
I = torch.stack([
torch.stack([fx, torch.zeros_like(fx), cx], dim=-1),
torch.stack([torch.zeros_like(fy), fy, cy], dim=-1),
torch.tensor([[0, 0, 1]], dtype=torch.float32, device=RT.device).repeat(RT.shape[0], 1),
], dim=1)
return torch.cat([
E.reshape(-1, 16),
I.reshape(-1, 9),
], dim=-1)
# Path: lrm/cam_utils.py
def center_looking_at_camera_pose(camera_position: torch.Tensor, look_at: torch.Tensor = None, up_world: torch.Tensor = None):
"""
camera_position: (M, 3)
look_at: (3)
up_world: (3)
return: (M, 3, 4)
"""
# by default, looking at the origin and world up is pos-z
if look_at is None:
look_at = torch.tensor([0, 0, 0], dtype=torch.float32)
if up_world is None:
up_world = torch.tensor([0, 0, 1], dtype=torch.float32)
look_at = look_at.unsqueeze(0).repeat(camera_position.shape[0], 1)
up_world = up_world.unsqueeze(0).repeat(camera_position.shape[0], 1)
z_axis = camera_position - look_at
z_axis = z_axis / z_axis.norm(dim=-1, keepdim=True)
x_axis = torch.cross(up_world, z_axis)
x_axis = x_axis / x_axis.norm(dim=-1, keepdim=True)
y_axis = torch.cross(z_axis, x_axis)
y_axis = y_axis / y_axis.norm(dim=-1, keepdim=True)
extrinsics = torch.stack([x_axis, y_axis, z_axis, camera_position], dim=-1)
return extrinsics
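For intuition, a standalone PyTorch sketch (illustration only, with a made-up camera position) that repeats the look-at construction above for one camera and checks that the resulting rotation is orthonormal:
# Illustration only, not part of the quoted repo.
import torch

camera_position = torch.tensor([[0.0, -2.0, 0.0]])      # (M=1, 3)
look_at = torch.zeros(1, 3)                              # origin
up_world = torch.tensor([[0.0, 0.0, 1.0]])               # world up is +z

z_axis = camera_position - look_at
z_axis = z_axis / z_axis.norm(dim=-1, keepdim=True)
x_axis = torch.cross(up_world, z_axis, dim=-1)
x_axis = x_axis / x_axis.norm(dim=-1, keepdim=True)
y_axis = torch.cross(z_axis, x_axis, dim=-1)

extrinsics = torch.stack([x_axis, y_axis, z_axis, camera_position], dim=-1)  # (1, 3, 4)
R = extrinsics[0, :, :3]
print(R @ R.T)  # ~identity: the three axes form an orthonormal rotation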
# Path: lrm/inferrer.py
import torch
import math
import os
import imageio
import mcubes
import trimesh
import numpy as np
import argparse
from PIL import Image
from .models.generator import LRMGenerator
from .cam_utils import build_camera_principle, build_camera_standard, center_looking_at_camera_pose
from huggingface_hub import hf_hub_download
[cx, cy],
[w, h],
], dtype=torch.float32)
return intrinsics
def _default_source_camera(self, batch_size: int = 1):
# return: (N, D_cam_raw)
dist_to_center = 2
canonical_camera_extrinsics = torch.tensor([[
[1, 0, 0, 0],
[0, 0, -1, -dist_to_center],
[0, 1, 0, 0],
]], dtype=torch.float32)
canonical_camera_intrinsics = self._default_intrinsics().unsqueeze(0)
source_camera = build_camera_principle(canonical_camera_extrinsics, canonical_camera_intrinsics)
return source_camera.repeat(batch_size, 1)
def _default_render_cameras(self, batch_size: int = 1):
# return: (N, M, D_cam_render)
render_camera_extrinsics = self._get_surrounding_views()
render_camera_intrinsics = self._default_intrinsics().unsqueeze(0).repeat(render_camera_extrinsics.shape[0], 1, 1)
render_cameras = build_camera_standard(render_camera_extrinsics, render_camera_intrinsics)
return render_cameras.unsqueeze(0).repeat(batch_size, 1, 1)
@staticmethod
def images_to_video(images, output_path, fps, verbose=False):
# images: (T, C, H, W)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
frames = []
for i in range(images.shape[0]):
frame = (images[i].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
assert frame.shape[0] == images.shape[2] and frame.shape[1] == images.shape[3], \
f"Frame shape mismatch: {frame.shape} vs {images.shape}"
assert frame.min() >= 0 and frame.max() <= 255, \
f"Frame value out of range: {frame.min()} ~ {frame.max()}"
frames.append(frame)
imageio.mimwrite(output_path, np.stack(frames), fps=fps, codec='mpeg4', quality=10)
if verbose:
print(f"Saved video to {output_path}")
def infer_single(self, image: torch.Tensor, render_size: int, mesh_size: int, export_video: bool, export_mesh: bool):
# image: [1, C_img, H_img, W_img]
mesh_thres = 3.0
chunk_size = 2
batch_size = 1
source_camera = self._default_source_camera(batch_size).to(self.device)
render_cameras = self._default_render_cameras(batch_size).to(self.device)
with torch.no_grad():
planes = self.model.forward_planes(image, source_camera)
results = {}
if export_video:
# forward synthesizer per mini-batch
frames = []
for i in range(0, render_cameras.shape[1], chunk_size):
frames.append(
self.model.synthesizer(
planes,
render_cameras[:, i:i+chunk_size],
render_size,
)
)
# merge frames
frames = {
k: torch.cat([r[k] for r in frames], dim=1)
for k in frames[0].keys()
}
# update results
results.update({
'frames': frames,
})
if export_mesh:
grid_out = self.model.synthesizer.forward_grid(
planes=planes,
grid_size=mesh_size,
)
vtx, faces = mcubes.marching_cubes(grid_out['sigma'].squeeze(0).squeeze(-1).cpu().numpy(), mesh_thres)
vtx = vtx / (mesh_size - 1) * 2 - 1
vtx_tensor = torch.tensor(vtx, dtype=torch.float32, device=self.device).unsqueeze(0)
vtx_colors = self.model.synthesizer.forward_points(planes, vtx_tensor)['rgb'].squeeze(0).cpu().numpy() # (0, 1)
vtx_colors = (vtx_colors * 255).astype(np.uint8)
mesh = trimesh.Trimesh(vertices=vtx, faces=faces, vertex_colors=vtx_colors)
results.update({
'mesh': mesh,
})
return results
def infer(self, source_image: str, dump_path: str, source_size: int, render_size: int, mesh_size: int, export_video: bool, export_mesh: bool):
source_image_size = source_size if source_size > 0 else self.infer_kwargs['source_size']
image = torch.tensor(np.array(Image.open(source_image))).permute(2, 0, 1).unsqueeze(0) / 255.0
# if RGBA, blend to RGB
if image.shape[1] == 4:
image = image[:, :3, ...] * image[:, 3:, ...] + (1 - image[:, 3:, ...])
image = torch.nn.functional.interpolate(image, size=(source_image_size, source_image_size), mode='bicubic', align_corners=True)
image = torch.clamp(image, 0, 1)
results = self.infer_single(
image.to(self.device),
render_size=render_size if render_size > 0 else self.infer_kwargs['render_size'],
mesh_size=mesh_size,
export_video=export_video,
export_mesh=export_mesh,
)
image_name = os.path.basename(source_image)
uid = image_name.split('.')[0]
os.makedirs(dump_path, exist_ok=True)
# dump video
if 'frames' in results:
| renderings = results['frames'] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: xinghaochen/TinySAM
# Path: tinysam/modeling/tiny_vit_sam.py
class TinyViT(nn.Module):
def __init__(self, img_size=224, in_chans=3, num_classes=1000,
embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_sizes=[7, 7, 14, 7],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
layer_lr_decay=1.0,
):
super().__init__()
self.img_size=img_size
self.num_classes = num_classes
self.depths = depths
self.num_layers = len(depths)
self.mlp_ratio = mlp_ratio
activation = nn.GELU
self.patch_embed = PatchEmbed(in_chans=in_chans,
embed_dim=embed_dims[0],
resolution=img_size,
activation=activation)
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate,
sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
kwargs = dict(dim=embed_dims[i_layer],
input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),
patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),
# input_resolution=(patches_resolution[0] // (2 ** i_layer),
# patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
downsample=PatchMerging if (
i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
out_dim=embed_dims[min(
i_layer + 1, len(embed_dims) - 1)],
activation=activation,
)
if i_layer == 0:
layer = ConvLayer(
conv_expand_ratio=mbconv_expand_ratio,
**kwargs,
)
else:
layer = BasicLayer(
num_heads=num_heads[i_layer],
window_size=window_sizes[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
local_conv_size=local_conv_size,
**kwargs)
self.layers.append(layer)
# Classifier head
self.norm_head = nn.LayerNorm(embed_dims[-1])
self.head = nn.Linear(
embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
# init weights
self.apply(self._init_weights)
self.set_layer_lr_decay(layer_lr_decay)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dims[-1],
256,
kernel_size=1,
bias=False,
),
LayerNorm2d(256),
nn.Conv2d(
256,
256,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(256),
)
def set_layer_lr_decay(self, layer_lr_decay):
decay_rate = layer_lr_decay
# layers -> blocks (depth)
depth = sum(self.depths)
lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]
#print("LR SCALES:", lr_scales)
def _set_lr_scale(m, scale):
for p in m.parameters():
p.lr_scale = scale
self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))
i = 0
for layer in self.layers:
for block in layer.blocks:
block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))
i += 1
if layer.downsample is not None:
layer.downsample.apply(
lambda x: _set_lr_scale(x, lr_scales[i - 1]))
assert i == depth
for m in [self.norm_head, self.head]:
m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))
for k, p in self.named_parameters():
p.param_name = k
def _check_lr_scale(m):
for p in m.parameters():
assert hasattr(p, 'lr_scale'), p.param_name
self.apply(_check_lr_scale)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'attention_biases'}
def forward_features(self, x):
# x: (N, C, H, W)
x = self.patch_embed(x)
x = self.layers[0](x)
start_i = 1
for i in range(start_i, len(self.layers)):
layer = self.layers[i]
x = layer(x)
B,_,C=x.size()
x = x.view(B, 64, 64, C)
x=x.permute(0, 3, 1, 2)
x=self.neck(x)
return x
def forward(self, x):
x = self.forward_features(x)
#x = self.norm_head(x)
#x = self.head(x)
return x
# Path: tinysam/modeling/image_encoder.py
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
if self.pos_embed is not None:
x = x + self.pos_embed
for blk in self.blocks:
x = blk(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
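A hedged usage sketch of the encoder's shape contract (the tiny hyperparameters below are assumptions chosen so it runs quickly, not a real SAM/TinySAM config; it assumes the quoted tinysam package is importable):
# Illustration only, not part of the quoted repo.
import torch
from tinysam.modeling.image_encoder import ImageEncoderViT  # assumes tinysam is on PYTHONPATH

encoder = ImageEncoderViT(img_size=64, patch_size=16, embed_dim=96, depth=2, num_heads=3)
x = torch.randn(1, 3, 64, 64)
with torch.no_grad():
    feats = encoder(x)
print(feats.shape)  # torch.Size([1, 256, 4, 4]): out_chans x (img_size // patch_size)^2 grid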
# Path: tinysam/modeling/mask_decoder.py
class MaskDecoder(nn.Module):
def __init__(
self,
*,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: Type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
"""
Predicts masks given an image and prompt embeddings, using a
transformer architecture.
Arguments:
transformer_dim (int): the channel dimension of the transformer
transformer (nn.Module): the transformer used to predict masks
num_multimask_outputs (int): the number of masks to predict
when disambiguating masks
activation (nn.Module): the type of activation to use when
upscaling masks
iou_head_depth (int): the depth of the MLP used to predict
mask quality
iou_head_hidden_dim (int): the hidden dimension of the MLP
used to predict mask quality
"""
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
for i in range(self.num_mask_tokens)
]
)
self.iou_prediction_head = MLP(
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Arguments:
image_embeddings (torch.Tensor): the embeddings from the image encoder
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
Returns:
torch.Tensor: batched predicted masks
torch.Tensor: batched predictions of mask quality
"""
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
mask_slice = slice(1, None)
masks = masks[:, mask_slice, :, :]
iou_pred = iou_pred[:, mask_slice]
# Prepare output
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Predicts masks. See 'forward' for more details."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: List[torch.Tensor] = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
# Path: tinysam/modeling/prompt_encoder.py
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: Tuple[int, int],
input_image_size: Tuple[int, int],
mask_in_chans: int,
activation: Type[nn.Module] = nn.GELU,
) -> None:
"""
Encodes prompts for input to SAM's mask decoder.
Arguments:
embed_dim (int): The prompts' embedding dimension
image_embedding_size (tuple(int, int)): The spatial size of the
image embedding, as (H, W).
input_image_size (tuple(int, int)): The padded size of the image as input
to the image encoder, as (H, W).
mask_in_chans (int): The number of hidden channels used for
encoding input masks.
activation (nn.Module): The activation to use when encoding
input masks.
"""
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
)
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
"""
Returns the positional encoding used to encode point prompts,
applied to a dense set of points the shape of the image encoding.
Returns:
torch.Tensor: Positional encoding with shape
1x(embed_dim)x(embedding_h)x(embedding_w)
"""
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(
self,
points: torch.Tensor,
labels: torch.Tensor,
pad: bool,
) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
"""Embeds mask inputs."""
mask_embedding = self.mask_downscaling(masks)
return mask_embedding
def _get_batch_size(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> int:
"""
Gets the batch size of the output given the batch size of the input prompts.
"""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def _get_device(self) -> torch.device:
return self.point_embeddings[0].weight.device
def forward(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense
embeddings.
Arguments:
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
and labels to embed.
boxes (torch.Tensor or none): boxes to embed
masks (torch.Tensor or none): masks to embed
Returns:
torch.Tensor: sparse embeddings for the points and boxes, with shape
BxNx(embed_dim), where N is determined by the number of input points
and boxes.
torch.Tensor: dense embeddings for the masks, in the shape
Bx(embed_dim)x(embed_H)x(embed_W)
"""
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
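As a hedged usage sketch (the sizes below are common SAM-style defaults, assumed rather than read from this excerpt; it assumes the quoted tinysam package is importable): a single foreground point comes back as sparse embeddings with one extra padding token, and the dense embedding falls back to no_mask_embed when no mask is given.
# Illustration only, not part of the quoted repo.
import torch
from tinysam.modeling.prompt_encoder import PromptEncoder  # assumes tinysam is on PYTHONPATH

prompt_encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 512.0]]])   # (B=1, N=1, 2) pixel coordinates
labels = torch.tensor([[1]])                # 1 = foreground point
sparse, dense = prompt_encoder(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape)  # torch.Size([1, 2, 256]): the point plus a padding point (boxes is None)
print(dense.shape)   # torch.Size([1, 256, 64, 64]): broadcast no_mask_embed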
# Path: tinysam/modeling/sam.py
import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple, Union
from .tiny_vit_sam import TinyViT
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
| self, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: dcharatan/pixelsplat
# Path: src/dataset/data_module.py
def get_data_shim(encoder: nn.Module) -> DataShim:
"""Get functions that modify the batch. It's sometimes necessary to modify batches
outside the data loader because GPU computations are required to modify the batch or
because the modification depends on something outside the data loader.
"""
shims: list[DataShim] = []
if hasattr(encoder, "get_data_shim"):
shims.append(encoder.get_data_shim())
def combined_shim(batch):
for shim in shims:
batch = shim(batch)
return batch
return combined_shim
# Path: src/dataset/types.py
class BatchedExample(TypedDict, total=False):
target: BatchedViews
context: BatchedViews
scene: list[str]
# Path: src/evaluation/metrics.py
@torch.no_grad()
def compute_lpips(
ground_truth: Float[Tensor, "batch channel height width"],
predicted: Float[Tensor, "batch channel height width"],
) -> Float[Tensor, " batch"]:
value = get_lpips(predicted.device).forward(ground_truth, predicted, normalize=True)
return value[:, 0, 0, 0]
# Path: src/evaluation/metrics.py
@torch.no_grad()
def compute_psnr(
ground_truth: Float[Tensor, "batch channel height width"],
predicted: Float[Tensor, "batch channel height width"],
) -> Float[Tensor, " batch"]:
ground_truth = ground_truth.clip(min=0, max=1)
predicted = predicted.clip(min=0, max=1)
mse = reduce((ground_truth - predicted) ** 2, "b c h w -> b", "mean")
return -10 * mse.log10()
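For intuition, a tiny self-contained check (PyTorch + einops, made-up tensors) mirroring the formula above: PSNR is -10 * log10(MSE) over images clipped to [0, 1], reduced per batch element.
# Illustration only, not part of the quoted repo.
import torch
from einops import reduce

ground_truth = torch.rand(2, 3, 32, 32)  # (batch, channel, height, width) in [0, 1]
predicted = (ground_truth + 0.05 * torch.randn_like(ground_truth)).clip(0, 1)
mse = reduce((ground_truth - predicted) ** 2, "b c h w -> b", "mean")
print(-10 * mse.log10())  # one PSNR value (dB) per batch element; higher is better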
# Path: src/evaluation/metrics.py
@torch.no_grad()
def compute_ssim(
ground_truth: Float[Tensor, "batch channel height width"],
predicted: Float[Tensor, "batch channel height width"],
) -> Float[Tensor, " batch"]:
ssim = [
structural_similarity(
gt.detach().cpu().numpy(),
hat.detach().cpu().numpy(),
win_size=11,
gaussian_weights=True,
channel_axis=0,
data_range=1.0,
)
for gt, hat in zip(ground_truth, predicted)
]
return torch.tensor(ssim, dtype=predicted.dtype, device=predicted.device)
# Path: src/global_cfg.py
def get_cfg() -> DictConfig:
global cfg
return cfg
# Path: src/loss/loss.py
class Loss(nn.Module, ABC, Generic[T_cfg, T_wrapper]):
cfg: T_cfg
name: str
def __init__(self, cfg: T_wrapper) -> None:
super().__init__()
# Extract the configuration from the wrapper.
(field,) = fields(type(cfg))
self.cfg = getattr(cfg, field.name)
self.name = field.name
@abstractmethod
def forward(
self,
prediction: DecoderOutput,
batch: BatchedExample,
gaussians: Gaussians,
global_step: int,
) -> Float[Tensor, ""]:
pass
# Path: src/misc/benchmarker.py
class Benchmarker:
def __init__(self):
self.execution_times = defaultdict(list)
@contextmanager
def time(self, tag: str, num_calls: int = 1):
try:
start_time = time()
yield
finally:
end_time = time()
for _ in range(num_calls):
self.execution_times[tag].append((end_time - start_time) / num_calls)
def dump(self, path: Path) -> None:
path.parent.mkdir(exist_ok=True, parents=True)
with path.open("w") as f:
json.dump(dict(self.execution_times), f)
def dump_memory(self, path: Path) -> None:
path.parent.mkdir(exist_ok=True, parents=True)
with path.open("w") as f:
json.dump(torch.cuda.memory_stats()["allocated_bytes.all.peak"], f)
def summarize(self) -> None:
for tag, times in self.execution_times.items():
print(f"{tag}: {len(times)} calls, avg. {np.mean(times)} seconds per call")
# Path: src/misc/image_io.py
def prep_image(image: FloatImage) -> UInt8[np.ndarray, "height width channel"]:
# Handle batched images.
if image.ndim == 4:
image = rearrange(image, "b c h w -> c h (b w)")
# Handle single-channel images.
if image.ndim == 2:
image = rearrange(image, "h w -> () h w")
# Ensure that there are 3 or 4 channels.
channel, _, _ = image.shape
if channel == 1:
image = repeat(image, "() h w -> c h w", c=3)
assert image.shape[0] in (3, 4)
image = (image.detach().clip(min=0, max=1) * 255).type(torch.uint8)
return rearrange(image, "c h w -> h w c").cpu().numpy()
# Path: src/misc/image_io.py
def save_image(
image: FloatImage,
path: Union[Path, str],
) -> None:
"""Save an image. Assumed to be in range 0-1."""
# Create the parent directory if it doesn't already exist.
path = Path(path)
path.parent.mkdir(exist_ok=True, parents=True)
# Save the image.
Image.fromarray(prep_image(image)).save(path)
# Path: src/misc/LocalLogger.py
LOG_PATH = Path("outputs/local")
# Path: src/misc/LocalLogger.py
class LocalLogger(Logger):
def __init__(self) -> None:
super().__init__()
self.experiment = None
os.system(f"rm -r {LOG_PATH}")
@property
def name(self):
return "LocalLogger"
@property
def version(self):
return 0
@rank_zero_only
def log_hyperparams(self, params):
pass
@rank_zero_only
def log_metrics(self, metrics, step):
pass
@rank_zero_only
def log_image(
self,
key: str,
images: list[Any],
step: Optional[int] = None,
**kwargs,
):
# The function signature is the same as the wandb logger's, but the step is
# actually required.
assert step is not None
for index, image in enumerate(images):
path = LOG_PATH / f"{key}/{index:0>2}_{step:0>6}.png"
path.parent.mkdir(exist_ok=True, parents=True)
Image.fromarray(image).save(path)
# Path: src/misc/step_tracker.py
class StepTracker:
lock: RLock
step: Int64[Tensor, ""]
def __init__(self):
self.lock = Manager().RLock()
self.step = torch.tensor(0, dtype=torch.int64).share_memory_()
def set_step(self, step: int) -> None:
with self.lock:
self.step.fill_(step)
def get_step(self) -> int:
with self.lock:
return self.step.item()
# Path: src/visualization/annotation.py
def add_label(
image: Float[Tensor, "3 width height"],
label: str,
font: Path = Path("assets/Inter-Regular.otf"),
font_size: int = 24,
) -> Float[Tensor, "3 width_with_label height_with_label"]:
return vcat(
draw_label(label, font, font_size, image.device),
image,
align="left",
gap=4,
)
# Path: src/visualization/camera_trajectory/interpolation.py
@torch.no_grad()
def interpolate_extrinsics(
initial: Float[Tensor, "*#batch 4 4"],
final: Float[Tensor, "*#batch 4 4"],
t: Float[Tensor, " time_step"],
eps: float = 1e-4,
) -> Float[Tensor, "*batch time_step 4 4"]:
"""Interpolate extrinsics by rotating around their "focus point," which is the
least-squares intersection between the look vectors of the initial and final
extrinsics.
"""
initial = initial.type(torch.float64)
final = final.type(torch.float64)
t = t.type(torch.float64)
# Based on the dot product between the look vectors, pick from one of two cases:
# 1. Look vectors are parallel: interpolate about their origins' midpoint.
# 2. Look vectors aren't parallel: interpolate about their focus point.
initial_look = initial[..., :3, 2]
final_look = final[..., :3, 2]
dot_products = einsum(initial_look, final_look, "... i, ... i -> ...")
parallel_mask = (dot_products.abs() - 1).abs() < eps
# Pick focus points.
initial_origin = initial[..., :3, 3]
final_origin = final[..., :3, 3]
pivot_point = 0.5 * (initial_origin + final_origin)
pivot_point[~parallel_mask] = intersect_rays(
initial_origin[~parallel_mask],
initial_look[~parallel_mask],
final_origin[~parallel_mask],
final_look[~parallel_mask],
)
# Convert to pivot parameters.
pivot_frame = generate_rotation_coordinate_frame(initial_look, final_look, eps=eps)
initial_params = extrinsics_to_pivot_parameters(initial, pivot_frame, pivot_point)
final_params = extrinsics_to_pivot_parameters(final, pivot_frame, pivot_point)
# Interpolate the pivot parameters.
interpolated_params = interpolate_pivot_parameters(initial_params, final_params, t)
# Convert back.
return pivot_parameters_to_extrinsics(
interpolated_params.type(torch.float32),
rearrange(pivot_frame, "... i j -> ... () i j").type(torch.float32),
rearrange(pivot_point, "... xyz -> ... () xyz").type(torch.float32),
)
# Path: src/visualization/camera_trajectory/interpolation.py
def interpolate_intrinsics(
initial: Float[Tensor, "*#batch 3 3"],
final: Float[Tensor, "*#batch 3 3"],
t: Float[Tensor, " time_step"],
) -> Float[Tensor, "*batch time_step 3 3"]:
initial = rearrange(initial, "... i j -> ... () i j")
final = rearrange(final, "... i j -> ... () i j")
t = rearrange(t, "t -> t () ()")
return initial + (final - initial) * t
# Path: src/visualization/camera_trajectory/wobble.py
@torch.no_grad()
def generate_wobble(
extrinsics: Float[Tensor, "*#batch 4 4"],
radius: Float[Tensor, "*#batch"],
t: Float[Tensor, " time_step"],
) -> Float[Tensor, "*batch time_step 4 4"]:
tf = generate_wobble_transformation(radius, t)
return rearrange(extrinsics, "... i j -> ... () i j") @ tf
# Path: src/visualization/camera_trajectory/wobble.py
@torch.no_grad()
def generate_wobble_transformation(
radius: Float[Tensor, "*#batch"],
t: Float[Tensor, " time_step"],
num_rotations: int = 1,
scale_radius_with_t: bool = True,
) -> Float[Tensor, "*batch time_step 4 4"]:
# Generate a translation in the image plane.
tf = torch.eye(4, dtype=torch.float32, device=t.device)
tf = tf.broadcast_to((*radius.shape, t.shape[0], 4, 4)).clone()
radius = radius[..., None]
if scale_radius_with_t:
radius = radius * t
tf[..., 0, 3] = torch.sin(2 * torch.pi * num_rotations * t) * radius
tf[..., 1, 3] = -torch.cos(2 * torch.pi * num_rotations * t) * radius
return tf
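A standalone sketch (PyTorch only, made-up radius and time steps) of the transform above: identity rotation plus a circular in-plane translation whose radius ramps up with t.
# Illustration only, not part of the quoted repo.
import torch

radius = torch.tensor([0.05])                 # (*batch,)
t = torch.linspace(0, 1, 5)                   # (time_step,)
tf = torch.eye(4).broadcast_to((1, 5, 4, 4)).clone()
r = radius[..., None] * t                     # scale_radius_with_t=True behaviour
tf[..., 0, 3] = torch.sin(2 * torch.pi * t) * r
tf[..., 1, 3] = -torch.cos(2 * torch.pi * t) * r
print(tf[0, -1, :3, 3])  # ~(0, -0.05, 0): one full rotation, radius ramped to its full value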
# Path: src/visualization/color_map.py
def apply_color_map_to_image(
image: Float[Tensor, "*batch height width"],
color_map: str = "inferno",
) -> Float[Tensor, "*batch 3 height with"]:
image = apply_color_map(image, color_map)
return rearrange(image, "... h w c -> ... c h w")
# Path: src/visualization/layout.py
def add_border(
image: Float[Tensor, "channel height width"],
border: int = 8,
color: Color = 1,
) -> Float[Tensor, "channel new_height new_width"]:
color = _sanitize_color(color).to(image)
c, h, w = image.shape
result = torch.empty(
(c, h + 2 * border, w + 2 * border), dtype=torch.float32, device=image.device
)
result[:] = color[:, None, None]
result[:, border : h + border, border : w + border] = image
return result
# Path: src/visualization/layout.py
def hcat(
*images: Iterable[Float[Tensor, "channel _ _"]],
align: Literal["start", "center", "end", "top", "bottom"] = "start",
gap: int = 8,
gap_color: Color = 1,
):
"""Shorthand for a horizontal linear concatenation."""
return cat(
"horizontal",
*images,
align={
"start": "start",
"center": "center",
"end": "end",
"top": "start",
"bottom": "end",
}[align],
gap=gap,
gap_color=gap_color,
)
# Path: src/visualization/layout.py
def vcat(
*images: Iterable[Float[Tensor, "channel _ _"]],
align: Literal["start", "center", "end", "left", "right"] = "start",
gap: int = 8,
gap_color: Color = 1,
):
"""Shorthand for a horizontal linear concatenation."""
return cat(
"vertical",
*images,
align={
"start": "start",
"center": "center",
"end": "end",
"left": "start",
"right": "end",
}[align],
gap=gap,
gap_color=gap_color,
)
# Path: src/visualization/validation_in_3d.py
def render_cameras(batch: dict, resolution: int) -> Float[Tensor, "3 3 height width"]:
# Define colors for context and target views.
num_context_views = batch["context"]["extrinsics"].shape[1]
num_target_views = batch["target"]["extrinsics"].shape[1]
color = torch.ones(
(num_target_views + num_context_views, 3),
dtype=torch.float32,
device=batch["target"]["extrinsics"].device,
)
color[num_context_views:, 1:] = 0
return draw_cameras(
resolution,
torch.cat(
(batch["context"]["extrinsics"][0], batch["target"]["extrinsics"][0])
),
torch.cat(
(batch["context"]["intrinsics"][0], batch["target"]["intrinsics"][0])
),
color,
torch.cat((batch["context"]["near"][0], batch["target"]["near"][0])),
torch.cat((batch["context"]["far"][0], batch["target"]["far"][0])),
)
# Path: src/visualization/validation_in_3d.py
def render_projections(
gaussians: Gaussians,
resolution: int,
margin: float = 0.1,
draw_label: bool = True,
extra_label: str = "",
) -> Float[Tensor, "batch 3 3 height width"]:
device = gaussians.means.device
b, _, _ = gaussians.means.shape
# Compute the minima and maxima of the scene.
minima = gaussians.means.min(dim=1).values
maxima = gaussians.means.max(dim=1).values
scene_minima, scene_maxima = compute_equal_aabb_with_margin(
minima, maxima, margin=margin
)
projections = []
for look_axis in range(3):
right_axis = (look_axis + 1) % 3
down_axis = (look_axis + 2) % 3
# Define the extrinsics for rendering.
extrinsics = torch.zeros((b, 4, 4), dtype=torch.float32, device=device)
extrinsics[:, right_axis, 0] = 1
extrinsics[:, down_axis, 1] = 1
extrinsics[:, look_axis, 2] = 1
extrinsics[:, right_axis, 3] = 0.5 * (
scene_minima[:, right_axis] + scene_maxima[:, right_axis]
)
extrinsics[:, down_axis, 3] = 0.5 * (
scene_minima[:, down_axis] + scene_maxima[:, down_axis]
)
extrinsics[:, look_axis, 3] = scene_minima[:, look_axis]
extrinsics[:, 3, 3] = 1
# Define the intrinsics for rendering.
extents = scene_maxima - scene_minima
far = extents[:, look_axis]
near = torch.zeros_like(far)
width = extents[:, right_axis]
height = extents[:, down_axis]
projection = render_cuda_orthographic(
extrinsics,
width,
height,
near,
far,
(resolution, resolution),
torch.zeros((b, 3), dtype=torch.float32, device=device),
gaussians.means,
gaussians.covariances,
gaussians.harmonics,
gaussians.opacities,
fov_degrees=10.0,
)
if draw_label:
right_axis_name = "XYZ"[right_axis]
down_axis_name = "XYZ"[down_axis]
label = f"{right_axis_name}{down_axis_name} Projection {extra_label}"
projection = torch.stack([add_label(x, label) for x in projection])
projections.append(projection)
return torch.stack(pad(projections), dim=1)
# Path: src/model/decoder/decoder.py
class DecoderOutput:
class Decoder(nn.Module, ABC, Generic[T]):
T = TypeVar("T")
def __init__(self, cfg: T, dataset_cfg: DatasetCfg) -> None:
def forward(
self,
gaussians: Gaussians,
extrinsics: Float[Tensor, "batch view 4 4"],
intrinsics: Float[Tensor, "batch view 3 3"],
near: Float[Tensor, "batch view"],
far: Float[Tensor, "batch view"],
image_shape: tuple[int, int],
depth_mode: DepthRenderingMode | None = None,
) -> DecoderOutput:
# Path: src/model/encoder/encoder.py
class Encoder(nn.Module, ABC, Generic[T]):
cfg: T
def __init__(self, cfg: T) -> None:
super().__init__()
self.cfg = cfg
@abstractmethod
def forward(
self,
context: BatchedViews,
deterministic: bool,
) -> Gaussians:
pass
def get_data_shim(self) -> DataShim:
"""The default shim doesn't modify the batch."""
return lambda x: x
# Path: src/model/encoder/visualization/encoder_visualizer.py
class EncoderVisualizer(ABC, Generic[T_cfg, T_encoder]):
cfg: T_cfg
encoder: T_encoder
def __init__(self, cfg: T_cfg, encoder: T_encoder) -> None:
self.cfg = cfg
self.encoder = encoder
@abstractmethod
def visualize(
self,
context: dict,
global_step: int,
) -> dict[str, Float[Tensor, "3 _ _"]]:
pass
# Path: src/model/model_wrapper.py
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Protocol, runtime_checkable
from einops import pack, rearrange, repeat
from jaxtyping import Float
from pytorch_lightning import LightningModule
from pytorch_lightning.loggers.wandb import WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from torch import Tensor, nn, optim
from ..dataset.data_module import get_data_shim
from ..dataset.types import BatchedExample
from ..evaluation.metrics import compute_lpips, compute_psnr, compute_ssim
from ..global_cfg import get_cfg
from ..loss import Loss
from ..misc.benchmarker import Benchmarker
from ..misc.image_io import prep_image, save_image
from ..misc.LocalLogger import LOG_PATH, LocalLogger
from ..misc.step_tracker import StepTracker
from ..visualization.annotation import add_label
from ..visualization.camera_trajectory.interpolation import (
interpolate_extrinsics,
interpolate_intrinsics,
)
from ..visualization.camera_trajectory.wobble import (
generate_wobble,
generate_wobble_transformation,
)
from ..visualization.color_map import apply_color_map_to_image
from ..visualization.layout import add_border, hcat, vcat
from ..visualization.validation_in_3d import render_cameras, render_projections
from .decoder.decoder import Decoder, DepthRenderingMode
from .encoder import Encoder
from .encoder.visualization.encoder_visualizer import EncoderVisualizer
import moviepy.editor as mpy
import torch
import wandb
@dataclass
class OptimizerCfg:
lr: float
warm_up_steps: int
@dataclass
class TestCfg:
output_path: Path
@dataclass
class TrainCfg:
depth_mode: DepthRenderingMode | None
extended_visualization: bool
@runtime_checkable
class TrajectoryFn(Protocol):
def __call__(
self,
t: Float[Tensor, " t"],
) -> tuple[
Float[Tensor, "batch view 4 4"], # extrinsics
Float[Tensor, "batch view 3 3"], # intrinsics
]:
pass
class ModelWrapper(LightningModule):
| logger: Optional[WandbLogger] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hutaiHang/Faster-Diffusion
# Path: utils_sd.py
def register_controlnet_pipeline(pipe):
def new_call(self):
@torch.no_grad()
def call(
prompt: Union[str, List[str]] = None,
image: Union[
torch.FloatTensor,
PIL.Image.Image,
np.ndarray,
List[torch.FloatTensor],
List[PIL.Image.Image],
List[np.ndarray],
] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
guess_mode: bool = False,
):
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
image,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
controlnet_conditioning_scale,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
global_pool_conditions = (
controlnet.config.global_pool_conditions
if isinstance(controlnet, ControlNetModel)
else controlnet.nets[0].config.global_pool_conditions
)
guess_mode = guess_mode or global_pool_conditions
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Prepare image
if isinstance(controlnet, ControlNetModel):
image = self.prepare_image(
image=image,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
height, width = image.shape[-2:]
elif isinstance(controlnet, MultiControlNetModel):
images = []
for image_ in image:
image_ = self.prepare_image(
image=image_,
width=width,
height=height,
batch_size=batch_size * num_images_per_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
images.append(image_)
image = images
height, width = image[0].shape[-2:]
else:
assert False
# 5. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 6. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
self.init_latent = latents.detach().clone()
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 8. Denoising loop
#-------------------------------------------------------------
all_steps = len(self.scheduler.timesteps)
curr_span = 1
curr_step = 0
idx = 1
keytime = [0,1,2,3,5,10,15,25,35]
keytime.append(all_steps)
while curr_step<all_steps:
register_time(self.unet, curr_step)
if curr_span>0:
time_ls = []
for i in range(curr_step, curr_step+curr_span):
if i<all_steps:
time_ls.append(self.scheduler.timesteps[i])
else:
break
##--------------------------------
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, time_ls[0])
if curr_step in [0,1,2,3,5,10,15,25,35]:
# controlnet(s) inference
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
time_ls[0],
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=image,
conditioning_scale=controlnet_conditioning_scale,
guess_mode=guess_mode,
return_dict=False,
)
#----------------------save controlnet feature-------------------------
#useless, should delete
# setattr(self, 'downres_samples', deepcopy(down_block_res_samples))
# setattr(self, 'midres_sample', mid_block_res_sample.detach().clone())
#-----------------------save controlnet feature------------------------
else:
down_block_res_samples = None #self.downres_samples
mid_block_res_sample = None #self.midres_sample
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
time_ls,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
if isinstance(time_ls, list):
step_span = len(time_ls)
bs = noise_pred.shape[0]
bs_perstep = bs//step_span
denoised_latent = latents
for i, timestep in enumerate(time_ls):
if timestep/1000 < 0.5:
denoised_latent = denoised_latent + 0.003*self.init_latent
curr_noise = noise_pred[i*bs_perstep:(i+1)*bs_perstep]
denoised_latent = self.scheduler.step(curr_noise, timestep, denoised_latent, **extra_step_kwargs, return_dict=False)[0]
latents = denoised_latent
##----------------------------------------
curr_step += curr_span
idx += 1
if curr_step<all_steps:
curr_span = keytime[idx] - keytime[idx-1]
#-------------------------------------------------------------
# If we do sequential model offloading, let's offload unet and controlnet
# manually for max memory savings
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.unet.to("cpu")
self.controlnet.to("cpu")
torch.cuda.empty_cache()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
else:
image = latents
has_nsfw_concept = None
if has_nsfw_concept is None:
do_denormalize = [True] * image.shape[0]
else:
do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
return call
pipe.call = new_call(pipe)
# Path: utils_sd.py
def register_faster_forward(model, mod = '50ls'):
def faster_forward(self):
def forward(
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# prepare attention_mask
if attention_mask is not None:
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
if isinstance(timestep, list):
timesteps = timestep[0]
step = len(timestep)
else:
timesteps = timestep
step = 1
if not torch.is_tensor(timesteps) and (not isinstance(timesteps,list)):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif (not isinstance(timesteps,list)) and len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
if (not isinstance(timesteps,list)) and len(timesteps.shape) == 1:
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
elif isinstance(timesteps, list):
#timesteps list, such as [981,961,941]
timesteps = warpped_timestep(timesteps, sample.shape[0]).to(sample.device)
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
emb = emb + aug_emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if self.encoder_hid_proj is not None:
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
#===============
order = self.order #timestep, start by 0
#===============
ipow = int(np.sqrt(9 + 8*order))
cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35]
if isinstance(mod, int):
cond = order % mod == 0
elif mod == "pro":
cond = ipow * ipow == (9 + 8 * order)
elif mod == "50ls":
cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]
elif mod == "50ls2":
cond = order in [0, 10, 11, 12, 15, 20, 25, 30,35,45] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]
elif mod == "50ls3":
cond = order in [0, 20, 25, 30,35,45,46,47,48,49] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]
elif mod == "50ls4":
cond = order in [0, 9, 13, 14, 15, 28, 29, 32, 36,45] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]
elif mod == "100ls":
cond = order > 85 or order < 10 or order % 5 == 0
elif mod == "75ls":
cond = order > 65 or order < 10 or order % 5 == 0
elif mod == "s2":
cond = order < 20 or order > 40 or order % 2 == 0
if cond:
# print('current timestep:', order)
# 2. pre-process
sample = self.conv_in(sample)
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
)
if mid_block_additional_residual is not None:
sample = sample + mid_block_additional_residual
#----------------------save feature-------------------------
# setattr(self, 'skip_feature', (tmp_sample.clone() for tmp_sample in down_block_res_samples))
setattr(self, 'skip_feature', deepcopy(down_block_res_samples))
setattr(self, 'toup_feature', sample.detach().clone())
#-----------------------save feature------------------------
#-------------------expand feature for parallel---------------
if isinstance(timestep, list):
#timesteps list, such as [981,961,941]
timesteps = warpped_timestep(timestep, sample.shape[0]).to(sample.device)
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=self.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)
sample = warpped_feature(sample, step)
encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)
#-------------------expand feature for parallel---------------
else:
down_block_res_samples = self.skip_feature
sample = self.toup_feature
#-------------------expand feature for parallel---------------
down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)
sample = warpped_feature(sample, step)
encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)
#-------------------expand feature for parallel---------------
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
)
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
return forward
if model.__class__.__name__ == 'UNet2DConditionModel':
model.forward = faster_forward(model)
# Path: utils_sd.py
def seed_everything(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Path: controlnet_demo.py
import numpy as np
import cv2
import time
import torch
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler, DDIMScheduler
from controlnet_aux import HEDdetector, OpenposeDetector
from diffusers.utils import load_image
from utils_sd import register_controlnet_pipeline, register_faster_forward, seed_everything
image = load_image("images/condition.jpeg")
image = np.array(image)
low_threshold = 100
high_threshold = 200
image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
image_condition = Image.fromarray(image)
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
).to('cuda')
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to('cuda')
print('Warm up of the gpu')
for i in range(2):
image = pipe("Mona Lisa", image_condition).images[0]
#-------------------
print("Start Generating")
seed_everything(8888)
start_time = time.time()
image = pipe("Mona Lisa", image_condition).images[0]
end_time = time.time()
print("Origin Pipeline: {:.3f} seconds".format(end_time-start_time))
image.save('images/canny_out_origin.png')
register_controlnet_pipeline(pipe)
register_faster_forward(pipe.unet)
seed_everything(8888)
start_time = time.time()
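For context, the benchmark of the registered (accelerated) pipeline presumably mirrors the "Origin Pipeline" timing block above. The following is a minimal sketch continuing from the `start_time` line; the print label and output filename are illustrative assumptions, not the repository's exact strings.

```python
# Continues the script above after start_time = time.time();
# "Faster Pipeline" and the output path are assumptions for illustration.
image = pipe("Mona Lisa", image_condition).images[0]
end_time = time.time()
print("Faster Pipeline: {:.3f} seconds".format(end_time - start_time))
image.save('images/canny_out_faster.png')
```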
====REPOSITORY====
# Repo Name: SHI-Labs/VCoder
# Path: vcoder_llava/model/vcoder_llava_arch.py
class VCoderLlavaMetaModel:
def __init__(self, config):
super(VCoderLlavaMetaModel, self).__init__(config)
self.config = config
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
if hasattr(config, "seg_mm_projector_type"):
self.seg_mm_projector = build_seg_projector(config)
if hasattr(config, "use_mm2_proj"):
if config.use_mm2_proj:
self.mm2_projector = build_vision_projector(config)
if hasattr(config, "mm_vcoder_lm_emb"):
self.vcoder_lm_emb = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
if self.get_vision_tower() is None:
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
else:
if fsdp is not None and len(fsdp) > 0:
vision_tower = self.vision_tower[0]
else:
vision_tower = self.vision_tower
vision_tower.load_model()
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
p.requires_grad = True
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
def initialize_seg_modules(self, model_args, fsdp=None):
mm_seg_select_layer = model_args.mm_seg_select_layer
mm_seg_select_feature = model_args.mm_seg_select_feature
self.config.seg_mm_hidden_size = self.vision_tower.hidden_size
pretrain_mm2_mlp_adapter = model_args.pretrain_mm2_mlp_adapter
self.config.seg_use_mm_proj = True
self.config.seg_mm_projector_type = getattr(model_args, 'seg_mm_projector_type', 'linear')
self.config.mm_seg_select_layer = mm_seg_select_layer
self.config.mm_seg_select_feature = mm_seg_select_feature
self.seg_mm_projector = build_seg_projector(self.config)
self.vcoder_lm_emb = nn.Embedding(self.config.vocab_size, self.config.hidden_size, self.config.pad_token_id)
if getattr(model_args, "use_mm2_proj"):
self.config.use_mm2_proj = model_args.use_mm2_proj
self.mm2_projector = build_vision_projector(self.config)
if pretrain_mm2_mlp_adapter is not None:
mm2_projector_weights = torch.load(pretrain_mm2_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm2_projector.load_state_dict(get_w(mm2_projector_weights, 'mm_projector'))
# Path: vcoder_llava/model/vcoder_llava_arch.py
class VCoderLlavaMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_seg_images(self, seg_images):
seg_features = self.get_model().get_vision_tower()(seg_images)
seg_features = self.get_model().seg_mm_projector(seg_features)
return seg_features
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def encode_images_w_seg(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm2_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images, seg_images,
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
if seg_images is not None and hasattr(self, 'mm2_projector'):
image_features = self.encode_images_w_seg(concat_images)
else:
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
if seg_images is not None and hasattr(self, 'mm2_projector'):
image_features = self.encode_images_w_seg(images)
else:
image_features = self.encode_images(images)
if seg_images is not None:
if type(seg_images) is list or seg_images.ndim == 5:
concat_seg_images = torch.cat([image for image in seg_images], dim=0)
seg_features = self.encode_seg_images(concat_seg_images)
split_sizes = [image.shape[0] for image in seg_images]
seg_features = torch.split(seg_features, split_sizes, dim=0)
seg_features = [x.flatten(0, 1) for x in seg_features]
else:
seg_features = self.encode_seg_images(seg_images)
self.get_model().vcoder_lm_emb.weight.data = self.get_model().get_input_embeddings().weight.data.clone()
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
cur_seg_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0 or (cur_input_ids == SEG_TOKEN_INDEX).sum() == 0:
# FIXME: this is a hacky fix, for deepspeed zero3 to work
cur_image_features = image_features[cur_image_idx]
if seg_images is not None:
cur_seg_features = seg_features[cur_seg_idx]
half_len = cur_input_ids.shape[0] // 2
if seg_images is not None:
cur_input_embeds_1 = self.get_model().vcoder_lm_emb(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().vcoder_lm_emb(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_seg_features[0:0], cur_image_features[0:0], cur_input_embeds_2], dim=0)
else:
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
cur_seg_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if seg_images is None:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
else:
cur_new_input_embeds.append(self.get_model().vcoder_lm_emb(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if seg_images is not None:
seg_token_indices = torch.where(cur_input_ids == SEG_TOKEN_INDEX)[0]
while seg_token_indices.numel() > 0:
cur_seg_features = seg_features[cur_seg_idx]
seg_token_start = seg_token_indices[0]
cur_new_input_embeds.append(self.get_model().vcoder_lm_emb(cur_input_ids[:seg_token_start]))
cur_new_input_embeds.append(cur_seg_features)
if labels is not None:
cur_new_labels.append(cur_labels[:seg_token_start])
cur_new_labels.append(torch.full((cur_seg_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[seg_token_start+1:]
cur_seg_idx += 1
cur_input_ids = cur_input_ids[seg_token_start+1:]
seg_token_indices = torch.where(cur_input_ids == SEG_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
if seg_images is None:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
else:
cur_new_input_embeds.append(self.get_model().vcoder_lm_emb(cur_input_ids))
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
# Path: vcoder_llava/model/language_model/vcoder_llava_llama.py
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..vcoder_llava_arch import VCoderLlavaMetaModel, VCoderLlavaMetaForCausalLM
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VCoderLlavaConfig(LlamaConfig):
model_type = "vcoder_llava"
class VCoderLlavaLlamaModel(VCoderLlavaMetaModel, LlamaModel):
config_class = VCoderLlavaConfig
def __init__(self, config: LlamaConfig):
super(VCoderLlavaLlamaModel, self).__init__(config)
class VCoderLlavaLlamaForCausalLM(LlamaForCausalLM, VCoderLlavaMetaForCausalLM):
config_class = VCoderLlavaConfig
def __init__(self, config):
super(LlamaForCausalLM, self).__init__(config)
self.model = VCoderLlavaLlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
segs: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
====REPOSITORY====
# Repo Name: OSU-NLP-Group/SeeAct
# Path: src/data_utils/format_prompt_utils.py
def data_format_input_multichoice(
sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False
):
# Parse html into a dom tree
dom_tree = lxml.etree.fromstring(sample["cleaned_html"])
dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids)
tree_repr, id_mapping = get_tree_repr(
dom_tree, id_mapping={}, keep_html_brackets=keep_html_brackets
)
candidate_nodes = dom_tree.xpath("//*[@backend_node_id]")
choices = []
for idx, node in enumerate(candidate_nodes):
temp = get_tree_repr(
node,
id_mapping=id_mapping,
keep_html_brackets=keep_html_brackets,
)
choices.append(
[
node.attrib["backend_node_id"],
" ".join(
get_tree_repr(
node,
id_mapping=id_mapping,
keep_html_brackets=keep_html_brackets,
)[0].split()[:10]
),
]
)
gt = id_mapping.get(gt, -1)
seq_input = (
"Based on the HTML webpage above, try to complete the following task:\n"
f"Task: {sample['confirmed_task']}\n"
f"Previous actions:\n"
)
if len(sample["previous_actions"]) > 0:
for action in sample["previous_actions"][-previous_k:]:
seq_input += f"{action}\n"
else:
seq_input += "None\n"
seq_input += (
"What should be the next action? Please select from the following choices "
"(If the correct action is not in the page above, please select A. 'None of the above'):\n\n"
"A. None of the above\n"
)
for idx, choice in enumerate(choices):
# convert to ascii A, B, C, D, ...
seq_input += f"{chr(66 + idx)}. {choice[1]}\n"
if gt == -1:
seq_target = "A."
else:
gt += 1
current_action_op = sample["operation"]["op"]
current_action_value = sample["operation"]["value"]
seq_target = f"{chr(65 + gt)}.\n" f"Action: {current_action_op}\n"
if current_action_op != "CLICK":
seq_target += f"Value: {current_action_value}"
return tree_repr, seq_input, seq_target, choices, node_to_keep
# Path: src/data_utils/format_prompt_utils.py
def format_options(choices):
option_text = ""
abcd = ''
non_abcd = ''
multi_choice = ''
for multichoice_idx, choice in enumerate(choices):
multi_choice += f"{generate_option_name(multichoice_idx)}. {choice[1]}\n"
abcd += f"{generate_option_name(multichoice_idx)}, "
non_abcd = generate_option_name(multichoice_idx + 1)
multi_choice += f"{non_abcd}. None of the other options match the correct element"
# option_text += abcd
option_text += f"If none of these elements match your target element, please select {non_abcd}. None of the other options match the correct element.\n"
option_text += (multi_choice + '\n\n')
return option_text
# Path: src/data_utils/format_prompt_utils.py
def generate_option_name(index):
if index < 26:
return string.ascii_uppercase[index]
else:
first_letter_index = (index - 26) // 26
second_letter_index = (index - 26) % 26
first_letter = string.ascii_uppercase[first_letter_index]
second_letter = string.ascii_uppercase[second_letter_index]
return f"{first_letter}{second_letter}"
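The option naming above maps choice indices to spreadsheet-style letters (A–Z, then AA, AB, ...). A self-contained sketch of that mapping, mirroring (not importing) the function above:

```python
import string

def option_name(index: int) -> str:
    # Mirrors generate_option_name: single letters for the first 26 options,
    # then two-letter names AA, AB, ... for later options.
    if index < 26:
        return string.ascii_uppercase[index]
    first = string.ascii_uppercase[(index - 26) // 26]
    second = string.ascii_uppercase[(index - 26) % 26]
    return f"{first}{second}"

print([option_name(i) for i in (0, 1, 25, 26, 27, 51, 52)])
# ['A', 'B', 'Z', 'AA', 'AB', 'AZ', 'BA']
```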
# Path: src/data_utils/format_prompt_utils.py
def generate_new_referring_prompt(referring_description="", element_format="", action_format="", value_format="",
choices=None,split="4"):
referring_prompt = ""
# Add description about how to format output
if referring_description != "":
referring_prompt += referring_description
referring_prompt += "\n\n"
# Add element prediction format and choices
# Prepare Option texts
# For exp {1, 2, 4}, generate option
# For element_atttribute, set options field at None
if choices:
choice_text = format_options(choices)
referring_prompt += choice_text
if element_format != "":
referring_prompt += element_format
referring_prompt += "\n\n"
# Format Action Prediction
if action_format != "":
referring_prompt += action_format
referring_prompt += "\n\n"
# Format Value Prediction
if value_format != "":
referring_prompt += value_format
referring_prompt += ""
return referring_prompt
# Path: src/data_utils/format_prompt_utils.py
def generate_new_query_prompt(system_prompt="", task="", previous_actions=None, question_description=""):
"""
Generate the first phase prompt to ask model to generate general descriptions about {environment, high-level plans, next step action}
Each experiment will have a similar prompt in this phase
This prompt is used to generate models' thoughts without disrupt of formatting/referring prompts
"""
sys_role=""+system_prompt
query_text = ""
# System Prompt
query_text += "You are asked to complete the following task: "
# Task Description
query_text += task
query_text += "\n\n"
# Previous Actions
previous_action_text = "Previous Actions:\n"
if previous_actions is None:
previous_actions = []
for action_text in previous_actions:
previous_action_text += action_text
previous_action_text += "\n"
query_text += previous_action_text
query_text += "\n"
# Question Description
query_text += question_description
return [sys_role,query_text]
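As a usage illustration of the query-prompt builder above, here is a hedged sketch; the import path assumes the repository root is on `sys.path`, and the task/action strings are made-up examples.

```python
# Hypothetical usage of generate_new_query_prompt as defined above.
from src.data_utils.format_prompt_utils import generate_new_query_prompt

sys_role, query_text = generate_new_query_prompt(
    system_prompt="(system prompt text)",
    task="Find a one-way flight from Chicago to Paris.",
    previous_actions=["CLICK: the 'Flights' tab", "TYPE: 'Chicago' into the origin box"],
    question_description="What should be the next action?",
)
print(sys_role)    # the system role string
print(query_text)  # task + previous actions + question description
```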
# Path: src/data_utils/prompts.py
from .format_prompt_utils import data_format_input_multichoice, \
format_options, generate_option_name, generate_new_referring_prompt, generate_new_query_prompt
Format:
ELEMENT: The uppercase letter of your choice.""",
"action_format": f"{action_format}",
"value_format": f"{value_format}"
}
exp3_prompt_dict = {
"system_prompt": sys_prompt,
"question_description": question_description_new_exp3,
"referring_description": f"""""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element, element type, element text, action and value should be in five separate lines.
Format:
ELEMENT: Please describe which element you need to operate with. Describe it as detailed as possible, including what it is and where it is.
ELEMENT TYPE: Please specify its type from these options: BUTTON, TEXTBOX, SELECTBOX, or LINK.
ELEMENT TEXT: Please provide the exact text displayed on the element. Do not invent or modify the text; reproduce it as-is from the screenshot.""",
"action_format": f"{action_format}",
"value_format": f"{value_format}"
}
##### SeeAct Online Prompts
seeact_online_sys_prompt = '''Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option() type() and keyboard.press('Enter') functions in playwright respectively) One next step means one operation within the four. Unlike humans, for typing (e.g., in text areas, text boxes) and selecting (e.g., from dropdown menus or <select> elements), you should try directly typing the input or selecting the choice, bypassing the need for an initial click. You should not attempt to create accounts, log in or do the final submission. Terminate when you deem the task complete or if it requires potentially harmful actions.'''
seeact_online_question_description_new_exp4 = '''The screenshot below shows the webpage you see. Follow the following guidance to think step by step before outlining the next action step at the current stage:
(Current Webpage Identification)
Firstly, think about what the current webpage is.
(Previous Action Analysis)
Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step. Specifically, if the last action involved a TYPE, always evaluate whether it necessitates a confirmation step, because typically a single TYPE action does not make effect. (often, simply pressing 'Enter', assuming the default element involved in the last action, unless other clear elements are present for operation).
(Screenshot Details Analysis)
Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
(Next Action Based on Webpage and Analysis)
Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
To be successful, it is important to follow the following rules:
1. You should only issue a valid action given the current observation.
2. You should only issue one action at a time
3. For handling the select dropdown elements on the webpage, it's not necessary for you to provide completely accurate options right now. The full list of options for these elements will be supplied later.'''
seeact_online_action_format = "ACTION: Choose an action from {CLICK, SELECT, TYPE, PRESS ENTER, TERMINATE, NONE}."
seeact_online_value_format = "VALUE: Provide additional input based on ACTION.\n\nThe VALUE means:\nIf ACTION == TYPE, specify the " \
"text to be typed.\nIf ACTION == SELECT, indicate the option to be chosen. Revise the selection value to align with the available options within the element.\nIf ACTION == CLICK, PRESS ENTER, TERMINATE or NONE, " \
"write \"None\"."
seeact_choice_prompt_dict = {
"system_prompt": seeact_online_sys_prompt,
"question_description": seeact_online_question_description_new_exp4,
"referring_description": f"""(Reiteration)
First, reiterate your next target element, its detailed location, and the corresponding operation.
(Multichoice Question)
Below is a multi-choice question, where the choices are elements in the webpage. All elements are arranged in the order based on their height on the webpage, from top to bottom (and from left to right). This arrangement can be used to locate them. From the screenshot, find out where and what each one is on the webpage, taking into account both their text content and HTML details. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by re-examining the screenshot, the choices, and your further reasoning.""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
Format:
ELEMENT: The uppercase letter of your choice. (No need for PRESS ENTER)""",
"action_format": f"{seeact_online_action_format}",
"value_format": f"{seeact_online_value_format}"
}
def generate_prompt(experiment_split, task=None, previous=None, choices=None):
assert experiment_split != None, "Please specify the experiment split."
assert task != None, "Please input the task."
assert previous != None, "Please input the previous actions."
prompt_list = []
system_prompt_input = None
question_description_input = None
referring_input = None
element_format_input = None
action_format_input = None
value_format_input = None
if experiment_split in ["text","text_choice","4api"]:
system_prompt_input = exp4_prompt_dict["system_prompt"]
question_description_input = exp4_prompt_dict["question_description"]
referring_input = exp4_prompt_dict["referring_description"]
====REPOSITORY====
# Repo Name: DeepWok/mase
# Path: machop/chop/models/manual/llama_llora/configuration_llama_llora.py
class LlamaLoraConfig(PretrainedConfig):
r"""
Available checkpoints on huggingface:
- "lmsys/vicuna-7b-v1.3", # official release of Vicuna
- "lmsys/vicuna-13b-v1.3", # official release of Vicuna
- "lmsys/vicuna-33b-v1.3", # official release of Vicuna
- "huggyllama/llama-7b", # the uploader states this is the same as the official release of Llama
- "huggyllama/llama-13b", # the uploader states this is the same as the official release of Llama
- "huggyllama/llama-30b", # the uploader states this is the same as the official release of Llama
- "huggyllama/llama-65b", # the uploader states this is the same as the official release of Llama
This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LlamaModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings(`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
Example:
```python
>>> from transformers import LlamaModel, LlamaConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = LlamaConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = LlamaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llama"
keys_to_ignore_at_inference = ["past_key_values"]
# CZ: here are some pretrained models I found on the HuggingFace model hub
avail_hf_ckpts = (
"lmsys/vicuna-7b-v1.3", # official release of Vicuna
"lmsys/vicuna-13b-v1.3", # official release of Vicuna
"lmsys/vicuna-33b-v1.3", # official release of Vicuna
"huggyllama/llama-7b", # the uploader states this is the same as the official release of Llama
"huggyllama/llama-13b", # the uploader states this is the same as the official release of Llama
"huggyllama/llama-30b", # the uploader states this is the same as the official release of Llama
"huggyllama/llama-65b", # the uploader states this is the same as the official release of Llama
)
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
hidden_act="silu",
max_position_embeddings=2048,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
lora_config: dict = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
if lora_config is not None:
lora_config = parse_llama_lora_config(lora_config, num_hidden_layers)
self.lora_config = lora_config
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def __setattr__(self, key, value):
if key == "lora_config" and value is not None:
value = parse_llama_lora_config(
config=value, num_hidden_layers=self.num_hidden_layers
)
return super().__setattr__(key, value)
# Path: machop/chop/models/manual/lora_modules.py
class LoraLayer:
def __init__(self, in_features: int, out_features: int, **kwargs):
self.r = {}
self.lora_alpha = {}
self.scaling = {}
self.lora_dropout = nn.ModuleDict({})
self.lora_A = nn.ModuleDict({})
self.lora_B = nn.ModuleDict({})
# For Embedding layer
self.lora_embedding_A = nn.ParameterDict({})
self.lora_embedding_B = nn.ParameterDict({})
# Mark the weight as unmerged
self.merged = False
self.disable_adapter = False
self.in_features = in_features
self.out_features = out_features
self.kwargs = kwargs
init_lora_weights = bool(field(default=True))
def update_layer(
self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights
):
self.r[adapter_name] = r
self.lora_alpha[adapter_name] = lora_alpha
if lora_dropout > 0.0:
lora_dropout_layer = nn.Dropout(p=lora_dropout)
else:
lora_dropout_layer = nn.Identity()
self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer}))
# Actual trainable parameters
if self.disable_adapter == False:
if r > 0:
self.lora_A.update(
nn.ModuleDict(
{adapter_name: nn.Linear(self.in_features, r, bias=False)}
)
)
self.lora_B.update(
nn.ModuleDict(
{adapter_name: nn.Linear(r, self.out_features, bias=False)}
)
)
self.scaling[adapter_name] = lora_alpha / r
else:
pass
if init_lora_weights:
self.reset_lora_parameters(adapter_name)
self.to(self.weight.device)
def reset_lora_parameters(self, adapter_name):
if adapter_name in self.lora_A.keys():
# initialize A the same way as the default for nn.Linear and B to zero
nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5))
nn.init.zeros_(self.lora_B[adapter_name].weight)
if adapter_name in self.lora_embedding_A.keys():
# initialize a the same way as the default for nn.linear and b to zero
nn.init.zeros_(self.lora_embedding_A[adapter_name])
nn.init.normal_(self.lora_embedding_B[adapter_name])
# Path: machop/chop/models/manual/lora_modules.py
class LinearLora(nn.Linear, LoraLayer):
# Lora implemented in a dense layer
def __init__(
self,
in_features: int,
out_features: int,
config: dict = None,
**kwargs,
):
self.config = config
init_lora_weights = self.config.get("init_lora_weights", True)
r, lora_alpha, lora_dropout, adapter_name, disable_adapter = (
config["r"],
config["lora_alpha"],
config["lora_dropout"],
config["adapter_name"],
config["disable_adapter"],
)
lora_dropout = float(lora_dropout)
nn.Linear.__init__(self, in_features, out_features, **kwargs)
LoraLayer.__init__(self, in_features=in_features, out_features=out_features)
# Freezing the pre-trained weight matrix
self.weight.requires_grad = False
self.disable_adapter = disable_adapter
self.fan_in_fan_out = config.get("fan_in_fan_out", False)
self.is_target_conv_1d_layer = config.get("is_target_conv_1d_layer", False)
if self.fan_in_fan_out:
self.weight.data = self.weight.data.T
nn.Linear.reset_parameters(self)
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
self.active_adapter = adapter_name
self.is_target_conv_1d_layer = self.is_target_conv_1d_layer
def merge(self):
if self.active_adapter not in self.lora_A.keys():
return
if self.merged:
warnings.warn("Already merged. Nothing to do.")
return
if self.r[self.active_adapter] > 0:
self.weight.data += self.get_delta_weight(self.active_adapter)
self.merged = True
def unmerge(self):
if self.active_adapter not in self.lora_A.keys():
return
if not self.merged:
warnings.warn("Already unmerged. Nothing to do.")
return
if self.r[self.active_adapter] > 0:
self.weight.data -= self.get_delta_weight(self.active_adapter)
self.merged = False
def get_delta_weight(self, adapter):
return (
transpose(
self.lora_B[adapter].weight @ self.lora_A[adapter].weight,
self.fan_in_fan_out,
)
* self.scaling[adapter]
)
def _linear(self, input: torch.Tensor) -> torch.Tensor:
return F.linear(
input, transpose(self.weight, self.fan_in_fan_out), bias=self.bias
)
def forward(self, x: torch.Tensor):
previous_dtype = x.dtype
if self.active_adapter not in self.lora_A.keys():
return self._linear(x)
if self.disable_adapter:
if self.r[self.active_adapter] > 0 and self.merged:
self.unmerge()
result = self._linear(x)
elif self.r[self.active_adapter] == 0 or self.merged:
result = self._linear(x)
else:
lora_A = self.lora_A[self.active_adapter]
lora_B = self.lora_B[self.active_adapter]
dropout = self.lora_dropout[self.active_adapter]
scaling = self.scaling[self.active_adapter]
result = self._linear(x)
x = x.to(lora_A.weight.dtype)
result += lora_B(lora_A(dropout(x))) * scaling
result = result.to(previous_dtype)
return result
def extract_lora_params(self):
lora_params = {
"lora_A": self.lora_A[self.active_adapter].state_dict(),
"lora_B": self.lora_B[self.active_adapter].state_dict(),
}
return lora_params
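To make the merge/unmerge arithmetic in `get_delta_weight` concrete, here is a standalone numerical check, independent of the repository code (dimensions and seed are arbitrary), that folding `scaling * B @ A` into the frozen weight reproduces the two-path forward:

```python
import torch

torch.manual_seed(0)
in_features, out_features, r, lora_alpha = 8, 4, 2, 16
scaling = lora_alpha / r

W = torch.randn(out_features, in_features)  # frozen base weight (nn.Linear layout)
A = torch.randn(r, in_features)             # lora_A.weight
B = torch.randn(out_features, r)            # lora_B.weight (zero-initialized in the repo)

x = torch.randn(3, in_features)
y_unmerged = x @ W.T + (x @ A.T) @ B.T * scaling        # base path + low-rank path
y_merged = x @ (W + B @ A * scaling).T                  # delta folded into the weight
print(torch.allclose(y_unmerged, y_merged, atol=1e-6))  # True
```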
# Helper function to bias the training towards either the target module or the entire model
# Path: machop/chop/models/manual/llama_llora/modeling_llama_llora.py
import math
import torch
import torch.utils.checkpoint
from typing import List, Optional, Tuple, Union
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.models.llama import LlamaTokenizer
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_llama_llora import LlamaLoraConfig
from ..lora_modules import LoraLayer, LinearLora
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (present_key_value,)
return outputs
LLAMA_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LlamaConfig`]):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
LLAMA_START_DOCSTRING,
)
class LlamaPreTrainedModel(PreTrainedModel):
config_class = LlamaLoraConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LlamaModel):
module.gradient_checkpointing = value
LLAMA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
information on the default strategy.
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.n_positions - 1]`.
[What are position IDs?](../glossary#position-ids)
past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
====REPOSITORY====
# Repo Name: byeongjun-park/HarmonyView
# Path: ldm/base_utils.py
def read_pickle(pkl_path):
with open(pkl_path, 'rb') as f:
return pickle.load(f)
# Path: ldm/base_utils.py
def output_points(fn,pts,colors=None):
with open(fn, 'w') as f:
for pi, pt in enumerate(pts):
f.write(f'{pt[0]:.6f} {pt[1]:.6f} {pt[2]:.6f} ')
if colors is not None:
f.write(f'{int(colors[pi,0])} {int(colors[pi,1])} {int(colors[pi,2])}')
f.write('\n')
# Path: renderer/renderer.py
class NeuSRenderer(BaseRenderer):
def __init__(self, train_batch_num, test_batch_num, lambda_eikonal_loss=0.1, use_mask=True,
lambda_rgb_loss=1.0, lambda_mask_loss=0.0, rgb_loss='soft_l1', coarse_sn=64, fine_sn=64):
super().__init__(train_batch_num, test_batch_num)
self.n_samples = coarse_sn
self.n_importance = fine_sn
self.up_sample_steps = 4
self.anneal_end = 200
self.use_mask = use_mask
self.lambda_eikonal_loss = lambda_eikonal_loss
self.lambda_rgb_loss = lambda_rgb_loss
self.lambda_mask_loss = lambda_mask_loss
self.rgb_loss = rgb_loss
self.sdf_network = SDFNetwork(d_out=257, d_in=3, d_hidden=256, n_layers=8, skip_in=[4], multires=6, bias=0.5, scale=1.0, geometric_init=True, weight_norm=True)
self.color_network = RenderingNetwork(d_feature=256, d_in=9, d_out=3, d_hidden=256, n_layers=4, weight_norm=True, multires_view=4, squeeze_out=True)
self.default_dtype = torch.float32
self.deviation_network = SingleVarianceNetwork(0.3)
@torch.no_grad()
def get_vertex_colors(self, vertices):
"""
@param vertices: n,3
@return:
"""
V = vertices.shape[0]
bn = 20480
verts_colors = []
with torch.no_grad():
for vi in range(0, V, bn):
verts = torch.from_numpy(vertices[vi:vi+bn].astype(np.float32)).cuda()
feats = self.sdf_network(verts)[..., 1:]
gradients = self.sdf_network.gradient(verts) # ...,3
gradients = F.normalize(gradients, dim=-1)
colors = self.color_network(verts, gradients, gradients, feats)
colors = torch.clamp(colors,min=0,max=1).cpu().numpy()
verts_colors.append(colors)
verts_colors = (np.concatenate(verts_colors, 0)*255).astype(np.uint8)
return verts_colors
def upsample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s):
"""
Up sampling give a fixed inv_s
"""
device = rays_o.device
batch_size, n_samples = z_vals.shape
pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3
inner_mask = self.get_inner_mask(pts)
# radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)
inside_sphere = inner_mask[:, :-1] | inner_mask[:, 1:]
sdf = sdf.reshape(batch_size, n_samples)
prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
mid_sdf = (prev_sdf + next_sdf) * 0.5
cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
prev_cos_val = torch.cat([torch.zeros([batch_size, 1], dtype=self.default_dtype, device=device), cos_val[:, :-1]], dim=-1)
cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)
cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)
cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere
dist = (next_z_vals - prev_z_vals)
prev_esti_sdf = mid_sdf - cos_val * dist * 0.5
next_esti_sdf = mid_sdf + cos_val * dist * 0.5
prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)
next_cdf = torch.sigmoid(next_esti_sdf * inv_s)
alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
weights = alpha * torch.cumprod(
torch.cat([torch.ones([batch_size, 1], dtype=self.default_dtype, device=device), 1. - alpha + 1e-7], -1), -1)[:, :-1]
z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
return z_samples
def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False):
batch_size, n_samples = z_vals.shape
_, n_importance = new_z_vals.shape
pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]
z_vals = torch.cat([z_vals, new_z_vals], dim=-1)
z_vals, index = torch.sort(z_vals, dim=-1)
if not last:
device = pts.device
new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance)
sdf = torch.cat([sdf, new_sdf], dim=-1)
xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1).to(device)
index = index.reshape(-1)
sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)
return z_vals, sdf
def sample_depth(self, rays_o, rays_d, near, far, perturb):
n_samples = self.n_samples
n_importance = self.n_importance
up_sample_steps = self.up_sample_steps
device = rays_o.device
# sample points
batch_size = len(rays_o)
z_vals = torch.linspace(0.0, 1.0, n_samples, dtype=self.default_dtype, device=device) # sn
z_vals = near + (far - near) * z_vals[None, :] # rn,sn
if perturb > 0:
t_rand = (torch.rand([batch_size, 1]).to(device) - 0.5)
z_vals = z_vals + t_rand * 2.0 / n_samples
# Up sample
with torch.no_grad():
pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
sdf = self.sdf_network.sdf(pts).reshape(batch_size, n_samples)
for i in range(up_sample_steps):
rn, sn = z_vals.shape
inv_s = torch.ones(rn, sn - 1, dtype=self.default_dtype, device=device) * 64 * 2 ** i
new_z_vals = self.upsample(rays_o, rays_d, z_vals, sdf, n_importance // up_sample_steps, inv_s)
z_vals, sdf = self.cat_z_vals(rays_o, rays_d, z_vals, new_z_vals, sdf, last=(i + 1 == up_sample_steps))
return z_vals
def compute_sdf_alpha(self, points, dists, dirs, cos_anneal_ratio, step):
# points [...,3] dists [...] dirs[...,3]
sdf_nn_output = self.sdf_network(points)
sdf = sdf_nn_output[..., 0]
feature_vector = sdf_nn_output[..., 1:]
gradients = self.sdf_network.gradient(points) # ...,3
inv_s = self.deviation_network(points).clip(1e-6, 1e6) # ...,1
inv_s = inv_s[..., 0]
true_cos = (dirs * gradients).sum(-1) # [...]
iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +
F.relu(-true_cos) * cos_anneal_ratio) # always non-positive
# Estimate signed distances at section points
estimated_next_sdf = sdf + iter_cos * dists * 0.5
estimated_prev_sdf = sdf - iter_cos * dists * 0.5
prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
p = prev_cdf - next_cdf
c = prev_cdf
alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0) # [...]
return alpha, gradients, feature_vector, inv_s, sdf
def get_anneal_val(self, step):
if self.anneal_end < 0:
return 1.0
else:
return np.min([1.0, step / self.anneal_end])
def get_inner_mask(self, points):
return torch.sum(torch.abs(points)<=DEFAULT_SIDE_LENGTH,-1)==3
def render_impl(self, ray_batch, is_train, step):
near, far = near_far_from_sphere(ray_batch['rays_o'], ray_batch['rays_d'])
rays_o, rays_d = ray_batch['rays_o'], ray_batch['rays_d']
z_vals = self.sample_depth(rays_o, rays_d, near, far, is_train)
batch_size, n_samples = z_vals.shape
# section length in original space
dists = z_vals[..., 1:] - z_vals[..., :-1] # rn,sn-1
dists = torch.cat([dists, dists[..., -1:]], -1) # rn,sn
mid_z_vals = z_vals + dists * 0.5
points = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * mid_z_vals.unsqueeze(-1) # rn, sn, 3
inner_mask = self.get_inner_mask(points)
dirs = rays_d.unsqueeze(-2).expand(batch_size, n_samples, 3)
dirs = F.normalize(dirs, dim=-1)
device = rays_o.device
alpha, sampled_color, gradient_error, normal = torch.zeros(batch_size, n_samples, dtype=self.default_dtype, device=device), \
torch.zeros(batch_size, n_samples, 3, dtype=self.default_dtype, device=device), \
torch.zeros([batch_size, n_samples], dtype=self.default_dtype, device=device), \
torch.zeros([batch_size, n_samples, 3], dtype=self.default_dtype, device=device)
if torch.sum(inner_mask) > 0:
cos_anneal_ratio = self.get_anneal_val(step) if is_train else 1.0
alpha[inner_mask], gradients, feature_vector, inv_s, sdf = self.compute_sdf_alpha(points[inner_mask], dists[inner_mask], dirs[inner_mask], cos_anneal_ratio, step)
sampled_color[inner_mask] = self.color_network(points[inner_mask], gradients, -dirs[inner_mask], feature_vector)
# Eikonal loss
gradient_error[inner_mask] = (torch.linalg.norm(gradients, ord=2, dim=-1) - 1.0) ** 2 # rn,sn
normal[inner_mask] = F.normalize(gradients, dim=-1)
weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1], dtype=self.default_dtype, device=device), 1. - alpha + 1e-7], -1), -1)[..., :-1] # rn,sn
mask = torch.sum(weights,dim=1).unsqueeze(-1) # rn,1
color = (sampled_color * weights[..., None]).sum(dim=1) + (1 - mask) # add white background
normal = (normal * weights[..., None]).sum(dim=1)
outputs = {
'rgb': color, # rn,3
'gradient_error': gradient_error, # rn,sn
'inner_mask': inner_mask, # rn,sn
'normal': normal, # rn,3
'mask': mask, # rn,1
}
return outputs
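# For reference, the compositing above is standard front-to-back alpha blending: w_i = alpha_i * prod_{j<i}(1 - alpha_j),
# rgb = sum_i w_i * c_i + (1 - sum_i w_i) for a white background, and mask = sum_i w_i per ray.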
def render_with_loss(self, ray_batch, is_train, step):
render_outputs = self.render(ray_batch, is_train, step)
rgb_gt = ray_batch['rgb']
rgb_pr = render_outputs['rgb']
if self.rgb_loss == 'soft_l1':
epsilon = 0.001
rgb_loss = torch.sqrt(torch.sum((rgb_gt - rgb_pr) ** 2, dim=-1) + epsilon)
elif self.rgb_loss =='mse':
rgb_loss = F.mse_loss(rgb_pr, rgb_gt, reduction='none')
else:
raise NotImplementedError
rgb_loss = torch.mean(rgb_loss)
eikonal_loss = torch.sum(render_outputs['gradient_error'] * render_outputs['inner_mask']) / torch.sum(render_outputs['inner_mask'] + 1e-5)
loss = rgb_loss * self.lambda_rgb_loss + eikonal_loss * self.lambda_eikonal_loss
loss_batch = {
'eikonal': eikonal_loss,
'rendering': rgb_loss,
# 'mask': mask_loss,
}
if self.lambda_mask_loss>0 and self.use_mask:
mask_loss = F.mse_loss(render_outputs['mask'], ray_batch['mask'], reduction='none').mean()
loss += mask_loss * self.lambda_mask_loss
loss_batch['mask'] = mask_loss
return loss, loss_batch
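# Summary of the objective above: loss = lambda_rgb_loss * rgb_loss + lambda_eikonal_loss * eikonal_loss,
# plus lambda_mask_loss * mask_loss when use_mask is enabled and lambda_mask_loss > 0; the eikonal term
# penalizes |grad sdf| deviating from 1, averaged over samples inside the bounded region only.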
# Path: renderer/renderer.py
DEFAULT_SIDE_LENGTH = 0.6
# Path: ldm/util.py
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
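# Minimal illustrative sketch (the config values below are assumptions, not taken from the repo):
#   cfg = {"target": "renderer.renderer.NeuSRenderer", "params": {...}}
#   renderer = instantiate_from_config(cfg)   # resolves the dotted path, then calls it with **params
# The sentinel strings '__is_first_stage__' and '__is_unconditional__' short-circuit to None.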
# Path: train_renderer.py
import argparse
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import trimesh
import mcubes
from pathlib import Path
from omegaconf import OmegaConf
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor, Callback
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning import Trainer
from skimage.io import imsave
from tqdm import tqdm
from ldm.base_utils import read_pickle, output_points
from renderer.renderer import NeuSRenderer, DEFAULT_SIDE_LENGTH
from ldm.util import instantiate_from_config
class ResumeCallBacks(Callback):
def __init__(self):
pass
def on_train_start(self, trainer, pl_module):
pl_module.optimizers().param_groups = pl_module.optimizers()._optimizer.param_groups
def render_images(model, output,):
# render from model
n = 180
azimuths = (np.arange(n) / n * np.pi * 2).astype(np.float32)
elevations = np.deg2rad(np.asarray([30] * n).astype(np.float32))
K, _, _, _, poses = read_pickle(f'meta_info/camera-16.pkl')
h, w = 256, 256
default_size = 256
K = np.diag([w/default_size,h/default_size,1.0]) @ K
imgs = []
for ni in tqdm(range(n)):
# R = euler2mat(azimuths[ni], elevations[ni], 0, 'szyx')
# R = np.asarray([[0,-1,0],[0,0,-1],[1,0,0]]) @ R
e, a = elevations[ni], azimuths[ni]
row1 = np.asarray([np.sin(e)*np.cos(a),np.sin(e)*np.sin(a),-np.cos(e)])
row0 = np.asarray([-np.sin(a),np.cos(a), 0])
row2 = np.cross(row0, row1)
R = np.stack([row0,row1,row2],0)
t = np.asarray([0,0,1.5])
pose = np.concatenate([R,t[:,None]],1)
pose_ = torch.from_numpy(pose.astype(np.float32)).unsqueeze(0)
K_ = torch.from_numpy(K.astype(np.float32)).unsqueeze(0) # [1,3,3]
coords = torch.stack(torch.meshgrid(torch.arange(h), torch.arange(w)), -1)[:, :, (1, 0)] # h,w,2
coords = coords.float()[None, :, :, :].repeat(1, 1, 1, 1) # imn,h,w,2
coords = coords.reshape(1, h * w, 2)
coords = torch.cat([coords, torch.ones(1, h * w, 1, dtype=torch.float32)], 2) # imn,h*w,3
# imn,h*w,3 @ imn,3,3 => imn,h*w,3
rays_d = coords @ torch.inverse(K_).permute(0, 2, 1)
R, t = pose_[:, :, :3], pose_[:, :, 3:]
rays_d = rays_d @ R
rays_d = F.normalize(rays_d, dim=-1)
rays_o = -R.permute(0, 2, 1) @ t # imn,3,3 @ imn,3,1
rays_o = rays_o.permute(0, 2, 1).repeat(1, h * w, 1) # imn,h*w,3
ray_batch = {
'rays_o': rays_o.reshape(-1,3).cuda(),
'rays_d': rays_d.reshape(-1,3).cuda(),
}
with torch.no_grad():
| image = model.renderer.render(ray_batch,False,5000)['rgb'].reshape(h,w,3) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OPPOMKLab/u-LLaVA
# Path: models/GroundingDINO/groundingdino/util/misc.py
def inverse_sigmoid(x, eps=1e-3):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
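# In short, inverse_sigmoid is a clamped logit: log(x / (1 - x)) with both x and 1 - x floored at eps,
# e.g. inverse_sigmoid(torch.tensor(0.5)) == 0, and torch.sigmoid(inverse_sigmoid(x)) recovers x away from 0/1.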
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py
class BiAttentionBlock(nn.Module):
def __init__(
self,
v_dim,
l_dim,
embed_dim,
num_heads,
dropout=0.1,
drop_path=0.0,
init_values=1e-4,
cfg=None,
):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super(BiAttentionBlock, self).__init__()
# pre layer norm
self.layer_norm_v = nn.LayerNorm(v_dim)
self.layer_norm_l = nn.LayerNorm(l_dim)
self.attn = BiMultiHeadAttention(
v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
)
# add layer scale for training stability
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
v = self.layer_norm_v(v)
l = self.layer_norm_l(l)
delta_v, delta_l = self.attn(
v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l
)
# v, l = v + delta_v, l + delta_l
v = v + self.drop_path(self.gamma_v * delta_v)
l = l + self.drop_path(self.gamma_l * delta_l)
return v, l
# def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None)
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py
class MultiScaleDeformableAttention(nn.Module):
"""Multi-Scale Deformable Attention Module used in Deformable-DETR
`Deformable DETR: Deformable Transformers for End-to-End Object Detection.
<https://arxiv.org/pdf/2010.04159.pdf>`_.
Args:
embed_dim (int): The embedding dimension of Attention. Default: 256.
num_heads (int): The number of attention heads. Default: 8.
num_levels (int): The number of feature map used in Attention. Default: 4.
num_points (int): The number of sampling points for each query
in each head. Default: 4.
img2col_step (int): The step used in image_to_column. Default: 64.
dropout (float): Dropout layer used in output. Default: 0.1.
batch_first (bool): if ``True``, then the input and output tensor will be
provided as `(bs, n, embed_dim)`. Default: False. `(n, bs, embed_dim)`
"""
def __init__(
self,
embed_dim: int = 256,
num_heads: int = 8,
num_levels: int = 4,
num_points: int = 4,
img2col_step: int = 64,
batch_first: bool = False,
):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(
"embed_dim must be divisible by num_heads, but got {} and {}".format(
embed_dim, num_heads
)
)
head_dim = embed_dim // num_heads
self.batch_first = batch_first
if not _is_power_of_2(head_dim):
warnings.warn(
"""
It is recommended to set d_model in MSDeformAttn so that the dimension of
each attention head is a power of 2, which is more efficient.
"""
)
self.im2col_step = img2col_step
self.embed_dim = embed_dim
self.num_heads = num_heads
self.num_levels = num_levels
self.num_points = num_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
self.init_weights()
def _reset_parameters(self):
return self.init_weights()
def init_weights(self):
"""
Default initialization for Parameters of Module.
"""
constant_(self.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(self.num_heads, dtype=torch.float32) * (
2.0 * math.pi / self.num_heads
)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(self.num_heads, 1, 1, 2)
.repeat(1, self.num_levels, self.num_points, 1)
)
for i in range(self.num_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.0)
constant_(self.attention_weights.bias.data, 0.0)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.0)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.0)
def freeze_sampling_offsets(self):
print("Freeze sampling offsets")
self.sampling_offsets.weight.requires_grad = False
self.sampling_offsets.bias.requires_grad = False
def freeze_attention_weights(self):
print("Freeze attention weights")
self.attention_weights.weight.requires_grad = False
self.attention_weights.bias.requires_grad = False
def forward(
self,
query: torch.Tensor,
key: Optional[torch.Tensor] = None,
value: Optional[torch.Tensor] = None,
query_pos: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
reference_points: Optional[torch.Tensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
level_start_index: Optional[torch.Tensor] = None,
**kwargs
) -> torch.Tensor:
"""Forward Function of MultiScaleDeformableAttention
Args:
query (torch.Tensor): Query embeddings with shape
`(num_query, bs, embed_dim)`
key (torch.Tensor): Key embeddings with shape
`(num_key, bs, embed_dim)`
value (torch.Tensor): Value embeddings with shape
`(num_key, bs, embed_dim)`
query_pos (torch.Tensor): The position embedding for `query`. Default: None.
key_padding_mask (torch.Tensor): ByteTensor for `key`, with shape `(bs, num_key)`,
indicating which elements within `key` should be ignored in attention.
reference_points (torch.Tensor): The normalized reference points
with shape `(bs, num_query, num_levels, 2)`,
all elements is range in [0, 1], top-left (0, 0),
bottom-right (1, 1), including padding area.
or `(N, Length_{query}, num_levels, 4)`, add additional
two dimensions `(h, w)` to form reference boxes.
spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
level_start_index (torch.Tensor): The start index of each level. A tensor with
shape `(num_levels, )` which can be represented as
`[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
Returns:
torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
"""
if value is None:
value = query
if query_pos is not None:
query = query + query_pos
if not self.batch_first:
# change to (bs, num_query ,embed_dims)
query = query.permute(1, 0, 2)
value = value.permute(1, 0, 2)
bs, num_query, _ = query.shape
bs, num_value, _ = value.shape
assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
value = self.value_proj(value)
if key_padding_mask is not None:
value = value.masked_fill(key_padding_mask[..., None], float(0))
value = value.view(bs, num_value, self.num_heads, -1)
sampling_offsets = self.sampling_offsets(query).view(
bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
)
attention_weights = self.attention_weights(query).view(
bs, num_query, self.num_heads, self.num_levels * self.num_points
)
attention_weights = attention_weights.softmax(-1)
attention_weights = attention_weights.view(
bs,
num_query,
self.num_heads,
self.num_levels,
self.num_points,
)
# bs, num_query, num_heads, num_levels, num_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets
/ self.num_points
* reference_points[:, :, None, :, None, 2:]
* 0.5
)
else:
raise ValueError(
"Last dim of reference_points must be 2 or 4, but get {} instead.".format(
reference_points.shape[-1]
)
)
if torch.cuda.is_available() and value.is_cuda:
halffloat = False
if value.dtype == torch.float16:
halffloat = True
value = value.float()
sampling_locations = sampling_locations.float()
attention_weights = attention_weights.float()
output = MultiScaleDeformableAttnFunction.apply(
value,
spatial_shapes,
level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
if halffloat:
output = output.half()
else:
output = multi_scale_deformable_attn_pytorch(
value, spatial_shapes, sampling_locations, attention_weights
)
output = self.output_proj(output)
if not self.batch_first:
output = output.permute(1, 0, 2)
return output
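# Illustrative shape contract (batch_first=False, names as in the docstring above): query (num_query, bs, embed_dim),
# value (num_value, bs, embed_dim) with num_value == sum(H_l * W_l), reference_points (bs, num_query, num_levels, 2 or 4),
# spatial_shapes (num_levels, 2), level_start_index (num_levels,) -> output (num_query, bs, embed_dim);
# each head samples num_points offsets per feature level and mixes them with softmaxed attention weights.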
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/transformer_vanilla.py
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self.nhead = nhead
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
# repeat attn mask
if src_mask.dim() == 3 and src_mask.shape[0] == src.shape[1]:
# bs, num_q, num_k
src_mask = src_mask.repeat(self.nhead, 1, 1)
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0]
# src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
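# Illustrative usage (values assumed): MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
# gives two Linear+ReLU layers followed by a final Linear, i.e. the usual DETR-style box-regression head.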
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def _get_activation_fn(activation, d_model=256, batch_dim=0):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def _get_clones(module, N, layer_share=False):
# import ipdb; ipdb.set_trace()
if layer_share:
return nn.ModuleList([module for i in range(N)])
else:
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def gen_encoder_output_proposals(
memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None
):
"""
Input:
- memory: bs, \sum{hw}, d_model
- memory_padding_mask: bs, \sum{hw}
- spatial_shapes: nlevel, 2
- learnedwh: 2
Output:
- output_memory: bs, \sum{hw}, d_model
- output_proposals: bs, \sum{hw}, 4
"""
N_, S_, C_ = memory.shape
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
# import ipdb; ipdb.set_trace()
grid_y, grid_x = torch.meshgrid(
torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device),
)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
if learnedwh is not None:
# import ipdb; ipdb.set_trace()
wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl)
else:
wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
# scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)
# grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
# wh = torch.ones_like(grid) / scale
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += H_ * W_
# import ipdb; ipdb.set_trace()
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(
-1, keepdim=True
)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
# output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
# output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))
return output_memory, output_proposals
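# In short: proposals are normalized (cx, cy, w, h) boxes returned in logit ("un-sigmoided") space; positions that
# are padded or whose proposal falls outside (0.01, 0.99) are filled with +inf and the matching memory features are
# zeroed, so they are effectively ignored downstream.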
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def gen_sineembed_for_position(pos_tensor):
# n_query, bs, _ = pos_tensor.size()
# sineembed_tensor = torch.zeros(n_query, bs, 256)
scale = 2 * math.pi
dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.size(-1) == 2:
pos = torch.cat((pos_y, pos_x), dim=2)
elif pos_tensor.size(-1) == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
return pos
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py
def get_sine_pos_embed(
pos_tensor: torch.Tensor,
num_pos_feats: int = 128,
temperature: int = 10000,
exchange_xy: bool = True,
):
"""generate sine position embedding from a position tensor
Args:
pos_tensor (torch.Tensor): shape: [..., n].
num_pos_feats (int): projected shape for each float in the tensor.
temperature (int): temperature in the sine/cosine function.
exchange_xy (bool, optional): exchange pos x and pos y. \
For example, if the input tensor is [x, y], the result will be [pos(y), pos(x)]. Defaults to True.
Returns:
pos_embed (torch.Tensor): shape: [..., n*num_pos_feats].
"""
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
def sine_func(x: torch.Tensor):
sin_x = x * scale / dim_t
sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
return sin_x
pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)]
if exchange_xy:
pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
pos_res = torch.cat(pos_res, dim=-1)
return pos_res
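# Illustrative shapes (assumed): pos_tensor of shape [bs, nq, 4] with num_pos_feats=128 yields an embedding of
# shape [bs, nq, 4 * 128]; each scalar is expanded into interleaved sin/cos features, and exchange_xy=True swaps
# the embeddings of the first two coordinates so (x, y) comes out as (pos(y), pos(x)).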
# Path: models/GroundingDINO/groundingdino/models/GroundingDINO/transformer.py
from typing import Optional
from torch import Tensor, nn
from models.GroundingDINO.groundingdino.util.misc import inverse_sigmoid
from .fuse_modules import BiAttentionBlock
from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn
from .transformer_vanilla import TransformerEncoderLayer
from .utils import (
MLP,
_get_activation_fn,
_get_clones,
gen_encoder_output_proposals,
gen_sineembed_for_position,
get_sine_pos_embed,
)
import torch
import torch.utils.checkpoint as checkpoint
normalize_before=False,
return_intermediate_dec=False,
query_dim=4,
num_patterns=0,
# for deformable encoder
num_feature_levels=1,
enc_n_points=4,
dec_n_points=4,
# init query
learnable_tgt_init=False,
# two stage
two_stage_type="no", # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']
embed_init_tgt=False,
# for text
use_text_enhancer=False,
use_fusion_layer=False,
use_checkpoint=False,
use_transformer_ckpt=False,
use_text_cross_attention=False,
text_dropout=0.1,
fusion_dropout=0.1,
fusion_droppath=0.0,
):
super().__init__()
self.num_feature_levels = num_feature_levels
self.num_encoder_layers = num_encoder_layers
self.num_unicoder_layers = num_unicoder_layers
self.num_decoder_layers = num_decoder_layers
self.num_queries = num_queries
assert query_dim == 4
# choose encoder layer type
encoder_layer = DeformableTransformerEncoderLayer(
d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points
)
if use_text_enhancer:
text_enhance_layer = TransformerEncoderLayer(
d_model=d_model,
nhead=nhead // 2,
dim_feedforward=dim_feedforward // 2,
dropout=text_dropout,
)
else:
text_enhance_layer = None
if use_fusion_layer:
feature_fusion_layer = BiAttentionBlock(
v_dim=d_model,
l_dim=d_model,
embed_dim=dim_feedforward // 2,
num_heads=nhead // 2,
dropout=fusion_dropout,
drop_path=fusion_droppath,
)
else:
feature_fusion_layer = None
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
assert encoder_norm is None
self.encoder = TransformerEncoder(
encoder_layer,
num_encoder_layers,
d_model=d_model,
num_queries=num_queries,
text_enhance_layer=text_enhance_layer,
feature_fusion_layer=feature_fusion_layer,
use_checkpoint=use_checkpoint,
use_transformer_ckpt=use_transformer_ckpt,
)
# choose decoder layer type
decoder_layer = DeformableTransformerDecoderLayer(
d_model,
dim_feedforward,
dropout,
activation,
num_feature_levels,
nhead,
dec_n_points,
use_text_cross_attention=use_text_cross_attention,
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=d_model,
query_dim=query_dim,
num_feature_levels=num_feature_levels,
)
self.d_model = d_model
self.nhead = nhead
self.dec_layers = num_decoder_layers
self.num_queries = num_queries # useful for single stage model only
self.num_patterns = num_patterns
if not isinstance(num_patterns, int):
Warning("num_patterns should be int but {}".format(type(num_patterns)))
self.num_patterns = 0
if num_feature_levels > 1:
if self.num_encoder_layers > 0:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
else:
self.level_embed = None
self.learnable_tgt_init = learnable_tgt_init
assert learnable_tgt_init, "why not learnable_tgt_init"
self.embed_init_tgt = embed_init_tgt
if (two_stage_type != "no" and embed_init_tgt) or (two_stage_type == "no"):
self.tgt_embed = nn.Embedding(self.num_queries, d_model)
nn.init.normal_(self.tgt_embed.weight.data)
else:
self.tgt_embed = None
# for two stage
self.two_stage_type = two_stage_type
| assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chinhsuanwu/ifusion
# Path: dataset/finetune.py
class FinetuneIterableDataset(IterableDataset, FinetuneDataset):
def __init__(self, transform_fp):
super().__init__(transform_fp)
def __iter__(self):
while True:
index = torch.randint(0, len(self.perm), size=(1,)).item()
index_target, index_cond = (
self.perm[index, 0].item(),
self.perm[index, 1].item(),
)
yield {
"image_target": self.all_images[index_target],
"image_cond": self.all_images[index_cond],
"T": self.get_trans(self.all_camtoworlds[index_target], self.all_camtoworlds[index_cond], in_T=True),
}
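# Behaviour sketch: the iterator never terminates; each draw picks a random (target, cond) index pair from
# self.perm and yields the two images plus the relative pose T between their camera-to-world matrices, so the
# number of optimization steps is governed by the training loop (see max_step in finetune below) rather than
# by an epoch length.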
# Path: dataset/inference.py
class MultiImageInferenceDataset(Dataset, BaseDataset):
def __init__(
self,
transform_fp,
n_views: int,
theta: int,
radius: float,
):
self.setup(transform_fp)
self.infer_camtoworlds = make_circular_poses(n_views, theta, radius)
def __len__(self):
return len(self.infer_camtoworlds)
def __getitem__(self, index):
target_camtoworld = self.infer_camtoworlds[index]
latlon = torch.stack(
[
self.get_trans(target_camtoworld, self.all_camtoworlds[i], in_T=False)
for i in range(len(self.all_camtoworlds))
]
)
return {
"image_cond": self.all_images,
"theta": latlon[:, 0],
"azimuth": latlon[:, 1],
"distance": latlon[:, 2],
}
def loader(self, batch_size=1, num_workers=8, **kwargs):
return DataLoader(
self,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
sampler=None,
**kwargs,
)
# Path: dataset/inference.py
class SingleImageInferenceDataset(Dataset, BaseDataset):
def __init__(
self,
image_fp: str = None,
transform_fp: str = None,
n_views: int = 8,
theta: int = -20,
radius: float = 1.0,
default_latlon: List[float] = [0, 0, 1],
):
if image_fp:
self.image = load_image(image_fp, device="cpu").squeeze(0)
self.camtoworld = latlon2mat(torch.tensor([default_latlon]))
elif transform_fp:
self.setup(transform_fp)
self.image, self.camtoworld = self.all_images[0], self.all_camtoworlds[0]
else:
raise ValueError("Either image_fp or transform_fp must be provided.")
self.infer_camtoworlds = make_circular_poses(n_views, theta, radius)
def __len__(self):
return len(self.infer_camtoworlds)
def __getitem__(self, index):
target_camtoworld = self.infer_camtoworlds[index]
latlon = self.get_trans(target_camtoworld, self.camtoworld, in_T=False)
return {
"image_cond": self.image,
"theta": latlon[0],
"azimuth": latlon[1],
"distance": latlon[2],
}
def loader(self, batch_size=1, num_workers=8, **kwargs):
return DataLoader(
self,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
sampler=None,
**kwargs,
)
# Path: util/pose.py
def latlon2mat(latlon, in_deg=True, default_radius=1.0):
if latlon.shape[-1] == 2:
radius = torch.ones_like(latlon[:, 0]) * default_radius
latlon = torch.cat((latlon, radius.unsqueeze(1)), dim=1)
if in_deg:
latlon[:, :2] = latlon[:, :2].deg2rad()
mv = [
translate(0, 0, -radius) @ rotate_x(theta) @ rotate_y(-azimuth)
for theta, azimuth, radius in latlon
]
c2w = torch.linalg.inv(torch.stack(mv))
return c2w
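# Illustrative call (assuming translate/rotate_x/rotate_y are the usual homogeneous-matrix helpers):
#   latlon2mat(torch.tensor([[0., 0., 1.]]))  # theta=0 deg, azimuth=0 deg, radius=1
# returns one camera-to-world matrix with the camera placed at distance 1 from the origin, matching the
# default_latlon = [0, 0, 1] convention used by the inference datasets above; angles are in degrees unless in_deg=False.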
# Path: util/pose.py
def make_T(theta, azimuth, distance, in_deg=False):
if in_deg:
theta, azimuth = theta.deg2rad(), azimuth.deg2rad()
return torch.stack(
(
theta,
torch.sin(azimuth),
torch.cos(azimuth),
distance,
)
)
# Path: util/pose.py
def mat2latlon(T, in_deg=False, return_radius=False):
if len(T.shape) == 2:
T = T.unsqueeze(0)
xyz = T[:, :3, 3]
radius = torch.norm(xyz, dim=1, keepdim=True)
xyz = xyz / radius
theta = -torch.asin(xyz[:, 1])
azimuth = torch.atan2(xyz[:, 0], xyz[:, 2])
if in_deg:
theta, azimuth = theta.rad2deg(), azimuth.rad2deg()
if return_radius:
return torch.stack((theta, azimuth, radius.squeeze(0))).T
return torch.stack((theta, azimuth)).T
# Path: util/util.py
def load_image(fp, to_clip=True, verbose=True, device="cuda"):
if verbose:
print(f"[INFO] Loading image {fp}")
image = np.array(Image.open(fp))
if image.shape[-1] == 4:
image[image[..., -1] < 128] = [255] * 4
image = image[..., :3]
image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)
image = image.astype(np.float32) / 255.0
image = torch.from_numpy(image).contiguous().to(device)
image = image.permute(2, 0, 1).unsqueeze(0)
if to_clip:
image = image * 2 - 1
return image
# Path: util/util.py
def parse_optimizer(config, params):
optim = getattr(torch.optim, config.name)(params, **config.args)
return optim
# Path: util/util.py
def parse_scheduler(config, optim):
scheduler = getattr(torch.optim.lr_scheduler, config.name)(optim, **config.args)
return scheduler
# Path: util/viz.py
def plot_image(*xs, normalize=False, fp="out.png"):
# x: [B, 3, H, W], [3, H, W], [1, H, W] or [H, W] torch.Tensor
# [B, H, W, 3], [H, W, 3], [H, W, 1] or [H, W] numpy.ndarray
def _plot_image(image):
if isinstance(image, torch.Tensor):
if len(image.shape) == 3:
if image.shape[0] == 1 or image.shape[0] == 3 or image.shape[0] == 4:
image = image.permute(1, 2, 0).squeeze()
image = image.detach().cpu().numpy()
image = image.astype(np.float32)
# normalize
if normalize:
image = (image - image.min(axis=0, keepdims=True)) / (
image.max(axis=0, keepdims=True)
- image.min(axis=0, keepdims=True)
+ 1e-8
)
if image.max() <= 1:
image *= 255
Image.fromarray(image.astype(np.uint8)).save(fp)
for x in xs:
if len(x.shape) == 4:
for i in range(x.shape[0]):
_plot_image(x[i])
else:
_plot_image(x)
# Path: ifusion.py
import json
import numpy as np
import torch
from glob import glob
from einops import rearrange
from liegroups.torch import SE3
from tqdm import trange
from dataset.finetune import FinetuneIterableDataset
from dataset.inference import MultiImageInferenceDataset, SingleImageInferenceDataset
from util.pose import latlon2mat, make_T, mat2latlon
from util.typing import *
from util.util import load_image, parse_optimizer, parse_scheduler
from util.viz import plot_image
results = torch.tensor(results)
best_idx = torch.argmin(results[:, 0])
pred_pose = results[best_idx][1:]
print(
f"[INFO] Best pose: theta: {pred_pose[0]:.2f}, azimuth: {pred_pose[1]:.2f}, distance: {pred_pose[2]:.2f}"
)
return pred_pose
def optimize_pose(
model,
image_dir: str,
transform_fp: str,
demo_fp: str,
default_latlon: List[float] = [0, 0, 1],
**kwargs,
):
image_fps = sorted(glob(image_dir + "/*.png") + glob(image_dir + "/*.jpg"))
image_fps = [fp for fp in image_fps if fp != demo_fp]
# FIXME: always pick the first image as reference
ref_image = load_image(image_fps[0])
qry_images = [load_image(image_fps[i]) for i in range(1, len(image_fps))]
out_dict = {"camera_angle_x": np.deg2rad(49.1), "frames": []}
out_dict["frames"].append(
{
"file_path": image_fps[0].replace(image_dir + "/", ""),
"transform_matrix": latlon2mat(torch.tensor([default_latlon])).tolist(),
"latlon": list(default_latlon),
}
)
for qry_fp, qry_image in zip(image_fps[1:], qry_images):
assert ref_image.shape == qry_image.shape
pose = optimize_pose_pair(
model=model, ref_image=ref_image, qry_image=qry_image, **kwargs
)
pose = np.add(default_latlon, pose.unsqueeze(0))
out_dict["frames"].append(
{
"file_path": qry_fp.replace(image_dir + "/", ""),
"transform_matrix": latlon2mat(pose.clone()).tolist(),
"latlon": pose.squeeze().tolist(),
}
)
# save poses to json
with open(transform_fp, "w") as f:
json.dump(out_dict, f, indent=4)
def finetune(
model,
transform_fp: str,
lora_ckpt_fp: str,
lora_rank: int,
lora_target_replace_module: List[str],
args,
):
model.inject_lora(
rank=lora_rank,
target_replace_module=lora_target_replace_module,
)
train_dataset = FinetuneIterableDataset(transform_fp)
train_loader = train_dataset.loader(args.batch_size)
optimizer = parse_optimizer(args.optimizer, model.require_grad_params)
scheduler = parse_scheduler(args.scheduler, optimizer)
train_loader = iter(train_loader)
with trange(args.max_step) as pbar:
for step in pbar:
optimizer.zero_grad()
batch = next(train_loader)
batch = {k: v.to(model.device) for k, v in batch.items()}
loss = model(batch)
pbar.set_description(f"step: {step}, loss: {loss.item():.4f}")
loss.backward()
optimizer.step()
scheduler.step()
model.save_lora(lora_ckpt_fp)
model.remove_lora()
def inference(
model,
transform_fp: str,
lora_ckpt_fp: str,
demo_fp: str,
lora_rank: int,
lora_target_replace_module: List[str],
use_multi_view_condition: bool,
n_views: int,
theta: float,
radius: float,
args,
):
if lora_ckpt_fp:
model.inject_lora(
ckpt_fp=lora_ckpt_fp,
rank=lora_rank,
target_replace_module=lora_target_replace_module,
)
if use_multi_view_condition:
test_dataset = MultiImageInferenceDataset
generate_fn = model.generate_from_tensor_multi_cond
else:
test_dataset = SingleImageInferenceDataset
generate_fn = model.generate_from_tensor
test_dataset = test_dataset(
transform_fp=transform_fp, n_views=n_views, theta=theta, radius=radius
)
| test_loader = test_dataset.loader(args.batch_size) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: wangzhecheng/SkyScript
# Path: src/open_clip/model.py
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == 'bf16':
cast_dtype = torch.bfloat16
elif precision == 'fp16':
cast_dtype = torch.float16
return cast_dtype
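# In short: 'bf16' -> torch.bfloat16, 'fp16' -> torch.float16, anything else (e.g. 'fp32' or the amp modes)
# -> None, which callers typically treat as "do not cast inputs".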
# Path: src/open_clip/model.py
def trace_model(model, batch_size=256, device=torch.device('cpu')):
model.eval()
image_size = model.visual.image_size
example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
model = torch.jit.trace_module(
model,
inputs=dict(
forward=(example_images, example_text),
encode_text=(example_text,),
encode_image=(example_images,)
))
model.visual.image_size = image_size
return model
# Path: src/open_clip/factory.py
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
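# Illustrative usage (argument values are assumptions):
#   model, preprocess_train, preprocess_val = create_model_and_transforms(
#       "ViT-B-32", pretrained="openai", precision="amp", device=device)
# The two returned transforms share the model's image size and normalization statistics and differ only in
# whether training-time augmentation (aug_cfg) is applied.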
# Path: src/training/zero_shot.py
def zero_shot_classifier(model, classnames, templates, args):
tokenizer = get_tokenizer(args.model)
with torch.no_grad():
zeroshot_weights = []
for classname in tqdm(classnames):
texts = [template(classname) for template in templates] # format with class
texts = tokenizer(texts).to(args.device) # tokenize
if args.distributed and not args.horovod:
class_embeddings = model.module.encode_text(texts)
else:
class_embeddings = model.encode_text(texts)
class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device)
return zeroshot_weights
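# Illustrative downstream use (convention assumed from open_clip): zeroshot_weights has shape
# [embed_dim, n_classes]; zero-shot logits are then typically computed as
#   logits = scale * F.normalize(image_features, dim=-1) @ zeroshot_weights
# (scale is e.g. 100 or the model's logit_scale), and argmax over the last dimension gives the predicted class.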
# Path: src/training/logger.py
def setup_logging(log_file, level, include_host=False):
if include_host:
import socket
hostname = socket.gethostname()
formatter = logging.Formatter(
f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
else:
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
logging.root.setLevel(level)
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
logger.setLevel(level)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logging.root.addHandler(stream_handler)
if log_file:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setFormatter(formatter)
logging.root.addHandler(file_handler)
# Path: src/training/distributed.py
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
# Path: src/training/distributed.py
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
if args.horovod:
assert hvd is not None, "Horovod is not installed"
hvd.init()
args.local_rank = int(hvd.local_rank())
args.rank = hvd.rank()
args.world_size = hvd.size()
args.distributed = True
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
elif is_using_distributed():
if 'SLURM_PROCID' in os.environ:
# DDP via SLURM
args.local_rank, args.rank, args.world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
else:
# DDP via torchrun, torch.distributed.launch
args.local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url)
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
args.distributed = True
if torch.cuda.is_available():
if args.distributed and not args.no_set_device_rank:
device = 'cuda:%d' % args.local_rank
else:
device = 'cuda:0'
torch.cuda.set_device(device)
else:
device = 'cpu'
args.device = device
device = torch.device(device)
return device
# Path: src/training/distributed.py
def broadcast_object(args, obj, src=0):
# broadcast a pickle-able python object from rank-0 to all ranks
if args.horovod:
return hvd.broadcast_object(obj, root_rank=src)
else:
if args.rank == src:
objects = [obj]
else:
objects = [None]
dist.broadcast_object_list(objects, src=src)
return objects[0]
# Path: src/training/precision.py
def get_autocast(precision):
if precision == 'amp':
return torch.cuda.amp.autocast
elif precision == 'amp_bfloat16' or precision == 'amp_bf16':
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return suppress
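# Illustrative usage:
#   autocast = get_autocast(args.precision)
#   with autocast():
#       image_features = model.encode_image(images)
# For non-amp precisions the returned `suppress` acts as a no-op context manager.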
# Path: params.py
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--root-data-dir",
type=str,
default=None,
help="Root directory to datasets",
)
parser.add_argument(
"--train-data",
type=str,
default=None,
help="Path to file(s) with training data. When using webdataset, multiple datasources can be combined using the `::` separator.",
)
parser.add_argument(
"--train-data-upsampling-factors",
type=str,
default=None,
help=(
"When using multiple data sources with webdataset and sampling with replacement, this can be used to upsample specific data sources. "
"Similar to --train-data, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) "
"By default, datapoints are sampled uniformly regardless of the dataset sizes."
)
)
parser.add_argument(
"--val-data",
type=str,
default=None,
help="Path to file(s) with validation data",
)
parser.add_argument(
"--train-num-samples",
type=int,
default=None,
help="Number of samples in dataset. Required for webdataset if not available in info file.",
)
parser.add_argument(
"--val-num-samples",
type=int,
default=None,
help="Number of samples in dataset. Useful for webdataset if not available in info file.",
)
parser.add_argument(
"--dataset-type",
choices=["webdataset", "csv", "synthetic", "auto"],
default="auto",
help="Which type of dataset to process."
)
parser.add_argument(
"--dataset-resampled",
default=False,
action="store_true",
help="Whether to use sampling with replacement for webdataset shard selection."
)
parser.add_argument(
"--csv-separator",
type=str,
default="\t",
help="For csv-like datasets, which separator to use."
)
parser.add_argument(
"--csv-img-key",
type=str,
default="filepath",
help="For csv-like datasets, the name of the key for the image paths."
)
parser.add_argument(
"--csv-caption-key",
type=str,
default="title",
help="For csv-like datasets, the name of the key for the captions."
)
parser.add_argument(
"--imagenet-val",
type=str,
default=None,
help="Path to imagenet val set for conducting zero shot evaluation.",
)
parser.add_argument(
"--imagenet-v2",
type=str,
default=None,
help="Path to imagenet v2 for conducting zero shot evaluation.",
)
parser.add_argument(
"--logs",
type=str,
default="./logs/",
help="Where to store tensorboard logs. Use None to avoid storing logs.",
)
parser.add_argument(
"--log-local",
action="store_true",
default=False,
help="log files on local master, otherwise global master only.",
)
parser.add_argument(
"--name",
type=str,
default=None,
help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
)
parser.add_argument(
"--workers", type=int, default=1, help="Number of dataloader workers per GPU."
)
parser.add_argument(
"--batch-size", type=int, default=64, help="Batch size per GPU."
)
parser.add_argument(
"--epochs", type=int, default=32, help="Number of epochs to train for."
)
parser.add_argument(
"--epochs-cooldown", type=int, default=None,
help="When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards."
)
parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
parser.add_argument(
"--warmup", type=int, default=10000, help="Number of steps to warmup for."
)
parser.add_argument(
"--use-bn-sync",
default=False,
action="store_true",
help="Whether to use batch norm sync.")
parser.add_argument(
"--skip-scheduler",
action="store_true",
default=False,
help="Use this flag to skip the learning rate decay.",
)
parser.add_argument(
"--lr-scheduler",
type=str,
default='cosine',
help="LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine",
)
parser.add_argument(
"--lr-cooldown-end", type=float, default=0.0,
help="End learning rate for cooldown schedule. Default: 0"
)
parser.add_argument(
"--lr-cooldown-power", type=float, default=1.0,
help="Power for polynomial cooldown schedule. Default: 1.0 (linear decay)"
)
parser.add_argument(
"--save-frequency", type=int, default=1, help="How often to save checkpoints."
)
parser.add_argument(
"--save-most-recent",
action="store_true",
default=False,
help="Always save the most recent model trained to epoch_latest.pt.",
)
parser.add_argument(
"--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
)
parser.add_argument(
"--val-frequency", type=int, default=1, help="How often to run evaluation with val data."
)
parser.add_argument(
"--resume",
default=None,
type=str,
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--precision",
choices=["amp", "amp_bf16", "amp_bfloat16", "bf16", "fp16", "fp32"],
default="amp",
help="Floating point precision."
)
parser.add_argument(
"--model",
type=str,
default="RN50",
help="Name of the vision backbone to use.",
)
parser.add_argument(
"--pretrained",
default='',
type=str,
help="Use a pretrained CLIP model weights with the specified tag or file path.",
)
parser.add_argument(
"--pretrained-image",
default=False,
action='store_true',
help="Load imagenet pretrained weights for image tower backbone if available.",
)
parser.add_argument(
"--lock-image",
default=False,
action='store_true',
help="Lock full image tower by disabling gradients.",
)
parser.add_argument(
"--lock-image-unlocked-groups",
type=int,
default=0,
help="Leave last n image tower layer groups unlocked.",
)
parser.add_argument(
"--lock-image-freeze-bn-stats",
default=False,
action='store_true',
help="Freeze BatchNorm running stats in image tower for any locked layers.",
)
parser.add_argument(
'--image-mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override default image mean value of dataset')
parser.add_argument(
'--image-std', type=float, nargs='+', default=None, metavar='STD',
help='Override default image std deviation of dataset')
parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)
parser.add_argument(
"--grad-checkpointing",
default=False,
action='store_true',
help="Enable gradient checkpointing.",
)
parser.add_argument(
"--local-loss",
default=False,
action="store_true",
help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)"
)
parser.add_argument(
"--gather-with-grad",
default=False,
action="store_true",
help="enable full distributed gradient for feature gather"
)
parser.add_argument(
'--force-image-size', type=int, nargs='+', default=None,
help='Override default image size'
)
parser.add_argument(
"--force-quick-gelu",
default=False,
action='store_true',
help="Force use of QuickGELU activation for non-OpenAI transformer models.",
)
parser.add_argument(
"--force-patch-dropout",
default=None,
type=float,
help="Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper",
)
parser.add_argument(
"--force-custom-text",
default=False,
action='store_true',
help="Force use of CustomTextCLIP model (separate text-tower).",
)
parser.add_argument(
"--torchscript",
default=False,
action='store_true',
help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'",
)
parser.add_argument(
"--trace",
default=False,
action='store_true',
help="torch.jit.trace the model for inference / eval only",
)
parser.add_argument(
"--accum-freq", type=int, default=1, help="Update the model every --acum-freq steps."
)
# arguments for distributed training
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--report-to",
default='',
type=str,
help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']"
)
parser.add_argument(
"--wandb-notes",
default='',
type=str,
help="Notes if logging with wandb"
)
parser.add_argument(
"--wandb-project-name",
type=str,
default='open-clip',
help="Name of the project if logging with wandb.",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="If true, more information is logged."
)
parser.add_argument(
"--copy-codebase",
default=False,
action="store_true",
help="If true, we copy the entire base on the log directory, and execute from there."
)
parser.add_argument(
"--horovod",
default=False,
action="store_true",
help="Use horovod for distributed training."
)
parser.add_argument(
"--ddp-static-graph",
default=False,
action='store_true',
help="Enable static graph optimization for DDP in PyTorch >= 1.11.",
)
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc)."
)
parser.add_argument(
"--seed", type=int, default=0, help="Default random seed."
)
parser.add_argument(
"--grad-clip-norm", type=float, default=None, help="Gradient clip."
)
parser.add_argument(
"--lock-text",
default=False,
action='store_true',
help="Lock full text tower by disabling gradients.",
)
parser.add_argument(
"--lock-text-unlocked-layers",
type=int,
default=0,
help="Leave last n image tower layer groups unlocked.",
)
parser.add_argument(
"--lock-text-freeze-layer-norm",
default=False,
action='store_true',
help="Freeze BatchNorm running stats in image tower for any locked layers.",
)
parser.add_argument(
"--log-every-n-steps",
type=int,
default=100,
help="Log every n steps to tensorboard/console/wandb.",
)
parser.add_argument(
"--coca-caption-loss-weight",
type=float,
default=2.0,
help="Weight assigned to caption loss in CoCa."
)
parser.add_argument(
"--coca-contrastive-loss-weight",
type=float,
default=1.0,
help="Weight assigned to contrastive loss when training CoCa."
)
parser.add_argument(
"--remote-sync",
type=str,
default=None,
help="Optinoally sync with a remote path specified by this arg",
)
parser.add_argument(
"--remote-sync-frequency",
type=int,
default=300,
help="How frequently to sync to a remote directly if --remote-sync is not None.",
)
parser.add_argument(
"--remote-sync-protocol",
choices=["s3", "fsspec"],
default="s3",
help="How to do the remote sync backup if --remote-sync is not None.",
)
parser.add_argument(
"--delete-previous-checkpoint",
default=False,
action="store_true",
help="If true, delete previous checkpoint after storing a new one."
)
parser.add_argument(
"--distill-model",
default=None,
help='Which model arch to distill from, if any.'
)
parser.add_argument(
"--distill-pretrained",
default=None,
help='Which pre-trained weights to distill from, if any.'
)
# newly added flag for adding random rotation into data augmentation
parser.add_argument(
"--random-rotation",
action="store_true",
default=False,
help="If True, add random rotation into image transform for data augmentation (only for training)."
)
# newly added for testing zero-shot and linear probe classification (custom dataset)
parser.add_argument(
"--datasets-for-testing",
nargs='*',
type=str,
default=None,
help="A list of names of datasets for testing zero-shot classification testing",
)
parser.add_argument(
"--classification-mode",
type=str,
default="multiclass",
help="Choose either binary or multiclass",
)
parser.add_argument(
"--test-data",
type=str,
default=None,
help="Path to file(s) with test data (e.g., for testing zero-shot classification)",
)
parser.add_argument(
"--classnames",
type=str,
default=None,
help="Path to txt file containing class names",
)
parser.add_argument(
"--test-data-name",
type=str,
default=None,
help="The name of the test data (e.g., RSICD, EuroSat)",
)
parser.add_argument(
"--csv-class-key",
type=str,
default="label",
help="For csv-like datasets, the name of the key for image labels (for classification)."
)
parser.add_argument(
"--csv-actual-label-key",
type=str,
default="binary",
help="If classification_model=binary, then specify the name of the key for actual binary labels (i.e., 0/1)."
)
parser.add_argument(
"--alpha",
type=float,
default=None,
help="The regularization multiplier of logistic regression to try for linear probing. If None, do a search."
)
parser.add_argument(
"--samples-per-class",
type=str,
default=None,
help="Numbers of samples per class to train logistic regression for linear probing. If None, use full dataset."
)
parser.add_argument(
"--test-result-save-path",
type=str,
default=None,
help="The path to save test results as a pickle file."
)
parser.add_argument(
"--debugging",
action="store_true",
default=False,
help="Whether to use debugging mode, which will return more information."
)
args = parser.parse_args(args)
# If some params are not passed, we use the default values based on model name.
default_params = get_default_params(args.model)
for name, val in default_params.items():
if getattr(args, name) is None:
setattr(args, name, val)
return args
# Path: prompt_templates.py
# Path: benchmark_dataset_info.py
BENCHMARK_DATASET_INFOMATION = {
'aid': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/aid/aid_img_txt_pairs_test.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/aid/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'eurosat': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/eurosat/eurosat_img_txt_pairs_test.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/eurosat/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'fmow': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/fmow/fmow_img_txt_pairs_val.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/fmow/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'nwpu': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/nwpu/img_txt_pairs_train.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/nwpu/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'patternnet': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/patternnet/img_txt_pairs_train.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/patternnet/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'SkyScript_cls': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/SkyScript_cls/img_txt_pairs_val.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/SkyScript_cls/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'millionaid': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/millionaid/img_txt_pairs_train.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/millionaid/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
'rsicb': {
'classification_mode': 'multiclass',
'test_data': BENCHMARK_DATASET_ROOT_DIR + '/rsicb256/img_txt_pairs_train.csv',
'classnames': BENCHMARK_DATASET_ROOT_DIR + '/rsicb256/classnames.txt',
'csv_separator': ',',
'csv_img_key': 'filepath',
'csv_class_key': 'label',
},
}
# Path: test_zero_shot_classification.py
import torch
import numpy as np
import os
import sys
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import torch.nn.functional as F
import random
from PIL import Image
from os.path import join, exists
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from src.open_clip.model import get_cast_dtype, trace_model
from src.open_clip.factory import create_model_and_transforms
from src.training.zero_shot import zero_shot_classifier
from src.training.logger import setup_logging
from src.training.distributed import is_master, init_distributed_device, broadcast_object
from src.training.precision import get_autocast
from params import parse_args
from prompt_templates import template_dict
from benchmark_dataset_info import BENCHMARK_DATASET_INFOMATION
Image.MAX_IMAGE_PIXELS = 1000000000
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
| random.seed(seed + rank) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JarodMica/ai-voice-cloning
# Path: modules/rvc/infer/modules/uvr5/modules.py
def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):
infos = []
try:
inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
save_root_vocal = (
save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
)
save_root_ins = (
save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
)
if model_name == "onnx_dereverb_By_FoxJoy":
pre_fun = MDXNetDereverb(15, config.device)
else:
func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho
pre_fun = func(
agg=int(agg),
model_path=os.path.join(
os.getenv("weight_uvr5_root"), model_name + ".pth"
),
device=config.device,
is_half=config.is_half,
)
if inp_root != "":
paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]
else:
paths = [path.name for path in paths]
for path in paths:
inp_path = os.path.join(inp_root, path)
need_reformat = 1
done = 0
try:
info = ffmpeg.probe(inp_path, cmd="ffprobe")
if (
info["streams"][0]["channels"] == 2
and info["streams"][0]["sample_rate"] == "44100"
):
need_reformat = 0
pre_fun._path_audio_(
inp_path, save_root_ins, save_root_vocal, format0
)
done = 1
except:
need_reformat = 1
traceback.print_exc()
if need_reformat == 1:
tmp_path = "%s/%s.reformatted.wav" % (
os.path.join(os.environ["TEMP"]),
os.path.basename(inp_path),
)
os.system(
"ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y"
% (inp_path, tmp_path)
)
inp_path = tmp_path
try:
if done == 0:
pre_fun.path_audio(
inp_path, save_root_ins, save_root_vocal, format0
)
infos.append("%s->Success" % (os.path.basename(inp_path)))
yield "\n".join(infos)
except:
try:
if done == 0:
pre_fun._path_audio_(
inp_path, save_root_ins, save_root_vocal, format0
)
infos.append("%s->Success" % (os.path.basename(inp_path)))
yield "\n".join(infos)
except:
infos.append(
"%s->%s" % (os.path.basename(inp_path), traceback.format_exc())
)
yield "\n".join(infos)
except:
infos.append(traceback.format_exc())
yield "\n".join(infos)
finally:
try:
if model_name == "onnx_dereverb_By_FoxJoy":
del pre_fun.pred.model
del pre_fun.pred.model_
else:
del pre_fun.model
del pre_fun
except:
traceback.print_exc()
if torch.cuda.is_available():
torch.cuda.empty_cache()
logger.info("Executed torch.cuda.empty_cache()")
yield "\n".join(infos)
# Path: modules/rvc/infer/modules/vc/modules.py
class VC:
def __init__(self, config):
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
def get_vc(self, sid, *to_return_protect):
logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
if self.hubert_model is not None:  # with polling in mind, check whether sid switched from a loaded model to no model
logger.info("Clean model cache")
del (
self.net_g,
self.n_spk,
self.vc,
self.hubert_model,
self.tgt_sr,
) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
### without this workaround, the cleanup below is not thorough
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
del self.net_g, self.cpt
if torch.cuda.is_available():
torch.cuda.empty_cache()
return (
{"visible": False, "__type__": "update"},
{
"visible": True,
"value": to_return_protect0,
"__type__": "update",
},
{
"visible": True,
"value": to_return_protect1,
"__type__": "update",
},
"",
"",
)
# person = f'{os.getenv("weight_root")}/{sid}'
person = os.path.join(os.getcwd(), sid)
logger.info(f"Loading: {person}")
self.cpt = torch.load(person, map_location="cpu", weights_only=True)
self.tgt_sr = self.cpt["config"][-1]
self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
synthesizer_class = {
("v1", 1): SynthesizerTrnMs256NSFsid,
("v1", 0): SynthesizerTrnMs256NSFsid_nono,
("v2", 1): SynthesizerTrnMs768NSFsid,
("v2", 0): SynthesizerTrnMs768NSFsid_nono,
}
self.net_g = synthesizer_class.get(
(self.version, self.if_f0), SynthesizerTrnMs256NSFsid
)(*self.cpt["config"], is_half=self.config.is_half)
del self.net_g.enc_q
self.net_g.load_state_dict(self.cpt["weight"], strict=False)
self.net_g.eval().to(self.config.device)
if self.config.is_half:
self.net_g = self.net_g.half()
else:
self.net_g = self.net_g.float()
self.pipeline = Pipeline(self.tgt_sr, self.config)
n_spk = self.cpt["config"][-3]
index = {"value": get_index_path_from_model(sid), "__type__": "update"}
logger.info("Select index: " + index["value"])
return (
(
{"visible": True, "maximum": n_spk, "__type__": "update"},
to_return_protect0,
to_return_protect1,
index,
index,
)
if to_return_protect
else {"visible": True, "maximum": n_spk, "__type__": "update", "tgt_sr" : self.tgt_sr}
)
def vc_single(
self,
sid,
input_audio_path,
f0_up_key,
f0_file,
f0_method,
file_index,
file_index2,
index_rate,
filter_radius,
resample_sr,
rms_mix_rate,
protect,
):
if input_audio_path is None:
return "You need to upload an audio", None
f0_up_key = int(f0_up_key)
try:
audio = load_audio(input_audio_path, 16000)
audio_max = np.abs(audio).max() / 0.95
if audio_max > 1:
audio /= audio_max
times = [0, 0, 0]
if self.hubert_model is None:
self.hubert_model = load_hubert(self.config)
file_index = (
(
file_index.strip(" ")
.strip('"')
.strip("\n")
.strip('"')
.strip(" ")
.replace("trained", "added")
)
if file_index != ""
else file_index2
) # in case the user picked the wrong index file, automatically swap it for them
audio_opt = self.pipeline.pipeline(
self.hubert_model,
self.net_g,
sid,
audio,
input_audio_path,
times,
f0_up_key,
f0_method,
file_index,
index_rate,
self.if_f0,
filter_radius,
self.tgt_sr,
resample_sr,
rms_mix_rate,
self.version,
protect,
f0_file,
)
if self.tgt_sr != resample_sr >= 16000:
tgt_sr = resample_sr
else:
tgt_sr = self.tgt_sr
index_info = (
"Index:\n%s." % file_index
if os.path.exists(file_index)
else "Index not used."
)
return (
"Success.\n%s\nTime:\nnpy: %.2fs, f0: %.2fs, infer: %.2fs."
% (index_info, *times),
(tgt_sr, audio_opt),
)
except:
info = traceback.format_exc()
logger.warning(info)
return info, (None, None)
def vc_multi(
self,
sid,
dir_path,
opt_root,
paths,
f0_up_key,
f0_method,
file_index,
file_index2,
index_rate,
filter_radius,
resample_sr,
rms_mix_rate,
protect,
format1,
):
try:
dir_path = (
dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
) # strip spaces, quotes and newlines that users may copy along with the path
opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
os.makedirs(opt_root, exist_ok=True)
try:
if dir_path != "":
paths = [
os.path.join(dir_path, name) for name in os.listdir(dir_path)
]
else:
paths = [path.name for path in paths]
except:
traceback.print_exc()
paths = [path.name for path in paths]
infos = []
for path in paths:
info, opt = self.vc_single(
sid,
path,
f0_up_key,
None,
f0_method,
file_index,
file_index2,
# file_big_npy,
index_rate,
filter_radius,
resample_sr,
rms_mix_rate,
protect,
)
if "Success" in info:
try:
tgt_sr, audio_opt = opt
if format1 in ["wav", "flac"]:
sf.write(
"%s/%s.%s"
% (opt_root, os.path.basename(path), format1),
audio_opt,
tgt_sr,
)
else:
path = "%s/%s.%s" % (
opt_root,
os.path.basename(path),
format1,
)
with BytesIO() as wavf:
sf.write(wavf, audio_opt, tgt_sr, format="wav")
wavf.seek(0, 0)
with open(path, "wb") as outf:
wav2(wavf, outf, format1)
except:
info += traceback.format_exc()
infos.append("%s->%s" % (os.path.basename(path), info))
yield "\n".join(infos)
yield "\n".join(infos)
except:
yield traceback.format_exc()
# Path: modules/rvc/infer-web.py
import os, sys
import logging
import shutil
import threading
import traceback
import warnings
import json
import pathlib
import fairseq
import faiss
import gradio as gr
import numpy as np
import torch
from random import shuffle
from subprocess import Popen
from time import sleep
from dotenv import load_dotenv
from sklearn.cluster import MiniBatchKMeans
from configs.config import Config
from i18n.i18n import I18nAuto
from modules.rvc.infer.lib.train.process_ckpt import (
change_info,
extract_small_model,
merge,
show_info,
)
from modules.rvc.infer.modules.uvr5.modules import uvr
from modules.rvc.infer.modules.vc.modules import VC
from modules.rvc.infer.modules.onnx.export import export_onnx as eo
now_dir = os.getcwd()
sys.path.append(now_dir)
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)
load_dotenv()
config = Config()
vc = VC(config)
if config.dml == True:
| def forward_dml(ctx, x, scale): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Lavreniuk/EVP
# Path: depth/models_depth/model.py
class EVPDepth(nn.Module):
def __init__(self, args=None, caption_aggregation=False):
super().__init__()
self.max_depth = args.max_depth
self.min_depth = args.min_depth_eval
embed_dim = 192
channels_in = embed_dim*8
channels_out = embed_dim
if args.dataset == 'nyudepthv2':
self.encoder = EVPDepthEncoder(out_dim=channels_in, dataset='nyu', caption_aggregation=caption_aggregation)
else:
self.encoder = EVPDepthEncoder(out_dim=channels_in, dataset='kitti', caption_aggregation=caption_aggregation)
self.decoder = Decoder(channels_in, channels_out, args)
self.decoder.init_weights()
self.mViT = False
self.custom = False
if not self.mViT and not self.custom:
n_bins = 64
bin_embedding_dim = 128
num_out_features = [32, 32, 32, 192]
min_temp = 0.0212
max_temp = 50
btlnck_features = 256
n_attractors = [16, 8, 4, 1]
attractor_alpha = 1000
attractor_gamma = 2
attractor_kind = "mean"
attractor_type = "inv"
self.bin_centers_type = "softplus"
self.bottle_neck = nn.Sequential(
nn.Conv2d(channels_in, btlnck_features, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=False),
nn.Conv2d(btlnck_features, btlnck_features, kernel_size=3, stride=1, padding=1))
for m in self.bottle_neck.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001, bias=0)
SeedBinRegressorLayer = SeedBinRegressorUnnormed
Attractor = AttractorLayerUnnormed
self.seed_bin_regressor = SeedBinRegressorLayer(
btlnck_features, n_bins=n_bins, min_depth=self.min_depth, max_depth=self.max_depth)
self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
self.projectors = nn.ModuleList([
Projector(num_out, bin_embedding_dim)
for num_out in num_out_features
])
self.attractors = nn.ModuleList([
Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=self.min_depth, max_depth=self.max_depth,
alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type)
for i in range(len(num_out_features))
])
last_in = 192 + 1
self.conditional_log_binomial = ConditionalLogBinomial(
last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp)
elif self.mViT and not self.custom:
n_bins = 256
self.adaptive_bins_layer = mViT(192, n_query_channels=192, patch_size=16,
dim_out=n_bins,
embedding_dim=192, norm='linear')
self.conv_out = nn.Sequential(nn.Conv2d(192, n_bins, kernel_size=1, stride=1, padding=0),
nn.Softmax(dim=1))
def forward(self, x, class_ids=None, img_paths=None):
b, c, h, w = x.shape
x = x*2.0 - 1.0 # normalize to [-1, 1]
if h == 480 and w == 480:
new_x = torch.zeros(b, c, 512, 512, device=x.device)
new_x[:, :, 0:480, 0:480] = x
x = new_x
elif h==352 and w==352:
new_x = torch.zeros(b, c, 384, 384, device=x.device)
new_x[:, :, 0:352, 0:352] = x
x = new_x
elif h == 512 and w == 512:
pass
else:
print(h,w)
raise NotImplementedError
conv_feats = self.encoder(x, class_ids, img_paths)
if h == 480 or h == 352:
conv_feats = conv_feats[:, :, :-1, :-1]
self.decoder.remove_hooks()
out_depth, out, x_blocks = self.decoder([conv_feats])
if not self.mViT and not self.custom:
x = self.bottle_neck(conv_feats)
_, seed_b_centers = self.seed_bin_regressor(x)
if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
b_prev = (seed_b_centers - self.min_depth) / \
(self.max_depth - self.min_depth)
else:
b_prev = seed_b_centers
prev_b_embedding = self.seed_projector(x)
for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks):
b_embedding = projector(x)
b, b_centers = attractor(
b_embedding, b_prev, prev_b_embedding, interpolate=True)
b_prev = b.clone()
prev_b_embedding = b_embedding.clone()
rel_cond = torch.sigmoid(out_depth) * self.max_depth
# concat rel depth with last. First interpolate rel depth to last size
rel_cond = nn.functional.interpolate(
rel_cond, size=out.shape[2:], mode='bilinear', align_corners=True)
last = torch.cat([out, rel_cond], dim=1)
b_embedding = nn.functional.interpolate(
b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
x = self.conditional_log_binomial(last, b_embedding)
# Now the depth value is sum(p_x * c_x), where c_x are the bin centers from the last bin tensor
b_centers = nn.functional.interpolate(
b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
out_depth = torch.sum(x * b_centers, dim=1, keepdim=True)
elif self.mViT and not self.custom:
bin_widths_normed, range_attention_maps = self.adaptive_bins_layer(out)
out = self.conv_out(range_attention_maps)
bin_widths = (self.max_depth - self.min_depth) * bin_widths_normed # .shape = N, dim_out
bin_widths = nn.functional.pad(bin_widths, (1, 0), mode='constant', value=self.min_depth)
bin_edges = torch.cumsum(bin_widths, dim=1)
centers = 0.5 * (bin_edges[:, :-1] + bin_edges[:, 1:])
n, dout = centers.size()
centers = centers.view(n, dout, 1, 1)
out_depth = torch.sum(out * centers, dim=1, keepdim=True)
else:
out_depth = torch.sigmoid(out_depth) * self.max_depth
return {'pred_d': out_depth}
# Path: depth/configs/train_options.py
class TrainOptions(BaseOptions):
def initialize(self):
parser = BaseOptions.initialize(self)
# experiment configs
parser.add_argument('--epochs', type=int, default=25)
parser.add_argument('--max_lr', type=float, default=5e-4)
parser.add_argument('--min_lr', type=float, default=3e-5)
parser.add_argument('--weight_decay', type=float, default=5e-2)
parser.add_argument('--layer_decay', type=float, default=0.9)
parser.add_argument('--crop_h', type=int, default=448)
parser.add_argument('--crop_w', type=int, default=576)
parser.add_argument('--log_dir', type=str, default='./logs')
# logging options
parser.add_argument('--val_freq', type=int, default=1)
parser.add_argument('--pro_bar', type=str2bool, default='False')
parser.add_argument('--save_freq', type=int, default=1)
parser.add_argument('--print_freq', type=int, default=100)
parser.add_argument('--save_model', action='store_true')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--save_result', action='store_true')
return parser
# Path: depth/configs/test_options.py
class TestOptions(BaseOptions):
def initialize(self):
parser = BaseOptions.initialize(self)
# experiment configs
parser.add_argument('--ckpt_dir', type=str,
default='./ckpt/best_model_nyu.ckpt',
help='load ckpt path')
parser.add_argument('--result_dir', type=str, default='./results',
help='save result images into result_dir/exp_name')
parser.add_argument('--crop_h', type=int, default=448)
parser.add_argument('--crop_w', type=int, default=576)
parser.add_argument('--save_eval_pngs', action='store_true',
help='save result image into evaluation form')
parser.add_argument('--save_visualize', action='store_true',
help='save result image into visulized form')
return parser
# Path: app.py
import os
import sys
import cv2
import numpy as np
import torch
import glob
import utils
import torchvision.transforms as transforms
import torch.nn.functional as F
import gradio as gr
import tempfile
from depth.models_depth.model import EVPDepth
from models_refer.model import EVPRefer
from depth.configs.train_options import TrainOptions
from depth.configs.test_options import TestOptions
from utils_depth.misc import colorize
from PIL import Image
from transformers import CLIPTokenizer
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'stable-diffusion')))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), 'taming-transformers')))
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), 'depth')))
css = """
#img-display-container {
max-height: 50vh;
}
#img-display-input {
max-height: 40vh;
}
#img-display-output {
max-height: 40vh;
}
"""
def create_depth_demo(model, device):
gr.Markdown("### Depth Prediction demo")
with gr.Row():
input_image = gr.Image(label="Input Image", type='pil', elem_id='img-display-input')
depth_image = gr.Image(label="Depth Map", elem_id='img-display-output')
raw_file = gr.File(label="16-bit raw depth, multiplier:256")
submit = gr.Button("Submit")
def on_submit(image):
transform = transforms.ToTensor()
image = transform(image).unsqueeze(0).to(device)
shape = image.shape
image = torch.nn.functional.interpolate(image, (440,480), mode='bilinear', align_corners=True)
image = F.pad(image, (0, 0, 40, 0))
with torch.no_grad():
pred = model(image)['pred_d']
pred = pred[:,:,40:,:]
pred = torch.nn.functional.interpolate(pred, shape[2:], mode='bilinear', align_corners=True)
pred_d_numpy = pred.squeeze().cpu().numpy()
colored_depth, _, _ = colorize(pred_d_numpy, cmap='gray_r')
tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
raw_depth = Image.fromarray((pred_d_numpy*256).astype('uint16'))
raw_depth.save(tmp.name)
return [colored_depth, tmp.name]
submit.click(on_submit, inputs=[input_image], outputs=[depth_image, raw_file])
examples = gr.Examples(examples=["imgs/test_img1.jpg", "imgs/test_img2.jpg", "imgs/test_img3.jpg", "imgs/test_img4.jpg", "imgs/test_img5.jpg"],
inputs=[input_image])
def create_refseg_demo(model, tokenizer, device):
gr.Markdown("### Referring Segmentation demo")
with gr.Row():
input_image = gr.Image(label="Input Image", type='pil', elem_id='img-display-input')
refseg_image = gr.Image(label="Output Mask", elem_id='img-display-output')
input_text = gr.Textbox(label='Prompt', placeholder='Please upload your image first', lines=2)
submit = gr.Button("Submit")
def on_submit(image, text):
image = np.array(image)
image_t = transforms.ToTensor()(image).unsqueeze(0).to(device)
image_t = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])(image_t)
shape = image_t.shape
image_t = torch.nn.functional.interpolate(image_t, (512,512), mode='bilinear', align_corners=True)
input_ids = tokenizer(text=text, truncation=True, max_length=40, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")['input_ids'].to(device)
with torch.no_grad():
pred = model(image_t, input_ids)
pred = torch.nn.functional.interpolate(pred, shape[2:], mode='bilinear', align_corners=True)
output_mask = pred.cpu().argmax(1).data.numpy().squeeze()
alpha = 0.65
image[output_mask == 0] = (image[output_mask == 0]*alpha).astype(np.uint8)
contours, _ = cv2.findContours(output_mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(image, contours, -1, (0, 255, 0), 2)
return Image.fromarray(image)
submit.click(on_submit, inputs=[input_image, input_text], outputs=refseg_image)
examples = gr.Examples(examples=[["imgs/test_img2.jpg", "green plant"], ["imgs/test_img3.jpg", "chair"], ["imgs/test_img4.jpg", "left green plant"], ["imgs/test_img5.jpg", "man walking on foot"], ["imgs/test_img5.jpg", "the rightest camel"]],
inputs=[input_image, input_text])
def main():
upload_2_models = True
opt = TestOptions().initialize()
args = opt.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if upload_2_models:
model = EVPDepth(args=args, caption_aggregation=True)
model.to(device)
model_weight = torch.load('best_model_nyu.ckpt', map_location=device)['model']
model.load_state_dict(model_weight, strict=False)
model.eval()
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
model_refseg = EVPRefer()
model_refseg.to(device)
model_weight = torch.load('best_model_refcoco.pth', map_location=device)['model']
model_refseg.load_state_dict(model_weight, strict=False)
model_refseg.eval()
del model_weight
print('Models uploaded successfully')
title = "# EVP"
description = """Official demo for **EVP: Enhanced Visual Perception using Inverse Multi-Attentive Feature
Refinement and Regularized Image-Text Alignment**.
EVP is a deep learning model for metric depth estimation from a single image as well as referring segmentation.
Please refer to our [project page](https://lavreniuk.github.io/EVP) or [paper](https://arxiv.org/abs/2312.08548) or [github](https://github.com/Lavreniuk/EVP) for more details."""
with gr.Blocks() as demo:
gr.Markdown(title)
gr.Markdown(description)
if upload_2_models:
with gr.Tab("Depth Prediction"):
| create_depth_demo(model, device) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: penghao-wu/vstar
# Path: VisualSearch/model/VSM.py
class VSMForCausalLM(LlavaLlamaForCausalLM):
def __init__(
self,
config,
**kwargs,
):
if not hasattr(config, "train_mask_decoder"):
config.mm_use_im_start_end = kwargs.pop("use_mm_start_end", True)
config.mm_vision_tower = kwargs.get(
"vision_tower", "openai/clip-vit-large-patch14"
)
self.ce_loss_weight = kwargs.pop("ce_loss_weight", None)
self.dice_loss_weight = kwargs.pop("dice_loss_weight", None)
self.bce_loss_weight = kwargs.pop("bce_loss_weight", None)
self.det_loss_weight = kwargs.pop("det_loss_weight", None)
else:
config.mm_vision_tower = config.vision_tower
self.loc_token_idx = kwargs.pop("loc_token_idx")
super().__init__(config)
self.model = VSMModel(config, **kwargs)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_visual_embs(self, pixel_values: torch.FloatTensor):
with torch.no_grad():
image_embeddings = self.model.owlvit.get_visual_embs(pixel_values)
return image_embeddings
def forward(self, **kwargs):
if "past_key_values" in kwargs:
return super().forward(**kwargs)
return self.model_forward(**kwargs)
def model_forward(
self,
images: torch.FloatTensor,
images_clip: torch.FloatTensor,
input_ids: torch.LongTensor,
labels: torch.LongTensor,
attention_masks: torch.LongTensor,
offset: torch.LongTensor,
masks_list: List[torch.FloatTensor],
label_list: List[torch.Tensor],
bboxes_labels_list: List[torch.FloatTensor],
bboxes_valid_list: torch.Tensor,
masks_valid_list: List[torch.Tensor],
resize_list: List[tuple],
inference: bool = False,
**kwargs,
):
image_embeddings = self.get_visual_embs(images)
batch_size = image_embeddings.shape[0]
assert batch_size == len(offset) - 1
loc_token_mask = input_ids[:, 1:] == self.loc_token_idx
loc_token_mask = torch.cat(
[
loc_token_mask,
torch.zeros((loc_token_mask.shape[0], 1)).bool().cuda(),
],
dim=1,
)
# hack for IMAGE_TOKEN_INDEX (we assume there is only one image, and it is at the front)
loc_token_mask = torch.cat(
[torch.zeros((loc_token_mask.shape[0], 255)).bool().cuda(), loc_token_mask],
dim=1,
)
if inference:
n_batch = 1
length = input_ids.shape[0]
assert images_clip.shape[0] == 1
images_clip_extend = images_clip.expand(length, -1, -1, -1).contiguous()
output_hidden_states = []
for i in range(n_batch):
start_i, end_i = i * length, min((i + 1) * length, input_ids.shape[0])
output_i = super().forward(
images=images_clip_extend[: end_i - start_i],
attention_mask=attention_masks[start_i:end_i],
input_ids=input_ids[start_i:end_i],
output_hidden_states=True,
)
output_hidden_states.append(output_i.hidden_states)
torch.cuda.empty_cache()
output_hidden_states_list = []
output_hidden_states_level = torch.cat(output_hidden_states, dim=0)
output_hidden_states_list.append(output_hidden_states_level)
output_hidden_states = output_hidden_states_list
output = None
else:
images_clip_list = []
for i in range(len(offset) - 1):
start_i, end_i = offset[i], offset[i + 1]
images_clip_i = (
images_clip[i]
.unsqueeze(0)
.expand(end_i - start_i, -1, -1, -1)
.contiguous()
)
images_clip_list.append(images_clip_i)
images_clip = torch.cat(images_clip_list, dim=0)
output = super().forward(
images=images_clip,
attention_mask=attention_masks,
input_ids=input_ids,
labels=labels,
output_hidden_states=True,
)
output_hidden_states = output.hidden_states
# seg
hidden_states_seg = []
assert len(self.model.text_hidden_fcs_seg) == 1
hidden_states_seg.append(self.model.text_hidden_fcs_seg[0](output_hidden_states[-1]))
last_hidden_state_seg = torch.stack(hidden_states_seg, dim=-1).sum(dim=-1)
# det
hidden_states_det = []
assert len(self.model.text_hidden_fcs_det) == 1
hidden_states_det.append(self.model.text_hidden_fcs_det[0](output_hidden_states[-1]))
last_hidden_state_det = torch.stack(hidden_states_det, dim=-1).sum(dim=-1)
pred_embeddings_seg = last_hidden_state_seg[loc_token_mask]
pred_embeddings_det = last_hidden_state_det[loc_token_mask]
loc_token_counts = loc_token_mask.int().sum(-1) # [bs, ]
loc_token_offset = loc_token_counts.cumsum(-1)
loc_token_offset = torch.cat(
[torch.zeros(1).long().cuda(), loc_token_offset], dim=0
)
loc_token_offset = loc_token_offset[offset]
pred_embeddings_seg_ = []
for i in range(len(loc_token_offset) - 1):
start_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]
pred_embeddings_seg_.append(pred_embeddings_seg[start_i:end_i])
pred_embeddings_seg = pred_embeddings_seg_
pred_embeddings_det_ = []
for i in range(len(loc_token_offset) - 1):
start_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]
pred_embeddings_det_.append(pred_embeddings_det[start_i:end_i])
pred_embeddings_det = pred_embeddings_det_
# seg branch
multimask_output = False
pred_masks = []
for i in range(len(pred_embeddings_seg)):
(
sparse_embeddings,
dense_embeddings,
) = self.model.prompt_encoder(
points=None,
boxes=None,
masks=None,
text_embeds=pred_embeddings_seg[i].unsqueeze(1),
)
sparse_embeddings = sparse_embeddings.to(pred_embeddings_seg[i].dtype)
low_res_masks, iou_predictions = self.model.mask_decoder(
image_embeddings=self.model.visual_projection(image_embeddings[i].unsqueeze(0)).permute(0, 3, 1, 2),
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
pred_mask = F.interpolate(
low_res_masks, label_list[i].shape, mode="bilinear", align_corners=False
)
pred_masks.append(pred_mask[:, 0])
gt_masks = masks_list
# det branch
detection_result_batch = []
for i in range(len(pred_embeddings_det)):
bs = pred_embeddings_det[i].shape[0]
detection_result = self.model.owlvit(image_embeddings[i].unsqueeze(0).repeat(bs, 1, 1, 1), pred_embeddings_det[i].unsqueeze(1))
detection_result_batch.append(detection_result)
pred_logits = torch.cat([detection_result['pred_logits'] for detection_result in detection_result_batch], 0)
pred_boxes = torch.cat([detection_result['pred_boxes'] for detection_result in detection_result_batch], 0)
if inference:
return {
"pred_masks": pred_masks,
"gt_masks": gt_masks,
"pred_logits": pred_logits,
"pred_boxes": pred_boxes,
"gt_bboxes": bboxes_labels_list
}
num_boxes = 0
for bboxes_labels, bboxes_valid in zip(bboxes_labels_list, bboxes_valid_list):
if bboxes_valid:
num_boxes += len(bboxes_labels)
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=image_embeddings.device)
num_boxes = torch.clamp(num_boxes, min=1).item()
detection_result_batch = {'pred_logits':pred_logits, 'pred_boxes':pred_boxes}
target_det = []
all_bboxes_valid = []
for bboxes_label, bboxes_valid in zip(bboxes_labels_list, bboxes_valid_list):
target_det.append({"labels":torch.zeros(len(bboxes_label)).to(bboxes_label.device, torch.long), "boxes":bboxes_label})
if bboxes_valid:
all_bboxes_valid.append(torch.ones((min(24*24, len(bboxes_label)), 1)).to(bboxes_label.device, torch.long))
else:
all_bboxes_valid.append(torch.zeros((min(24*24, len(bboxes_label)), 1)).to(bboxes_label.device, torch.long))
all_bboxes_valid = torch.cat(all_bboxes_valid, 0)
loss_dict = self.model.owlvit.criterion(detection_result_batch, target_det, num_boxes)
for loss_k, loss_v in loss_dict.items():
if "loss_ce" in loss_k:
loss_dict[loss_k] = (loss_v*bboxes_valid_list.unsqueeze(-1)).mean()
else:
loss_dict[loss_k] = (loss_v*all_bboxes_valid).sum()
weight_dict = self.model.owlvit.criterion.weight_dict
detection_loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
detection_loss = detection_loss*self.det_loss_weight
model_output = output
output = model_output.logits
ce_loss = model_output.loss
ce_loss = ce_loss * self.ce_loss_weight
mask_bce_loss = 0
mask_dice_loss = 0
num_masks = 0
for batch_idx in range(len(pred_masks)):
gt_mask = gt_masks[batch_idx]
pred_mask = pred_masks[batch_idx]
masks_valid = masks_valid_list[batch_idx]
mask_bce_loss += (
sigmoid_ce_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
* gt_mask.shape[0] * masks_valid
).sum()
mask_dice_loss += (
dice_loss(pred_mask, gt_mask, num_masks=gt_mask.shape[0])
* gt_mask.shape[0] * masks_valid
).sum()
num_masks += masks_valid.sum()
mask_bce_loss = self.bce_loss_weight * mask_bce_loss / (num_masks + 1e-8)
mask_dice_loss = self.dice_loss_weight * mask_dice_loss / (num_masks + 1e-8)
mask_loss = mask_bce_loss + mask_dice_loss
loss = ce_loss + mask_loss + detection_loss
return {
"loss": loss,
"ce_loss": ce_loss,
"mask_bce_loss": mask_bce_loss,
"mask_dice_loss": mask_dice_loss,
"mask_loss": mask_loss,
"detection_loss": detection_loss,
"detection_loss_ce": loss_dict['loss_ce'],
"detection_loss_bbox": loss_dict['loss_bbox'],
"detection_loss_giou": loss_dict['loss_giou'],
}
def inference(
self,
images_clip,
images,
input_ids,
resize_list,
original_size_list,
max_new_tokens=32,
tokenizer=None,
mode = 'vqa'
):
assert mode in ['vqa', 'segmentation', 'detection']
with torch.no_grad():
outputs = self.generate(
images=images_clip,
input_ids=input_ids,
max_new_tokens=max_new_tokens,
num_beams=1,
output_hidden_states=True,
return_dict_in_generate=True,
)
output_hidden_states = outputs.hidden_states[-1]
output_ids = outputs.sequences
if mode == 'vqa':
return output_ids, None, None
loc_token_mask = output_ids[:, 1:] == self.loc_token_idx
# hack for IMAGE_TOKEN_INDEX (we assume there is only one image, and it is at the front)
loc_token_mask = torch.cat(
[
torch.zeros((loc_token_mask.shape[0], 255)).bool().cuda(),
loc_token_mask,
],
dim=1,
)
# seg
hidden_states_seg = []
assert len(self.model.text_hidden_fcs_seg) == 1
hidden_states_seg.append(self.model.text_hidden_fcs_seg[0](output_hidden_states))
last_hidden_state_seg = torch.stack(hidden_states_seg, dim=-1).sum(dim=-1)
# det
hidden_states_det = []
assert len(self.model.text_hidden_fcs_det) == 1
hidden_states_det.append(self.model.text_hidden_fcs_det[0](output_hidden_states))
last_hidden_state_det = torch.stack(hidden_states_det, dim=-1).sum(dim=-1)
pred_embeddings_seg = last_hidden_state_seg[loc_token_mask]
pred_embeddings_det = last_hidden_state_det[loc_token_mask]
loc_token_counts = loc_token_mask.int().sum(-1) # [bs, ]
loc_token_offset = loc_token_counts.cumsum(-1)
loc_token_offset = torch.cat(
[torch.zeros(1).long().cuda(), loc_token_offset], dim=0
)
pred_embeddings_seg_ = []
for i in range(len(loc_token_offset) - 1):
start_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]
pred_embeddings_seg_.append(pred_embeddings_seg[start_i:end_i])
pred_embeddings_seg = pred_embeddings_seg_
pred_embeddings_det_ = []
for i in range(len(loc_token_offset) - 1):
start_i, end_i = loc_token_offset[i], loc_token_offset[i + 1]
pred_embeddings_det_.append(pred_embeddings_det[start_i:end_i])
pred_embeddings_det = pred_embeddings_det_
image_embeddings = self.get_visual_embs(images)
multimask_output = False
pred_masks = []
for i in range(len(pred_embeddings_seg)):
(
sparse_embeddings,
dense_embeddings,
) = self.model.prompt_encoder(
points=None,
boxes=None,
masks=None,
text_embeds=pred_embeddings_seg[i].unsqueeze(1),
)
sparse_embeddings = sparse_embeddings.to(pred_embeddings_seg[i].dtype)
low_res_masks, iou_predictions = self.model.mask_decoder(
image_embeddings=self.model.visual_projection(image_embeddings[i].unsqueeze(0)).permute(0, 3, 1, 2),
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
pred_mask = F.interpolate(
low_res_masks.float(), original_size_list[i], mode="bilinear", align_corners=False
)
pred_masks.append(pred_mask[:, 0])
if mode == 'segmentation':
return None, pred_masks, None
# detection model
detection_result_batch = []
for i in range(len(pred_embeddings_det)):
bs = pred_embeddings_det[i].shape[0]
detection_result = self.model.owlvit(image_embeddings[i].unsqueeze(0).repeat(bs, 1, 1, 1), pred_embeddings_det[i].unsqueeze(1))
detection_result_batch.append(detection_result)
pred_logits = torch.cat([detection_result['pred_logits'] for detection_result in detection_result_batch], 0)
pred_boxes = torch.cat([detection_result['pred_boxes'] for detection_result in detection_result_batch], 0)
detection_result_batch = {'pred_logits':pred_logits, 'pred_boxes':pred_boxes}
return None, pred_masks, detection_result_batch
# Path: VisualSearch/model/llava/conversation.py
class SeparatorStyle(Enum):
class Conversation:
SINGLE = auto()
TWO = auto()
MPT = auto()
PLAIN = auto()
LLAMA_2 = auto()
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
W, H = image.size
H, W = longest_edge, shortest_edge
H, W = shortest_edge, longest_edge
def get_prompt(self):
def append_message(self, role, message):
def get_images(self, return_pil=False):
def expand2square(pil_img, background_color=(122, 116, 104)):
def to_gradio_chatbot(self):
def copy(self):
def dict(self):
# Path: VisualSearch/model/llava/mm_utils.py
def tokenizer_image_token(
prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None
):
prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split("<image>")]
def insert_separator(X, sep):
return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]
input_ids = []
offset = 0
if (
len(prompt_chunks) > 0
and len(prompt_chunks[0]) > 0
and prompt_chunks[0][0] == tokenizer.bos_token_id
):
offset = 1
input_ids.append(prompt_chunks[0][0])
for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):
input_ids.extend(x[offset:])
if return_tensors is not None:
if return_tensors == "pt":
return torch.tensor(input_ids, dtype=torch.long)
raise ValueError(f"Unsupported tensor type: {return_tensors}")
return input_ids
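# Illustrative usage sketch -- an assumption, not part of the original file. With a HuggingFace
# LLaMA tokenizer, a prompt containing "<image>" gets IMAGE_TOKEN_INDEX spliced in at that spot:
#   input_ids = tokenizer_image_token("<image>\nWhere is the dog?", tokenizer,
#                                     image_token_index=IMAGE_TOKEN_INDEX, return_tensors="pt")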
# Path: VisualSearch/utils/utils.py
def expand2square(pil_img, background_color):
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, 0))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, (0, 0))
return result
# Path: VisualSearch/utils/utils.py
DEFAULT_IM_END_TOKEN = "<im_end>"
# Path: VisualSearch/utils/utils.py
DEFAULT_IM_START_TOKEN = "<im_start>"
# Path: VisualSearch/utils/utils.py
DEFAULT_IMAGE_TOKEN = "<image>"
# Path: VisualSearch/utils/utils.py
IMAGE_TOKEN_INDEX = -200
# Path: visual_search.py
import argparse
import os
import sys
import json
import tqdm
import copy
import functools
import spacy
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import cv2
from queue import PriorityQueue
from PIL import Image
from transformers import AutoTokenizer, CLIPImageProcessor
from transformers import OwlViTProcessor
from VisualSearch.model.VSM import VSMForCausalLM
from VisualSearch.model.llava import conversation as conversation_lib
from VisualSearch.model.llava.mm_utils import tokenizer_image_token
from VisualSearch.utils.utils import expand2square
from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN,
DEFAULT_IMAGE_TOKEN, IMAGE_TOKEN_INDEX)
from matplotlib import pyplot as plt
nlp = spacy.load("en_core_web_sm")
def parse_args(args):
parser = argparse.ArgumentParser(description="Visual Search Evaluation")
parser.add_argument("--version", default="craigwu/seal_vsm_7b")
parser.add_argument("--benchmark-folder", default="vstar_bench", type=str)
parser.add_argument("--visualization", action="store_true", default=False)
parser.add_argument("--output_path", default="", type=str)
parser.add_argument("--confidence_low", default=0.3, type=float)
parser.add_argument("--confidence_high", default=0.5, type=float)
parser.add_argument("--target_cue_threshold", default=6.0, type=float)
parser.add_argument("--target_cue_threshold_decay", default=0.7, type=float)
parser.add_argument("--target_cue_threshold_minimum", default=3.0, type=float)
parser.add_argument("--minimum_size_scale", default=4.0, type=float)
parser.add_argument("--minimum_size", default=224, type=int)
parser.add_argument("--model_max_length", default=512, type=int)
parser.add_argument(
"--vision-tower", default="openai/clip-vit-large-patch14", type=str
)
parser.add_argument("--use_mm_start_end", action="store_true", default=True)
parser.add_argument(
"--conv_type",
default="llava_v1",
type=str,
choices=["llava_v1", "llava_llama_2"],
)
return parser.parse_args(args)
def tranverse(token):
children = [_ for _ in token.children]
if len(children) == 0:
return token.i, token.i
left_i = token.i
right_i = token.i
for child in children:
child_left_i, child_right_i = tranverse(child)
left_i = min(left_i, child_left_i)
right_i = max(right_i, child_right_i)
return left_i, right_i
def get_noun_chunks(token):
left_children = []
right_children = []
for child in token.children:
if child.i < token.i:
left_children.append(child)
else:
right_children.append(child)
start_token_i = token.i
for left_child in left_children[::-1]:
if left_child.dep_ in ['amod', 'compound', 'poss']:
start_token_i, _ = tranverse(left_child)
else:
break
end_token_i = token.i
for right_child in right_children:
if right_child.dep_ in ['relcl', 'prep']:
_, end_token_i = tranverse(right_child)
else:
break
return start_token_i, end_token_i
def filter_chunk_list(chunks):
def overlap(min1, max1, min2, max2):
return min(max1, max2) - max(min1, min2)
chunks = sorted(chunks, key=lambda chunk: chunk[1]-chunk[0], reverse=True)
filtered_chunks = []
for chunk in chunks:
flag=True
for exist_chunk in filtered_chunks:
if overlap(exist_chunk[0], exist_chunk[1], chunk[0], chunk[1]) >= 0:
flag = False
break
if flag:
| filtered_chunks.append(chunk) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: worm128/AI-YinMei
# Path: text-generation-webui/extensions/superboogav2/utils.py
def create_metadata_source(source: str):
return {'source': source}
# Path: text-generation-webui/extensions/superboogav2/chromadb.py
def make_collector():
return ChromaCollector(SentenceTransformerEmbedder())
# Path: text-generation-webui/extensions/superboogav2/download_urls.py
def feed_url_into_collector(urls, collector):
all_text = ''
cumulative = ''
urls = urls.strip().split('\n')
cumulative += f'Loading {len(urls)} URLs with {parameters.get_num_threads()} threads...\n\n'
yield cumulative
for update, contents in _download_urls(urls, threads=parameters.get_num_threads()):
yield cumulative + update
cumulative += 'Processing the HTML sources...'
yield cumulative
for content in contents:
soup = BeautifulSoup(content, features="lxml")
for script in soup(["script", "style"]):
script.extract()
strings = soup.stripped_strings
if parameters.get_is_strong_cleanup():
strings = [s for s in strings if re.search("[A-Za-z] ", s)]
text = '\n'.join([s.strip() for s in strings])
all_text += text
process_and_add_to_collector(all_text, collector, False, create_metadata_source('url-download'))
# Path: text-generation-webui/extensions/superboogav2/data_processor.py
def process_and_add_to_collector(corpus: str, collector: ChromaCollector, clear_collector_before_adding: bool, metadata: dict):
# Defining variables
chunk_lens = [int(len.strip()) for len in parameters.get_chunk_len().split(',')]
context_len = [int(len.strip()) for len in parameters.get_context_len().split(',')]
if len(context_len) >= 3:
raise f"Context len has too many values: {len(context_len)}"
if len(context_len) == 2:
context_left = context_len[0]
context_right = context_len[1]
else:
context_left = context_right = context_len[0]
data_chunks = []
data_chunks_with_context = []
data_chunk_starting_indices = []
# Handling chunk_regex
if parameters.get_chunk_regex():
if parameters.get_chunk_separator():
cumulative_length = 0 # This variable will store the length of the processed corpus
sections = corpus.split(parameters.get_chunk_separator())
for section in sections:
special_chunks = list(re.finditer(parameters.get_chunk_regex(), section))
for match in special_chunks:
chunk = match.group(0)
start_index = match.start()
end_index = start_index + len(chunk)
context = section[max(0, start_index - context_left):min(len(section), end_index + context_right)]
data_chunks.append(chunk)
data_chunks_with_context.append(context)
data_chunk_starting_indices.append(cumulative_length + max(0, start_index - context_left))
cumulative_length += len(section) + len(parameters.get_chunk_separator()) # Update the length of the processed corpus
else:
special_chunks = list(re.finditer(parameters.get_chunk_regex(), corpus))
for match in special_chunks:
chunk = match.group(0)
start_index = match.start()
end_index = start_index + len(chunk)
context = corpus[max(0, start_index - context_left):min(len(corpus), end_index + context_right)]
data_chunks.append(chunk)
data_chunks_with_context.append(context)
data_chunk_starting_indices.append(max(0, start_index - context_left))
for chunk_len in chunk_lens:
# Breaking the data into chunks and adding those to the db
if parameters.get_chunk_separator():
cumulative_length = 0 # This variable will store the length of the processed corpus
sections = corpus.split(parameters.get_chunk_separator())
for section in sections:
chunks, chunks_with_context, context_start_indices = _create_chunks_with_context(section, chunk_len, context_left, context_right)
context_start_indices = [cumulative_length + i for i in context_start_indices] # Add the length of the processed corpus to each start index
data_chunks.extend(chunks)
data_chunks_with_context.extend(chunks_with_context)
data_chunk_starting_indices.extend(context_start_indices)
cumulative_length += len(section) + len(parameters.get_chunk_separator()) # Update the length of the processed corpus
else:
chunks, chunks_with_context, context_start_indices = _create_chunks_with_context(corpus, chunk_len, context_left, context_right)
data_chunks.extend(chunks)
data_chunks_with_context.extend(chunks_with_context)
data_chunk_starting_indices.extend(context_start_indices)
data_chunks = [preprocess_text_no_summary(chunk) for chunk in data_chunks]
data_chunks, data_chunks_with_context, data_chunk_starting_indices = _clear_chunks(
data_chunks, data_chunks_with_context, data_chunk_starting_indices
)
if clear_collector_before_adding:
collector.clear()
collector.add(data_chunks, data_chunks_with_context, data_chunk_starting_indices, [metadata]*len(data_chunks) if metadata is not None else None)
# Path: text-generation-webui/extensions/superboogav2/benchmark.py
def benchmark(config_path, collector):
# Get the current system date
sysdate = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
filename = f"benchmark_{sysdate}.txt"
# Open the log file in append mode
with open(filename, 'a') as log:
with open(config_path, 'r') as f:
data = json.load(f)
total_points = 0
max_points = 0
for item in data:
filepath = item["text"]
corpus = ""
# Check if the file exists
if os.path.isfile(Path(filepath)):
# Open the file and read its content
with open(Path(filepath), 'r') as file:
corpus = file.read()
process_and_add_to_collector(corpus, collector, True, create_metadata_source('benchmark'))
else:
raise FileNotFoundError(f'Cannot find specified file {filepath}.')
for question_group in item["questions"]:
question_variants = question_group["question_variants"]
criteria = question_group["criteria"]
for q in question_variants:
max_points += len(criteria)
processed_text = preprocess_text(q)
# Get the most similar chunks
results = collector.get_sorted_by_dist(processed_text, n_results=get_chunk_count(), max_token_count=get_max_token_count())
points = 0
for c in criteria:
for p in results:
if c in p:
points += 1
total_points += 1
break
info = f"The question '{q}' scored {points}/{len(criteria)} points."
print(info, file=log)
print('\n---\n', file=log)
print(f'##Total points:\n\n{total_points}/{max_points}', file=log)
return total_points, max_points
# Path: text-generation-webui/extensions/superboogav2/optimize.py
def optimize(collector, progress=gr.Progress()):
# Inform the user that something is happening.
progress(0, desc=f'Setting Up...')
# Track the current step
current_step = 0
# Track the best score
best_score = 0
# Dictionary for caching scores
scores_cache = {}
def objective_function(trial):
nonlocal current_step
nonlocal best_score
nonlocal scores_cache
params = {}
for key, val in Parameters.getInstance().hyperparameters.items():
if _is_optimization_param(val):
params[key] = trial.suggest_categorical(key, val['categories'])
_set_hyperparameters(params)
params_hash = _get_params_hash(params)
# If the score for these parameters is in the cache, return it
if params_hash in scores_cache:
return scores_cache[params_hash]
# Benchmark the current set of parameters.
score, max_score = benchmark(Path("extensions/superboogav2/benchmark_texts/questions.json"), collector)
# Cache the score
scores_cache[params_hash] = score
result = json.dumps(_convert_np_types(params), indent=4)
result += f'\nScore: {score}/{max_score}'
logger.debug(result)
# Increment the current step
current_step += 1
# Update the best score
best_score = max(best_score, score)
# Update the progress
progress(current_step / parameters.get_optimization_steps(), desc=f'Optimizing... {current_step}/{parameters.get_optimization_steps()}')
return -score
# Run the optimization.
study = optuna.create_study()
study.optimize(objective_function, n_trials=int(parameters.get_optimization_steps()))
best_params = study.best_params
_set_hyperparameters(best_params)
# Convert results to a markdown string.
str_result = f"## Best parameters:\n\n{_markdown_hyperparams()}\n\n## Score:\n\n{best_score}"
# Save to JSON file
with open('best_params.json', 'w') as fp:
json.dump(_convert_np_types(best_params), fp, indent=4)
return str_result
# Path: text-generation-webui/extensions/superboogav2/notebook_handler.py
def input_modifier_internal(string, collector):
# Sanity check.
if shared.is_chat():
return string
# Find the user input
pattern = re.compile(r"<\|begin-user-input\|>(.*?)<\|end-user-input\|>", re.DOTALL)
match = re.search(pattern, string)
if match:
# Preprocess the user prompt.
user_input = match.group(1).strip()
user_input = preprocess_text(user_input)
logger.debug(f"Preprocessed User Input: {user_input}")
# Get the most similar chunks
results = collector.get_sorted_by_dist(user_input, n_results=parameters.get_chunk_count(), max_token_count=int(parameters.get_max_token_count()))
# Make the injection
string = string.replace('<|injection-point|>', create_context_text(results))
return _remove_special_tokens(string)
# Path: text-generation-webui/extensions/superboogav2/chat_handler.py
def custom_generate_chat_prompt_internal(user_input: str, state: dict, collector: ChromaCollector, **kwargs):
if parameters.get_add_chat_to_data():
# Get the whole history as one string
history_as_text = _concatinate_history(kwargs['history'], state)
if history_as_text:
# Delete all documents that were auto-inserted
collector.delete(ids_to_delete=None, where=CHAT_METADATA)
# Insert the processed history
process_and_add_to_collector(history_as_text, collector, False, CHAT_METADATA)
if _should_query(user_input):
user_input = _remove_tag_if_necessary(user_input)
results = collector.get_sorted_by_dist(user_input, n_results=parameters.get_chunk_count(), max_token_count=int(parameters.get_max_token_count()))
# Check if the strategy is to modify the last message. If so, prepend or append to the user query.
if parameters.get_injection_strategy() == parameters.APPEND_TO_LAST:
user_input = user_input + create_context_text(results)
elif parameters.get_injection_strategy() == parameters.PREPEND_TO_LAST:
user_input = create_context_text(results) + user_input
elif parameters.get_injection_strategy() == parameters.HIJACK_LAST_IN_CONTEXT:
_hijack_last(create_context_text(results), kwargs['history'], state['truncation_length'], state)
return chat.generate_chat_prompt(user_input, state, **kwargs)
# Path: text-generation-webui/extensions/superboogav2/api.py
class APIManager:
def __init__(self, collector: ChromaCollector):
self.server = None
self.collector = collector
self.is_running = False
def start_server(self, port: int):
if self.server is not None:
print("Server already running.")
return
address = '0.0.0.0' if shared.args.listen else '127.0.0.1'
self.server = CustomThreadingHTTPServer((address, port), Handler, self.collector)
logger.info(f'Starting chromaDB API at http://{address}:{port}/api')
Thread(target=self.server.serve_forever, daemon=True).start()
self.is_running = True
def stop_server(self):
if self.server is not None:
logger.info(f'Stopping chromaDB API.')
self.server.shutdown()
self.server.server_close()
self.server = None
self.is_running = False
def is_server_running(self):
return self.is_running
# Path: text-generation-webui/extensions/superboogav2/script.py
import os
import textwrap
import codecs
import gradio as gr
import extensions.superboogav2.parameters as parameters
from pathlib import Path
from modules.logging_colors import logger
from modules import shared
from .utils import create_metadata_source
from .chromadb import make_collector
from .download_urls import feed_url_into_collector
from .data_processor import process_and_add_to_collector
from .benchmark import benchmark
from .optimize import optimize
from .notebook_handler import input_modifier_internal
from .chat_handler import custom_generate_chat_prompt_internal
from .api import APIManager
parameters.get_delta_start(),
parameters.get_min_num_length(),
parameters.get_num_conversion_strategy(),
preprocess_pipeline,
parameters.get_chunk_count(),
parameters.get_context_len(),
parameters.get_chunk_len()
]
def _apply_settings(optimization_steps, time_power, time_steepness, significant_level, min_sentences, new_dist_strat, delta_start, min_number_length, num_conversion,
preprocess_pipeline, api_port, api_on, injection_strategy, add_chat_to_data, manual, postfix, data_separator, prefix, max_token_count,
chunk_count, chunk_sep, context_len, chunk_regex, chunk_len, threads, strong_cleanup):
logger.debug('Applying settings.')
try:
parameters.set_optimization_steps(optimization_steps)
parameters.set_significant_level(significant_level)
parameters.set_min_num_sentences(min_sentences)
parameters.set_new_dist_strategy(new_dist_strat)
parameters.set_delta_start(delta_start)
parameters.set_min_num_length(min_number_length)
parameters.set_num_conversion_strategy(num_conversion)
parameters.set_api_port(api_port)
parameters.set_api_on(api_on)
parameters.set_injection_strategy(injection_strategy)
parameters.set_add_chat_to_data(add_chat_to_data)
parameters.set_manual(manual)
parameters.set_postfix(codecs.decode(postfix, 'unicode_escape'))
parameters.set_data_separator(codecs.decode(data_separator, 'unicode_escape'))
parameters.set_prefix(codecs.decode(prefix, 'unicode_escape'))
parameters.set_max_token_count(max_token_count)
parameters.set_time_power(time_power)
parameters.set_time_steepness(time_steepness)
parameters.set_chunk_count(chunk_count)
parameters.set_chunk_separator(codecs.decode(chunk_sep, 'unicode_escape'))
parameters.set_context_len(context_len)
parameters.set_chunk_regex(chunk_regex)
parameters.set_chunk_len(chunk_len)
parameters.set_num_threads(threads)
parameters.set_strong_cleanup(strong_cleanup)
preprocess_choices = ['Lower Cases', 'Remove Punctuation', 'Remove Adverbs', 'Remove Stop Words', 'Lemmatize', 'Merge Spaces', 'Strip Edges']
for preprocess_method in preprocess_choices:
if preprocess_method == 'Lower Cases':
parameters.set_to_lower(preprocess_method in preprocess_pipeline)
elif preprocess_method == 'Remove Punctuation':
parameters.set_remove_punctuation(preprocess_method in preprocess_pipeline)
elif preprocess_method == 'Remove Adverbs':
parameters.set_remove_specific_pos(preprocess_method in preprocess_pipeline)
elif preprocess_method == 'Remove Stop Words':
parameters.set_remove_stopwords(preprocess_method in preprocess_pipeline)
elif preprocess_method == 'Lemmatize':
parameters.set_lemmatize(preprocess_method in preprocess_pipeline)
elif preprocess_method == 'Merge Spaces':
parameters.set_merge_spaces(preprocess_method in preprocess_pipeline)
elif preprocess_method == 'Strip Edges':
parameters.set_strip(preprocess_method in preprocess_pipeline)
# Based on API on/off, start or stop the server
if api_manager is not None:
if parameters.get_api_on() and (not api_manager.is_server_running()):
api_manager.start_server(parameters.get_api_port())
elif (not parameters.get_api_on()) and api_manager.is_server_running():
api_manager.stop_server()
except Exception as e:
logger.warn(f'Could not properly apply settings: {str(e)}')
def custom_generate_chat_prompt(user_input, state, **kwargs):
return custom_generate_chat_prompt_internal(user_input, state, collector, **kwargs)
def input_modifier(string):
return input_modifier_internal(string, collector)
def ui():
with gr.Accordion("Click for more information...", open=False):
gr.Markdown(textwrap.dedent("""
## About
This extension takes a dataset as input, breaks it into chunks, and adds the result to a local/offline Chroma database.
The database is then queried during inference time to get the excerpts that are closest to your input. The idea is to create an arbitrarily large pseudo context.
The core methodology was developed and contributed by kaiokendev, who is working on improvements to the method in this repository: https://github.com/kaiokendev/superbig
## Data input
Start by entering some data in the interface below and then clicking on "Load data".
Each time you load some new data, the old chunks are discarded.
## Chat mode
#### Instruct
On each turn, the chunks will be compared to your current input and the most relevant matches will be appended to the input in the following format:
```
Consider the excerpts below as additional context:
...
```
The injection doesn't make it into the chat history. It is only used in the current generation.
#### Regular chat
The chunks from the external data sources are ignored, and the chroma database is built based on the chat history instead. The most relevant past exchanges relative to the present input are added to the context string. This way, the extension acts as a long term memory.
## Notebook/default modes
Your question must be manually specified between `<|begin-user-input|>` and `<|end-user-input|>` tags, and the injection point must be specified with `<|injection-point|>`.
The special tokens mentioned above (`<|begin-user-input|>`, `<|end-user-input|>`, and `<|injection-point|>`) are removed in the background before the text generation begins.
Here is an example in Vicuna 1.1 format:
| ``` |
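The concrete Vicuna-format example is cut off at this point in the excerpt. The snippet below is a hypothetical illustration (not the extension's original text) of where the three special tokens would sit in a notebook-mode prompt:

```python
# Illustrative only: a hypothetical notebook-mode prompt in Vicuna 1.1 style.
# The question sits between the begin/end user-input tags; retrieved chunks replace the injection point.
prompt = (
    "A chat between a curious user and an artificial intelligence assistant.\n\n"
    "USER:\n\n"
    "<|begin-user-input|>\n"
    "What datasets are mentioned in the text below?\n"
    "<|end-user-input|>\n\n"
    "<|injection-point|>\n\n"
    "ASSISTANT:"
)
print(prompt)
```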
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sinoyou/nelf-pro
# Path: nerfstudio/field_components/mlp.py
class MLP(FieldComponent):
"""Multilayer perceptron
Args:
in_dim: Input layer dimension
num_layers: Number of network layers
layer_width: Width of each MLP layer
out_dim: Output layer dimension. Uses layer_width if None.
activation: intermediate layer activation function.
out_activation: output activation function.
"""
def __init__(
self,
in_dim: int,
num_layers: int,
layer_width: int,
out_dim: Optional[int] = None,
skip_connections: Optional[Tuple[int]] = None,
activation: Optional[nn.Module] = nn.ReLU(),
out_activation: Optional[nn.Module] = None,
) -> None:
super().__init__()
self.in_dim = in_dim
assert self.in_dim > 0
self.out_dim = out_dim if out_dim is not None else layer_width
self.num_layers = num_layers
self.layer_width = layer_width
self.skip_connections = skip_connections
self._skip_connections: Set[int] = set(skip_connections) if skip_connections else set()
self.activation = activation
self.out_activation = out_activation
self.net = None
self.build_nn_modules()
def build_nn_modules(self) -> None:
"""Initialize multi-layer perceptron."""
layers = []
if self.num_layers == 1:
layers.append(nn.Linear(self.in_dim, self.out_dim))
else:
for i in range(self.num_layers - 1):
if i == 0:
assert i not in self._skip_connections, "Skip connection at layer 0 doesn't make sense."
layers.append(nn.Linear(self.in_dim, self.layer_width))
elif i in self._skip_connections:
layers.append(nn.Linear(self.layer_width + self.in_dim, self.layer_width))
else:
layers.append(nn.Linear(self.layer_width, self.layer_width))
layers.append(nn.Linear(self.layer_width, self.out_dim))
self.layers = nn.ModuleList(layers)
def forward(self, in_tensor: TensorType["bs":..., "in_dim"]) -> TensorType["bs":..., "out_dim"]:
"""Process input with a multilayer perceptron.
Args:
in_tensor: Network input
Returns:
MLP network output
"""
x = in_tensor
for i, layer in enumerate(self.layers):
# as checked in `build_nn_modules`, 0 should not be in `_skip_connections`
if i in self._skip_connections:
x = torch.cat([in_tensor, x], -1)
x = layer(x)
if self.activation is not None and i < len(self.layers) - 1:
x = self.activation(x)
if self.out_activation is not None:
x = self.out_activation(x)
return x
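A minimal usage sketch of the MLP component above, assuming nerfstudio is importable; the dimensions and the skip connection at layer 2 are chosen purely for illustration.

```python
import torch
from nerfstudio.field_components.mlp import MLP  # import path taken from the excerpt above

# 4-layer MLP that feeds the raw input back in at layer 2 via a skip connection.
mlp = MLP(in_dim=3, num_layers=4, layer_width=64, out_dim=1,
          skip_connections=(2,), out_activation=torch.nn.Sigmoid())
x = torch.rand(8, 3)   # batch of 8 three-dimensional points
y = mlp(x)             # shape (8, 1), squashed to (0, 1) by the output sigmoid
```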
# Path: nerfstudio/field_components/embedding.py
class Embedding(FieldComponent):
"""Index into embeddings.
# TODO: add different types of initializations
Args:
in_dim: Number of embeddings
out_dim: Dimension of the embedding vectors
"""
def __init__(self, in_dim: int, out_dim: int) -> None:
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.build_nn_modules()
def build_nn_modules(self) -> None:
self.embedding = torch.nn.Embedding(self.in_dim, self.out_dim)
def mean(self, dim=0):
"""Return the mean of the embedding weights along a dim."""
return self.embedding.weight.mean(dim)
def forward(self, in_tensor: TensorType[..., "input_dim"]) -> TensorType[..., "output_dim"]:
"""Call forward
Args:
in_tensor: input tensor to process
"""
return self.embedding(in_tensor)
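A short sketch of how the Embedding component above can back a per-image appearance code; the sizes are illustrative.

```python
import torch
from nerfstudio.field_components.embedding import Embedding  # import path taken from the excerpt above

appearance = Embedding(in_dim=100, out_dim=32)   # one 32-dim latent per training image
camera_indices = torch.tensor([0, 7, 99])
codes = appearance(camera_indices)               # shape (3, 32)
mean_code = appearance.mean(dim=0)               # average code, e.g. for rendering held-out views
```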
# Path: nerfstudio/field_components/activations.py
class _TruncExp(Function): # pylint: disable=abstract-method
def forward(ctx, x): # pylint: disable=arguments-differ
def backward(ctx, g): # pylint: disable=arguments-differ
# Path: nerfstudio/fields/base_field.py
class Field(nn.Module):
"""Base class for fields."""
def __init__(self) -> None:
super().__init__()
self._sample_locations = None
self._density_before_activation = None
def density_fn(self, positions: TensorType["bs":..., 3]) -> TensorType["bs":..., 1]:
"""Returns only the density. Used primarily with the density grid.
Args:
positions: the origin of the samples/frustums
"""
# Need to figure out a better way to describe positions with a ray.
if isinstance(positions, RaySamples):
ray_samples = positions
else:
ray_samples = RaySamples(
frustums=Frustums(
origins=positions,
directions=torch.ones_like(positions),
starts=torch.zeros_like(positions[..., :1]),
ends=torch.zeros_like(positions[..., :1]),
pixel_area=torch.ones_like(positions[..., :1]),
)
)
density, _ = self.get_density(ray_samples)
return density
@abstractmethod
def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType[..., 1], Union[TensorType[..., "num_features"], Dict]]:
"""Computes and returns the densities. Returns a tensor of densities and a tensor of features.
Args:
ray_samples: Samples locations to compute density.
"""
@abstractmethod
def get_outputs(
self, ray_samples: RaySamples, density_embedding: Optional[Union[TensorType, Dict]] = None
) -> Dict[str, TensorType]:
"""Computes and returns the colors. Returns output field values.
Args:
ray_samples: Samples locations to compute outputs.
density_embedding: Density embeddings to condition on.
"""
def forward(self, ray_samples: RaySamples):
"""Evaluates the field at points along the ray.
Args:
ray_samples: Samples to evaluate field on.
"""
density, density_embedding = self.get_density(ray_samples)
field_outputs = self.get_outputs(ray_samples, density_embedding=density_embedding)
field_outputs['density'] = density # type: ignore
return field_outputs
# Path: nerfstudio/field_components/factor_field.py
class FactorField(nn.Module):
'''
This field is on CUDA device and directly used in the computation graph.
'''
@staticmethod
def factory(total_num: int, feat_dim: int, size: tuple):
CONSOLE.log(f'using CUDA fields for factors. ')
return CUDAField(total_num, feat_dim, size)
def register_optimizer_params(self, optimizer: torch.optim.Optimizer):
pass
@abstractmethod
def upsample(self, target_size: tuple):
pass
def forward(self):
raise NotImplementedError('this is not called, only for inheritance. ')
@abstractmethod
def load(self, path: str):
raise NotImplementedError('load method not implemented. ')
@abstractmethod
def save(self, path: str):
raise NotImplementedError('save method not implemented. ')
@abstractmethod
def grid_sample(self, x, y, camera_indices, **kwargs):
pass
@abstractmethod
def get_cuda_fields(self):
pass
def save_cpu_fields(self, step: int, checkpoint_dir: Path, name: str):
pass
def load_cpu_fields(self, step: int, checkpoint_dir: Path, name: str):
pass
@abstractmethod
def check_field_number_consistency(self, probe_num: int):
pass
# Path: nerfstudio/field_components/factor_field.py
def sample_basis_factor_features(
num_near_basis: int,
field: FactorField,
probes: Probes,
positions: torch.Tensor,
camera_indices: torch.Tensor,
freq_theta: float,
freq_phi: float
):
'''
Get factor features from the basis factor fields via bilinear interpolation sampling.
'''
index_near_basis, pos_near_basis = probes.get_nearby_basis_index_and_pos(camera_indices[:, 0], num_near_basis) # (N_rays, num_near), (N_rays, num_near_basis, 3)
index_near_basis = index_near_basis.unsqueeze(dim=1).repeat(1, positions.shape[1], 1)
# compute theta and phi
displacement = positions[:, :, None, :] - pos_near_basis[:, None, :, :] # (N_rays, N_samples, num_near_basis, 3)
direction = F.normalize(displacement, dim=-1, eps=1e-6)
theta = torch.acos(direction[..., 2]) # [0, pi]
phi = torch.atan2(direction[..., 1], direction[..., 0])
# frequency warping and rescaling
theta = apply_theta_wraping(theta, freq_theta)
phi = apply_phi_wraping(phi, freq_phi)
theta = rescale(theta, 0, np.pi, -1, 1)
phi = rescale(phi, -np.pi, np.pi, -1, 1)
# sample features
features = field.grid_sample(phi, theta, index_near_basis, align_corners=True, mode='bilinear', padding_mode='zeros') # N_rays, N_samples, num_near_basis, feat_dim
ret = {}
ret['features'] = features
return ret
# Path: nerfstudio/field_components/factor_field.py
def sample_core_factor_features(
num_near_core: int,
field_angular: FactorField,
field_radial: FactorField,
probes: Probes,
positions: torch.Tensor,
camera_indices: torch.Tensor,
return_combined: bool = True,
):
'''
Get factor features from the core factor fields via bilinear interpolation sampling.
'''
index_near_core, pos_near_core = probes.get_nearby_core_index_and_pos(camera_indices[:, 0], num_near_core) # (N_rays, num_near_core), (N_rays, num_near_core, 3)
index_near_core = index_near_core.unsqueeze(dim=1).repeat(1, positions.shape[1], 1)
# compute theta, phi and t
displacement = positions[:, :, None, :] - pos_near_core[:, None, :, :] # (N_rays, N_samples, num_near_core, 3)
direction = F.normalize(displacement, dim=-1)
theta = torch.acos(direction[..., 2])
phi = torch.atan2(direction[..., 1], direction[..., 0])
t = torch.norm(displacement, dim=-1)
# rescaling
theta = rescale(theta, 0, np.pi, -1, 1)
phi = rescale(phi, -np.pi, np.pi, -1, 1)
t = disparity_warping(t) * 2 - 1.0 # z-axis, range (-1, 1)
t = t.clamp(-1.0, 1.0)
angular_features = field_angular.grid_sample(x=phi, y=theta, camera_indices=index_near_core, mode='bilinear', padding_mode='zeros', align_corners=True) # N_rays, N_samples, num_near_core, feat_dim
center = torch.zeros_like(phi, device=phi.device)
radial_features = field_radial.grid_sample(x=t, y=center, camera_indices=index_near_core, mode='bilinear', padding_mode='zeros', align_corners=True) # N_rays, N_samples, num_near_core, feat_dim
core_features = angular_features * radial_features
ret = {}
if return_combined:
ret['features'] = core_features
else:
ret['angular_features'] = angular_features
ret['radial_features'] = radial_features
return ret
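Both samplers above map a displacement vector to spherical coordinates before calling grid_sample. The stand-alone sketch below reproduces that mapping on random tensors; the `rescale` helper here is an assumption about the behaviour of the function referenced in factor_field.py, not the repository's exact code.

```python
import numpy as np
import torch
import torch.nn.functional as F

def rescale(x, old_min, old_max, new_min, new_max):
    # linear remap from [old_min, old_max] to [new_min, new_max] (assumed behaviour of the helper)
    return (x - old_min) / (old_max - old_min) * (new_max - new_min) + new_min

displacement = torch.randn(4, 16, 3, 3)                   # (N_rays, N_samples, num_near, 3)
direction = F.normalize(displacement, dim=-1, eps=1e-6)
theta = torch.acos(direction[..., 2].clamp(-1.0, 1.0))    # polar angle in [0, pi]
phi = torch.atan2(direction[..., 1], direction[..., 0])   # azimuth in (-pi, pi]
u = rescale(phi, -np.pi, np.pi, -1, 1)                    # x coordinate fed to grid_sample
v = rescale(theta, 0, np.pi, -1, 1)                       # y coordinate fed to grid_sample
```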
# Path: nerfstudio/field_components/rsh.py
def rsh_cart_3(xyz: torch.Tensor):
"""Computes all real spherical harmonics up to degree 3.
This is an autogenerated method. See
https://github.com/cheind/torch-spherical-harmonics
for more information.
Params:
xyz: (N,...,3) tensor of points on the unit sphere
Returns:
rsh: (N,...,16) real spherical harmonics
projections of input. Ynm is found at index
`n*(n+1) + m`, with `0 <= n <= degree` and
`-n <= m <= n`.
"""
x = xyz[..., 0]
y = xyz[..., 1]
z = xyz[..., 2]
x2 = x**2
y2 = y**2
z2 = z**2
xy = x * y
xz = x * z
yz = y * z
return torch.stack(
[
xyz.new_tensor(0.282094791773878).expand(xyz.shape[:-1]),
-0.48860251190292 * y,
0.48860251190292 * z,
-0.48860251190292 * x,
1.09254843059208 * xy,
-1.09254843059208 * yz,
0.94617469575756 * z2 - 0.31539156525252,
-1.09254843059208 * xz,
0.54627421529604 * x2 - 0.54627421529604 * y2,
-0.590043589926644 * y * (3.0 * x2 - y2),
2.89061144264055 * xy * z,
0.304697199642977 * y * (1.5 - 7.5 * z2),
1.24392110863372 * z * (1.5 * z2 - 0.5) - 0.497568443453487 * z,
0.304697199642977 * x * (1.5 - 7.5 * z2),
1.44530572132028 * z * (x2 - y2),
-0.590043589926644 * x * (x2 - 3.0 * y2),
],
-1,
)
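A quick check of the degree-3 spherical-harmonics basis above: 16 coefficients per unit direction, with the constant band first.

```python
import torch
from nerfstudio.field_components.rsh import rsh_cart_3  # import path taken from the excerpt above

dirs = torch.nn.functional.normalize(torch.randn(5, 3), dim=-1)  # unit view directions
sh = rsh_cart_3(dirs)                                            # shape (5, 16)
assert sh.shape == (5, 16)
assert torch.allclose(sh[:, 0], torch.full((5,), 0.282094791773878))  # Y00 is constant
```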
# Path: nerfstudio/cameras/rays.py
class RaySamples(TensorDataclass):
"""Samples along a ray"""
frustums: Frustums
"""Frustums along ray."""
camera_indices: Optional[TensorType["bs":..., 1]] = None
"""Camera index."""
deltas: Optional[TensorType["bs":..., 1]] = None
""""width" of each sample."""
spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None
"""Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None
"""Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
spacing_to_euclidean_fn: Optional[Callable] = None
"""Function to convert bins to euclidean distance."""
metadata: Optional[Dict[str, TensorType["bs":..., "latent_dims"]]] = None
"""addtional information relevant to generating ray samples"""
times: Optional[TensorType[..., 1]] = None
"""Times at which rays are sampled"""
probes: Optional[Probes] = None
"""Probe Object. This object doesn't follow the same shape pattern as the other fields.
Lazy broadcasting is used for preventing CUDA memory overflow. """
def get_weights(self, densities: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
"""Return weights based on predicted densities
Args:
densities: Predicted densities for samples along ray
Returns:
Weights for each sample
"""
delta_density = self.deltas * densities
alphas = 1 - torch.exp(-delta_density)
transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
transmittance = torch.cat(
[torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
)
transmittance = torch.exp(-transmittance) # [..., "num_samples"]
weights = alphas * transmittance # [..., "num_samples"]
return weights
def get_weights_and_transmittance(
self, densities: TensorType[..., "num_samples", 1]
) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples", 1]]:
"""Return weights and transmittance based on predicted densities
Args:
densities: Predicted densities for samples along ray
Returns:
Weights and transmittance for each sample
"""
delta_density = self.deltas * densities
alphas = 1 - torch.exp(-delta_density)
transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
transmittance = torch.cat(
[torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
)
transmittance = torch.exp(-transmittance) # [..., "num_samples"]
weights = alphas * transmittance # [..., "num_samples"]
return weights, transmittance
def get_weights_from_alphas(self, alphas: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
"""Return weights based on predicted alphas
Args:
alphas: Predicted alphas (maybe from sdf) for samples along ray
Returns:
Weights for each sample
"""
transmittance = torch.cumprod(
torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
) # [..., "num_samples"]
weights = alphas * transmittance[:, :-1, :] # [..., "num_samples"]
return weights
def get_weights_and_transmittance_from_alphas(
self, alphas: TensorType[..., "num_samples", 1]
) -> TensorType[..., "num_samples", 1]:
"""Return weights based on predicted alphas
Args:
alphas: Predicted alphas (maybe from sdf) for samples along ray
Returns:
Weights for each sample
"""
transmittance = torch.cumprod(
torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
) # [..., "num_samples"]
weights = alphas * transmittance[:, :-1, :] # [..., "num_samples"]
return weights, transmittance
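get_weights above is the usual alpha-compositing rule; the stand-alone sketch below reproduces the same arithmetic on plain tensors (one ray, eight samples) without constructing a RaySamples object.

```python
import torch

deltas = torch.full((1, 8, 1), 0.1)                 # spacing between consecutive samples
densities = torch.rand(1, 8, 1) * 10.0              # predicted densities along the ray
delta_density = deltas * densities
alphas = 1 - torch.exp(-delta_density)              # per-sample opacity
accum = torch.cat([torch.zeros(1, 1, 1), delta_density[..., :-1, :]], dim=-2).cumsum(dim=-2)
transmittance = torch.exp(-accum)                   # probability the ray survives up to each sample
weights = alphas * transmittance                    # sums to at most 1 along the sample axis
```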
# Path: nerfstudio/fields/nelfpro_field.py
from rich.progress import Console
from typing import Dict, Optional, Union
from torch import nn
from torchtyping import TensorType
from nerfstudio.field_components.mlp import MLP
from nerfstudio.field_components.embedding import Embedding
from nerfstudio.field_components.activations import trunc_exp
from nerfstudio.fields.base_field import Field
from nerfstudio.field_components.factor_field import (
FactorField,
sample_basis_factor_features,
sample_core_factor_features,
)
from nerfstudio.field_components.rsh import rsh_cart_3
from nerfstudio.cameras.rays import RaySamples
import torch
CONSOLE = Console(width=120)
class NeLFProField(Field):
def __init__(
self,
num_images,
num_basis: int,
near_basis: int,
dim_basis: int,
resolution_basis: int,
num_core: int,
near_core: int,
dim_core: int,
resolution_core_angular: int,
resolution_core_radial: int,
freq_theta: int,
freq_phi: int,
use_appearance_embedding: bool,
apperance_embedding_dim: int = 16,
num_layers_geometry: int = 2,
hidden_dim_geometry: int = 128,
geo_feat_dim: int = 31,
num_layers_color: int = 3,
hidden_dim_color: int = 64,
):
super().__init__()
# config for basis factor
self.num_basis = num_basis
self.near_basis = near_basis
self.dim_basis = dim_basis
self.resolution_basis = resolution_basis
# config for core factor
self.num_core = num_core
self.near_core = near_core
self.dim_core = dim_core
self.resolution_core_angular = resolution_core_angular
self.resolution_core_radial = resolution_core_radial
# config for geometry and color mlps
self.num_layers_geometry = num_layers_geometry
self.hidden_dim_geometry = hidden_dim_geometry
self.geo_feat_dim = geo_feat_dim
self.num_layers_color = num_layers_color
self.hidden_dim_color = hidden_dim_color
# config for frequency warping on basis factor fields
self.freq_theta = freq_theta
self.freq_phi = freq_phi
# config for appearance warping
self.use_appearance_embedding = use_appearance_embedding
self.num_images = num_images
self.field_basis = FactorField.factory(
total_num = self.num_basis,
feat_dim = self.dim_basis,
size = (self.resolution_basis, self.resolution_basis * 2),
)
self.field_core_angular = FactorField.factory(
total_num = self.num_core,
feat_dim = self.dim_core,
size = (self.resolution_core_angular, self.resolution_core_angular * 2),
)
self.field_core_radial = FactorField.factory(
| total_num = self.num_core, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CosmicLaca/ComfyUI_Primere_Nodes
# Path: Nodes/modules/latent_noise.py
class PowerLawNoise(nn.Module):
def __init__(self, device='cpu'):
super(PowerLawNoise, self).__init__()
self.device = device
@staticmethod
def get_noise_types():
return ["white", "blue", "brownian_fractal", "violet"]
def get_generator(self, noise_type):
if noise_type in self.get_noise_types():
if noise_type == "white":
return self.white_noise
elif noise_type == "blue":
return self.blue_noise
elif noise_type == "violet":
return self.violet_noise
elif noise_type == "brownian_fractal":
return self.brownian_fractal_noise
else:
raise ValueError(f"`noise_type` is invalid. Valid types are {', '.join(self.get_noise_types())}")
def set_seed(self, seed):
if seed is not None:
torch.manual_seed(seed)
def white_noise(self, batch_size, width, height, scale, seed, alpha=0.0, **kwargs):
self.set_seed(seed)
scale = scale
noise_real = torch.randn((batch_size, 1, height, width), device=self.device)
noise_power_law = torch.sign(noise_real) * torch.abs(noise_real) ** alpha
noise_power_law *= scale
return noise_power_law.to(self.device)
def blue_noise(self, batch_size, width, height, scale, seed, alpha=2.0, **kwargs):
self.set_seed(seed)
noise = torch.randn(batch_size, 1, height, width, device=self.device)
freq_x = fft.fftfreq(width, 1.0)
freq_y = fft.fftfreq(height, 1.0)
Fx, Fy = torch.meshgrid(freq_x, freq_y, indexing="ij")
power = (Fx**2 + Fy**2)**(alpha / 2.0)
power[0, 0] = 1.0
power = power.unsqueeze(0).expand(batch_size, 1, width, height).permute(0, 1, 3, 2).to(device=self.device)
noise_fft = fft.fftn(noise)
power = power.to(noise_fft)
noise_fft = noise_fft / torch.sqrt(power)
noise_real = fft.ifftn(noise_fft).real
noise_real = noise_real - noise_real.min()
noise_real = noise_real / noise_real.max()
noise_real = noise_real * scale
return noise_real.to(self.device)
def violet_noise(self, batch_size, width, height, alpha=1.0, device='cpu', **kwargs):
white_noise = torch.randn((batch_size, 1, height, width), device=device)
violet_noise = torch.sign(white_noise) * torch.abs(white_noise) ** (alpha / 2.0)
violet_noise /= torch.max(torch.abs(violet_noise))
return violet_noise
def brownian_fractal_noise(self, batch_size, width, height, scale, seed, alpha=1.0, modulator=1.0, **kwargs):
def add_particles_to_grid(grid, particle_x, particle_y):
for x, y in zip(particle_x, particle_y):
grid[y, x] = 1
def move_particles(particle_x, particle_y):
dx = torch.randint(-1, 2, (batch_size, n_particles), device=self.device)
dy = torch.randint(-1, 2, (batch_size, n_particles), device=self.device)
particle_x = torch.clamp(particle_x + dx, 0, width - 1)
particle_y = torch.clamp(particle_y + dy, 0, height - 1)
return particle_x, particle_y
self.set_seed(seed)
n_iterations = int(5000 * modulator)
fy = fft.fftfreq(height).unsqueeze(1) ** 2
fx = fft.fftfreq(width) ** 2
f = fy + fx
power = torch.sqrt(f) ** alpha
power[0, 0] = 1.0
grid = torch.zeros(height, width, dtype=torch.uint8, device=self.device)
n_particles = n_iterations // 10
particle_x = torch.randint(0, int(width), (batch_size, n_particles), device=self.device)
particle_y = torch.randint(0, int(height), (batch_size, n_particles), device=self.device)
neighborhood = torch.tensor([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]], dtype=torch.uint8, device=self.device)
for _ in range(n_iterations):
add_particles_to_grid(grid, particle_x, particle_y)
particle_x, particle_y = move_particles(particle_x, particle_y)
brownian_tree = grid.clone().detach().float().to(self.device)
brownian_tree = brownian_tree / brownian_tree.max()
brownian_tree = F.interpolate(brownian_tree.unsqueeze(0).unsqueeze(0), size=(height, width), mode='bilinear', align_corners=False)
brownian_tree = brownian_tree.squeeze(0).squeeze(0)
fy = fft.fftfreq(height).unsqueeze(1) ** 2
fx = fft.fftfreq(width) ** 2
f = fy + fx
power = torch.sqrt(f) ** alpha
power[0, 0] = 1.0
noise_real = brownian_tree * scale
amplitude = 1.0 / (scale ** (alpha / 2.0))
noise_real *= amplitude
noise_fft = fft.fftn(noise_real.to(self.device))
noise_fft = noise_fft / power.to(self.device)
noise_real = fft.ifftn(noise_fft).real
noise_real *= scale
return noise_real.unsqueeze(0).unsqueeze(0)
def forward(self, batch_size, width, height, alpha=2.0, scale=1.0, modulator=1.0, noise_type="white", seed=None):
if noise_type not in self.get_noise_types():
raise ValueError(f"`noise_type` is invalid. Valid types are {', '.join(self.get_noise_types())}")
if seed is None:
seed = torch.randint(0, 2**32 - 1, (1,)).item()
channels = []
for i in range(3):
gen_seed = seed + i
random.seed(gen_seed)
noise = normalize(self.get_generator(noise_type)(batch_size, width, height, scale=scale, seed=gen_seed, alpha=alpha, modulator=modulator))
channels.append(noise)
noise_image = torch.cat((channels[0], channels[1], channels[2]), dim=1)
noise_image = (noise_image - noise_image.min()) / (noise_image.max() - noise_image.min())
noise_image = noise_image.permute(0, 2, 3, 1).float()
return noise_image.to(device="cpu")
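A hypothetical call to the generator above (CPU, small resolution for illustration). It assumes PowerLawNoise is imported from its own module (Nodes/modules/latent_noise.py), so that the module-level `normalize` helper used inside `forward` is available.

```python
import torch

# assumes: from Nodes.modules.latent_noise import PowerLawNoise
noise_gen = PowerLawNoise(device='cpu')
img = noise_gen(batch_size=1, width=64, height=64,
                noise_type="blue", alpha=2.0, scale=1.0, seed=123)
print(img.shape, img.min().item(), img.max().item())  # torch.Size([1, 64, 64, 3]), 0.0, 1.0
```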
# Path: Nodes/modules/adv_encode.py
def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
tokenized = clip.tokenize(text, return_word_ids=True)
if isinstance(clip.cond_stage_model, (SDXLClipModel, SDXLRefinerClipModel, SDXLClipG)):
embs_l = None
embs_g = None
pooled = None
if 'l' in tokenized and isinstance(clip.cond_stage_model, SDXLClipModel):
embs_l, _ = advanced_encode_from_tokens(tokenized['l'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_l),
w_max=w_max,
return_pooled=False)
if 'g' in tokenized:
embs_g, pooled = advanced_encode_from_tokens(tokenized['g'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_g),
w_max=w_max,
return_pooled=True,
apply_to_pooled=apply_to_pooled)
return prepareXL(embs_l, embs_g, pooled, clip_balance)
else:
return advanced_encode_from_tokens(tokenized['l'],
token_normalization,
weight_interpretation,
lambda x: (clip.encode_from_tokens({'l': x}), None),
w_max=w_max)
# Path: Nodes/modules/adv_encode.py
def advanced_encode_XL(clip, text1, text2, token_normalization, weight_interpretation, w_max=1.0, clip_balance=.5, apply_to_pooled=True):
tokenized1 = clip.tokenize(text1, return_word_ids=True)
tokenized2 = clip.tokenize(text2, return_word_ids=True)
embs_l, _ = advanced_encode_from_tokens(tokenized1['l'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_l),
w_max=w_max,
return_pooled=False)
embs_g, pooled = advanced_encode_from_tokens(tokenized2['g'],
token_normalization,
weight_interpretation,
lambda x: encode_token_weights(clip, x, encode_token_weights_g),
w_max=w_max,
return_pooled=True,
apply_to_pooled=apply_to_pooled)
gcd_num = gcd(embs_l.shape[1], embs_g.shape[1])
repeat_l = int((embs_g.shape[1] / gcd_num) * embs_l.shape[1])
repeat_g = int((embs_l.shape[1] / gcd_num) * embs_g.shape[1])
return prepareXL(embs_l.expand((-1,repeat_l,-1)), embs_g.expand((-1,repeat_g,-1)), pooled, clip_balance)
# Path: Nodes/Dashboard.py
from ..components.tree import TREE_DASHBOARD
from ..components.tree import PRIMERE_ROOT
from .modules.latent_noise import PowerLawNoise
from .modules.adv_encode import advanced_encode, advanced_encode_XL
from nodes import MAX_RESOLUTION
from ..components import utility
from pathlib import Path
from ..components import hypernetwork
from ..utils import comfy_dir
import comfy.samplers
import folder_paths
import nodes
import torch
import torch.nn.functional as F
import random
import os
import tomli
import re
import requests
import comfy.sd
import comfy.utils
import comfy_extras.nodes_model_advanced as nodes_model_advanced
"lycoris_keyword_selection": (["Select in order", "Random select"], {"default": "Select in order"}),
"lycoris_keywords_num": ("INT", {"default": 1, "min": 1, "max": 50, "step": 1}),
"lycoris_keyword_weight": ("FLOAT", {"default": 1.0, "min": 0, "max": 10.0, "step": 0.1}),
}
}
def load_networks(self, model, clip, positive_prompt, process_lora, process_lycoris, process_hypernetwork, copy_weight_to_clip, lora_clip_custom_weight, lycoris_clip_custom_weight, use_lora_keyword, use_lycoris_keyword, lora_keyword_placement, lycoris_keyword_placement, lora_keyword_selection, lycoris_keyword_selection, lora_keywords_num, lycoris_keywords_num, lora_keyword_weight, lycoris_keyword_weight, hypernetwork_safe_load = True):
NETWORK_START = []
cloned_model = model
cloned_clip = clip
list_of_keyword_items = []
lora_keywords_num_set = lora_keywords_num
lycoris_keywords_num_set = lycoris_keywords_num
model_lora_keyword = [None, None]
model_lyco_keyword = [None, None]
lora_stack = []
lycoris_stack = []
hnet_stack = []
HypernetworkList = folder_paths.get_filename_list("hypernetworks")
LoraList = folder_paths.get_filename_list("loras")
LYCO_DIR = os.path.join(comfy_dir, 'models', 'lycoris')
folder_paths.add_model_folder_path("lycoris", LYCO_DIR)
LyCORIS = folder_paths.get_filename_list("lycoris")
LycorisList = folder_paths.filter_files_extensions(LyCORIS, ['.ckpt', '.safetensors'])
if process_lora == True:
NETWORK_START.append('<lora:')
if process_lycoris == True:
NETWORK_START.append('<lyco:')
if process_hypernetwork == True:
NETWORK_START.append('<hypernet:')
if len(NETWORK_START) == 0:
return (model, clip, lora_stack, lycoris_stack, hnet_stack, model_lora_keyword, model_lyco_keyword)
else:
NETWORK_END = ['>'] + NETWORK_START
NETWORK_TUPLE = utility.get_networks_prompt(NETWORK_START, NETWORK_END, positive_prompt)
if (len(NETWORK_TUPLE) == 0):
return (model, clip, lora_stack, lycoris_stack, hnet_stack, model_lora_keyword, model_lyco_keyword)
else:
for NETWORK_DATA in NETWORK_TUPLE:
NetworkName = NETWORK_DATA[0]
try:
NetworkStrenght = float(NETWORK_DATA[1])
except ValueError:
NetworkStrenght = 1
NetworkType = NETWORK_DATA[2]
if (process_lora == True and NetworkType == 'LORA'):
lora_name = utility.get_closest_element(NetworkName, LoraList)
if lora_name is not None:
lora_path = folder_paths.get_full_path("loras", lora_name)
lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
if (copy_weight_to_clip == True):
lora_clip_custom_weight = NetworkStrenght
lora_stack.append([lora_name, NetworkStrenght, lora_clip_custom_weight])
cloned_model, cloned_clip = comfy.sd.load_lora_for_models(cloned_model, cloned_clip, lora, NetworkStrenght, lora_clip_custom_weight)
if use_lora_keyword == True:
ModelKvHash = utility.get_model_hash(lora_path)
if ModelKvHash is not None:
KEYWORD_PATH = os.path.join(PRIMERE_ROOT, 'front_end', 'keywords', 'lora-keyword.txt')
keywords = utility.get_model_keywords(KEYWORD_PATH, ModelKvHash, lora_name)
if keywords is not None and keywords != "":
if keywords.find('|') > 1:
keyword_list = [word.strip() for word in keywords.split('|')]
keyword_list = list(filter(None, keyword_list))
if (len(keyword_list) > 0):
lora_keywords_num = lora_keywords_num_set
keyword_qty = len(keyword_list)
if (lora_keywords_num > keyword_qty):
lora_keywords_num = keyword_qty
if lora_keyword_selection == 'Select in order':
list_of_keyword_items.extend(keyword_list[:lora_keywords_num])
else:
list_of_keyword_items.extend(random.sample(keyword_list, lora_keywords_num))
else:
list_of_keyword_items.append(keywords)
if len(list_of_keyword_items) > 0:
if lora_keyword_selection != 'Select in order':
random.shuffle(list_of_keyword_items)
list_of_keyword_items = list(set(list_of_keyword_items))
keywords = ", ".join(list_of_keyword_items)
if (lora_keyword_weight != 1):
keywords = '(' + keywords + ':' + str(lora_keyword_weight) + ')'
model_lora_keyword = [keywords, lora_keyword_placement]
if (process_lycoris == True and NetworkType == 'LYCORIS'):
lycoris_name = utility.get_closest_element(NetworkName, LycorisList)
if lycoris_name is not None:
lycoris_path = folder_paths.get_full_path("lycoris", lycoris_name)
lycoris = comfy.utils.load_torch_file(lycoris_path, safe_load=True)
if (copy_weight_to_clip == True):
lycoris_clip_custom_weight = NetworkStrenght
lycoris_stack.append([lycoris_name, NetworkStrenght, lycoris_clip_custom_weight])
cloned_model, cloned_clip = comfy.sd.load_lora_for_models(cloned_model, cloned_clip, lycoris, NetworkStrenght, lycoris_clip_custom_weight)
if use_lycoris_keyword == True:
ModelKvHash = utility.get_model_hash(lycoris_path)
if ModelKvHash is not None:
KEYWORD_PATH = os.path.join(PRIMERE_ROOT, 'front_end', 'keywords', 'lora-keyword.txt')
keywords = utility.get_model_keywords(KEYWORD_PATH, ModelKvHash, lycoris_name)
if keywords is not None and keywords != "":
if keywords.find('|') > 1:
keyword_list = [word.strip() for word in keywords.split('|')]
keyword_list = list(filter(None, keyword_list))
if (len(keyword_list) > 0):
lycoris_keywords_num = lycoris_keywords_num_set
keyword_qty = len(keyword_list)
if (lycoris_keywords_num > keyword_qty):
lycoris_keywords_num = keyword_qty
| if lycoris_keyword_selection == 'Select in order': |
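The tag-parsing loop above relies on utility.get_networks_prompt, which is not shown in this excerpt. The sketch below is a hypothetical stand-in (not the repository's implementation) illustrating the `<lora:name:strength>`-style syntax it handles; the type labels are assumptions for illustration only.

```python
import re

def parse_network_tags(prompt: str):
    # Hypothetical stand-in: return (name, strength, type) for each network tag in the prompt.
    pattern = re.compile(r'<(lora|lyco|hypernet):([^:>]+)(?::([0-9.]+))?>')
    type_map = {'lora': 'LORA', 'lyco': 'LYCORIS', 'hypernet': 'HYPERNET'}
    return [(m.group(2), m.group(3) or '1', type_map[m.group(1)])
            for m in pattern.finditer(prompt)]

print(parse_network_tags('a castle <lora:fantasy_style:0.8> <hypernet:sketch>'))
# [('fantasy_style', '0.8', 'LORA'), ('sketch', '1', 'HYPERNET')]
```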
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: amazon-science/c2f-seg
# Path: data/dataloader_vqgan.py
def load_dataset(args, config):
if args.dataset=="KINS":
train_dataset = KINS_VQ_dataset(config, mode='train')
val_dataset = KINS_VQ_dataset(config, mode='test')
elif args.dataset=="MOViD_A":
train_dataset = Movid_A_VQ_Dataset(config, mode="train")
val_dataset = Movid_A_VQ_Dataset(config, mode="test")
elif args.dataset=="COCOA":
train_dataset = COCOA_VQ_dataset(config, mode="train")
val_dataset = COCOA_VQ_dataset(config, mode="test")
elif args.dataset=="Fishbowl":
train_dataset = FishBowl_VQ_Dataset(config, mode="train")
val_dataset = FishBowl_VQ_Dataset(config, mode="test")
return train_dataset, val_dataset
# Path: utils/evaluation.py
def get_IoU(pt_mask, gt_mask):
# pred_mask [N, Image_W, Image_H]
# gt_mask [N, Image_W, Image_H]
SMOOTH = 1e-10
intersection = (pt_mask & gt_mask).sum((-1, -2)).to(torch.float32) # [N, 1]
union = (pt_mask | gt_mask).sum((-1, -2)).to(torch.float32) # [N, 1]
iou = (intersection + SMOOTH) / (union + SMOOTH) # [N, 1]
return iou
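A tiny sanity check for the IoU helper above, using two 2x2 boolean masks.

```python
import torch
from utils.evaluation import get_IoU  # import path taken from the excerpt above

pred = torch.tensor([[[True, True], [False, False]]])
gt   = torch.tensor([[[False, True], [True, False]]])
iou = get_IoU(pred, gt)   # intersection = 1, union = 3  ->  tensor([0.3333...])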
# Path: utils/logger.py
def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='log'):
"""Sets up logger from target work directory.
The function will set up a logger with `DEBUG` log level. Two handlers will
be added to the logger automatically. One is the `sys.stdout` stream, with
`INFO` log level, which will print important messages on the screen. The other
is used to save all messages to file `$WORK_DIR/$LOGFILE_NAME`. Messages will
be prefixed with a time stamp and log level before being logged.
NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be
skipped.
Args:
work_dir: The work directory. All intermediate files will be saved here.
(default: None)
logfile_name: Name of the file to save log message. (default: `log.txt`)
logger_name: Unique name for the logger. (default: `logger`)
Returns:
A `logging.Logger` object.
Raises:
SystemExit: If the work directory already exists, or the logger with the
specified name `logger_name` already exists.
"""
logger = logging.getLogger(logger_name)
formatter = logging.Formatter("[%(asctime)s][%(levelname)s] %(message)s")
if not logger.handlers:
logger.setLevel(logging.DEBUG)
# Print log message with `INFO` level or above onto the screen.
sh = logging.StreamHandler(stream=sys.stdout)
# sh.setLevel(logging.INFO)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.propagate = False
if not work_dir or not logfile_name:
return logger
if os.path.exists(work_dir):
print(f'Work directory `{work_dir}` already exists!')
os.makedirs(work_dir, exist_ok=True)
# Save log message with all levels in log file.
fh = logging.FileHandler(os.path.join(work_dir, logfile_name))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
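A minimal sketch of wiring up the logger above; the directory name is illustrative.

```python
from utils.logger import setup_logger  # import path taken from the excerpt above

logger = setup_logger(work_dir='./check_points/demo/logs', logfile_name='log.txt', logger_name='demo')
logger.info('training started')    # echoed to stdout and appended to ./check_points/demo/logs/log.txt
logger.debug('written only to the log file, not the console')
```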
# Path: utils/utils.py
class Config(object):
def __init__(self, config_path):
with open(config_path, 'r') as f:
self._yaml = f.read()
self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)
self._dict['path'] = os.path.dirname(config_path)
def __getattr__(self, name):
if self._dict.get(name) is not None:
return self._dict[name]
return None
def print(self):
print('Model configurations:')
print('---------------------------------')
print(self._yaml)
print('')
print('---------------------------------')
print('')
# Path: utils/utils.py
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, max_iters=None, width=25, verbose=1, interval=0.05,
stateful_metrics=None):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.max_iters = max_iters
self.iters = 0
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
self._values[k] = v
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
# if self.target is not None and current < self.target:
if self.max_iters is None or self.iters < self.max_iters:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60,
eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1:
info += ' %.0fs/step' % time_per_unit
elif time_per_unit >= 1e-3:
info += ' %.0fms/step' % (time_per_unit * 1e3)
else:
info += ' %.0fus/step' % (time_per_unit * 1e6)
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
if 'lr' in k:
info += ' %.3e' % self._values[k]
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.iters += 1
self.update(self._seen_so_far + n, values)
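A self-contained sketch of driving the Progbar above: the `epoch` metric is stateful (shown as-is), while `loss` is averaged over the bar's lifetime. The loop body is a stand-in for a training step.

```python
import time
from utils.utils import Progbar  # import path taken from the excerpt above

progbar = Progbar(100, width=25, stateful_metrics=['epoch'])
for step in range(100):
    time.sleep(0.01)  # stand-in for one training step
    progbar.add(1, values=[('epoch', 1), ('loss', 1.0 / (step + 1))])
```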
# Path: utils/utils.py
def to_cuda(meta, device):
for k in meta:
if meta[k] is not None:
meta[k] = meta[k].to(device)
return meta
# Path: utils/utils.py
def stitch_images(inputs, outputs, img_per_row=2, mode="L"):
gap = 5
columns = len(outputs) + 1
height, width = inputs[0][:, :, 0].shape
img = Image.new(mode,
(width * img_per_row * columns + gap * (img_per_row - 1), height * int(len(inputs) / img_per_row)))
images = [inputs] + outputs
for ix in range(len(inputs)):
xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap
yoffset = int(ix / img_per_row) * height
for cat in range(len(images)):
im = (np.array((images[cat][ix]).cpu())*255).astype(np.uint8).squeeze()
im = Image.fromarray(im, mode)
img.paste(im, (xoffset + cat * width, yoffset))
return img
# Path: utils/utils.py
def get_lr_schedule_with_steps(decay_type, optimizer, drop_steps=None, gamma=None, total_steps=None):
def lr_lambda(current_step):
if decay_type == 'fix':
return 1.0
elif decay_type == 'linear':
return 1.0 * (current_step / total_steps)
elif decay_type == 'cos':
return 1.0 * (math.cos((current_step / total_steps) * math.pi) + 1) / 2
elif decay_type == 'milestone':
return 1.0 * math.pow(gamma, int(current_step / drop_steps))
else:
raise NotImplementedError
return LambdaLR(optimizer, lr_lambda)
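A short sketch of pairing the schedule factory above with an optimizer, using the 'cos' decay branch; the parameter and step counts are illustrative.

```python
import torch
from utils.utils import get_lr_schedule_with_steps  # import path taken from the excerpt above

params = [torch.nn.Parameter(torch.zeros(10))]
optimizer = torch.optim.Adam(params, lr=1e-4)
scheduler = get_lr_schedule_with_steps('cos', optimizer, total_steps=10_000)
for _ in range(100):
    optimizer.step()
    scheduler.step()   # lr follows 1e-4 * (cos(pi * step / total_steps) + 1) / 2
```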
# Path: utils/utils.py
def torch_init_model(model, init_checkpoint, key):
state_dict = torch.load(init_checkpoint, map_location='cpu')[key]
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='')
print("missing keys:{}".format(missing_keys))
print('unexpected keys:{}'.format(unexpected_keys))
print('error msgs:{}'.format(error_msgs))
# Path: taming_src/vqperceptual.py
class VQLPIPSWithDiscriminator(nn.Module):
def __init__(self, config):
super().__init__()
disc_start = config['disc_start']
codebook_weight = config['codebook_weight']
pixelloss_weight = 1.0
disc_num_layers = config['disc_num_layers']
disc_in_channels = config['disc_in_channels']
disc_factor = 1.0
disc_weight = config['disc_weight']
perceptual_weight = 1.0
use_actnorm = False
disc_conditional = config['disc_conditional']
disc_ndf = 64
disc_loss="hinge"
assert disc_loss in ["hinge", "vanilla"]
self.codebook_weight = codebook_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPIPS().eval()
self.perceptual_weight = perceptual_weight
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
n_layers=disc_num_layers,
use_actnorm=use_actnorm,
ndf=disc_ndf).apply(weights_init)
self.discriminator_iter_start = disc_start
if disc_loss == "hinge":
self.disc_loss = hinge_d_loss
elif disc_loss == "vanilla":
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.")
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
global_step, cond=None, split="train"):
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
rec_loss = rec_loss + self.perceptual_weight * p_loss
else:
p_loss = torch.tensor([0.0])
nll_loss = rec_loss
# nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
nll_loss = torch.mean(nll_loss)
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
g_loss = -torch.mean(logits_fake)
# try:
# d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
# except RuntimeError:
# assert not self.training
# d_weight = torch.tensor(0.0)
# d_weight = torch.tensor(1.0)
# with torch.no_grad():
# d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
# disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
# loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()
return nll_loss, g_loss, self.codebook_weight * codebook_loss.mean()
# log = [("{}/total_loss".format(split), loss.clone().detach().mean().item()),
# ("{}/quant_loss".format(split), codebook_loss.detach().mean().item()),
# ("{}/nll_loss".format(split), nll_loss.detach().mean().item()),
# ("{}/rec_loss".format(split), rec_loss.detach().mean().item()),
# ("{}/p_loss".format(split), p_loss.detach().mean().item()),
# ("{}/d_weight".format(split), d_weight.detach().item()),
# ("{}/disc_factor".format(split), disc_factor),
# ("{}/g_loss".format(split), g_loss.detach().mean().item())]
# return loss#, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = [("{}/disc_loss".format(split), d_loss.clone().detach().mean().item()),
("{}/logits_real".format(split), logits_real.detach().mean().item()),
("{}/logits_fake".format(split), logits_fake.detach().mean().item())]
return d_loss#, log
# Path: taming_src/vqperceptual.py
def adopt_weight(weight, global_step, threshold=0, value=0.):
if global_step < threshold:
weight = value
return weight
# Path: taming_src/taming_models.py
class VQModel(nn.Module):
def __init__(self, config):
super(VQModel, self).__init__()
self.config = config
self.iteration = 0
self.name = config.model_type
self.m_path = os.path.join(config.path, self.name)
self.eps = 1e-6
self.ddconfig = config.model['params']['ddconfig']
n_embed = config.model['params']['n_embed']
embed_dim = config.model['params']['embed_dim']
self.encoder = Encoder(self.ddconfig).to(config.device)
self.decoder = Decoder(self.ddconfig).to(config.device)
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25).to(config.device).to(config.device)
self.quant_conv = torch.nn.Conv2d(self.ddconfig["z_channels"], embed_dim, 1).to(config.device)
# self.quant_proj = torch.nn.Linear(self.ddconfig["z_channels"], embed_dim).to(config.device)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, self.ddconfig["z_channels"], 1).to(config.device)
# self.pose_quant_proj = torch.nn.Linear(embed_dim, self.ddconfig["z_channels"]).to(config.device)
def encode(self, x, mask=None):
h = self.encoder(x) # dim=256
h = self.quant_conv(h) # dim=256
if mask is not None:
mask = F.max_pool2d(mask, kernel_size=int(mask.shape[2] / h.shape[2]),
stride=int(mask.shape[2] / h.shape[2]))
quant = quant * mask + h * (1 - mask)
quant, emb_loss, info = self.quantize(h, mask)
return quant, emb_loss, info
def decode(self, quant):
quant = self.post_quant_conv(quant) # dim: 256
dec = self.decoder(quant)
return dec
def decode_code(self, code_b):
quant_b = self.quantize.embed_code(code_b)
dec = self.decode(quant_b)
return dec
def forward(self, x, mask=None):
quant, diff, _ = self.encode(x, mask) # quant dim: 256
dec = self.decode(quant)
return dec, diff
def get_last_layer(self):
return self.decoder.conv_out.weight
def restore(self, ckpt_file, g_opt=None, d_opt=None):
torch_init_model(self, ckpt_file, "state_dict")
saving = torch.load(ckpt_file, map_location='cpu')
if 'optimizer_states' in saving and g_opt is not None and d_opt is not None:
opt_state = saving['optimizer_states']
g_opt.load_state_dict(opt_state[0])
d_opt.load_state_dict(opt_state[1])
print(f"Restored from {ckpt_file}")
return g_opt, d_opt
def save(self, prefix=None, g_opt=None, d_opt=None):
if prefix is not None:
save_path = self.m_path + "_{}.pth".format(prefix)
else:
save_path = self.m_path + ".pth"
print('\nsaving {} {}...\n'.format(self.name, prefix))
all_saving = {'state_dict': self.state_dict(),
'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}
torch.save(all_saving, save_path)
# Path: train_vq.py
import os
import cv2
import random
import numpy as np
import torch
import argparse
import time
from shutil import copyfile
from torch.utils.data import DataLoader
from data.dataloader_vqgan import load_dataset
from utils.evaluation import get_IoU
from utils.logger import setup_logger
from utils.utils import Config, Progbar, to_cuda, stitch_images
from utils.utils import get_lr_schedule_with_steps, torch_init_model
from taming_src.vqperceptual import VQLPIPSWithDiscriminator, adopt_weight
from taming_src.taming_models import VQModel
def restore(ckpt_file, g_model, d_model, g_opt, d_opt):
torch_init_model(g_model, ckpt_file, "g_model")
torch_init_model(d_model, ckpt_file, "d_model")
saving = torch.load(ckpt_file, map_location='cpu')
# if 'optimizer_states' in saving:
# opt_state = saving['optimizer_states']
# # print(opt_state[0])
# g_opt.load_state_dict(opt_state[0])
# d_opt.load_state_dict(opt_state[1])
print(f"Restored from {ckpt_file}")
return g_opt, d_opt
def save(g_model, d_model, m_path, prefix=None, g_opt=None, d_opt=None):
if prefix is not None:
save_path = m_path + "_{}.pth".format(prefix)
else:
save_path = m_path + ".pth"
print('\nsaving {}...\n'.format(save_path))
all_saving = {'g_model': g_model.state_dict(),
'd_model': d_model.state_dict(),
'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}
torch.save(all_saving, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--finetune_path', type=str, required=False, default=None)
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--learn_type', default="mask", type=str)
parser.add_argument('--check_point_path', default="../check_points", type=str)
parser.add_argument('--dataset', default="Kins", type=str)
args = parser.parse_args()
args.path = os.path.join(args.check_point_path, args.path)
config_path = os.path.join(args.path, 'vqgan_{}.yml'.format(args.dataset))
# create checkpoints path if it doesn't exist
if not os.path.exists(args.path):
os.makedirs(args.path)
# copy config template if it doesn't exist
if not os.path.exists(config_path):
copyfile('configs/vqgan_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
# cuda visible devices
local_rank = 0
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
if local_rank == 0:
logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file)
for k in config._dict:
logger.info("{}:{}".format(k, config._dict[k]))
else:
logger = None
# save samples and eval pictures
os.makedirs(os.path.join(args.path, 'samples'), exist_ok=True)
# os.makedirs(os.path.join(args.path, 'eval'), exist_ok=True)
# init device
if torch.cuda.is_available():
config.device = torch.device("cuda")
torch.backends.cudnn.benchmark = True # cudnn auto-tuner
else:
config.device = torch.device("cpu")
cv2.setNumThreads(0)
# initialize random seed
torch.manual_seed(config.seed)
np.random.seed(config.seed)
random.seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
train_dataset, val_dataset = load_dataset(args, config)
train_loader = DataLoader(
| dataset=train_dataset, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: alipay/PainlessInferenceAcceleration
# Path: pia/lookahead/common/lookahead_cache.py
class LookaheadCache():
def __init__(self, debug=False, eos=2, stop_words=None, max_node=512, max_output_node=256):
self.debug = debug
self.eos = eos
self.max_node = max_node
self.max_output_node = max_output_node
self.mem = {}
self._output_ids = defaultdict(list)
self._update_trees = set()
self._update_input_trees = set()
self.stop_words = stop_words if stop_words is not None else {}
self.default_mask = np.ones((1, 1), dtype=np.int64)
def put(self, token_ids, branch_length=8, final=False, mode='output', idx=-1):
if self.eos in token_ids:
token_ids = token_ids[:token_ids.index(self.eos)]
if len(token_ids) >= 2:
ts = len(token_ids) # ts: token_ids size
for i in range(ts - 1):
token_id = token_ids[i]
tup = token_ids[i + 1:i + branch_length + 1]
if self.debug:
print(f'input token:{token_id} tokens:{tup}')
tree = self.mem.get(token_id, None)
if tree is not None:
tree.put(tup, mode=mode, idx=idx)
else:
tree = Tree(token_id, max_node=self.max_node, max_output_node=self.max_output_node)
tree.put(tup, mode=mode, idx=idx)
self.mem[token_id] = tree
self._update_trees.add(tree)
if mode == 'input':
self._update_input_trees.add(tree)
if final:
self.reset_input_freqs()
self.squeeze_branch_counts()
def stream_put(self, token_ids, branch_length=8, final=False, mode='output', idx=0):
# idx is only used for caching output_ids
assert mode == 'output' and idx >= 0
if self.eos in token_ids:
token_ids = token_ids[:token_ids.index(self.eos)]
self._output_ids[idx].extend(token_ids)
output_ids = self._output_ids[idx]
ts = len(output_ids)
min_branch_length = 1 if final else branch_length
if ts > min_branch_length:
for i in range(ts - min_branch_length):
token_id = output_ids[i]
tup = output_ids[i + 1:i + branch_length + 1]
if self.debug:
print(f'input token:{token_id} tokens:{tup}')
tree = self.mem.get(token_id, None)
if tree:
tree.put(tup, mode='output', idx=-1)
else:
tree = Tree(token_id, max_node=self.max_node, max_output_node=self.max_output_node)
tree.put(tup, mode='output', idx=-1)
self.mem[token_id] = tree
self._update_trees.add(tree)
if not final:
self._output_ids[idx] = output_ids[ts - branch_length:]
if final:
self._output_ids[idx] = []
self.reset_input_freqs()
self.squeeze_branch_counts()
def hier_get(self, token_ids, decoding_length=64, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',
idx=0):
assert mode in ('input', 'output', 'mix')
decoding_masks = self.default_mask
if decoding_length <= 1 or branch_length == 0:
return token_ids[-1:], decoding_masks, []
decoding_ids = None
sizes = [0, 0]
match_count = len(token_ids)
for i, t in enumerate(token_ids):
tree = self.mem.get(t, None)
if tree is not None:
ids = token_ids[i + 1:]
if t in self.stop_words and len(ids) == 0:
continue
decoding_ids, decoding_masks, sizes = tree.get(ids,
max_size=decoding_length,
max_length=branch_length,
min_input_size=min_input_size,
min_output_size=min_output_size,
mode=mode,
idx=idx)
s = len(decoding_ids)
match_count = len(token_ids) - i
# token count is enough, no need to retrieve again
if s >= branch_length:
break
if decoding_ids is None:
decoding_ids = token_ids[-1:]
return decoding_ids, decoding_masks, sizes
def par_get(self, token_ids, decoding_length=16, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',
idx=0):
output_ids, decoding_masks, decoding_lengths = self.trie_get(token_ids,
decoding_length=decoding_length,
branch_length=branch_length,
min_input_size=min_input_size,
min_output_size=min_output_size,
mode=mode,
idx=idx)
sets = []
true_decoding_length = len(output_ids) - 1
for i in range(true_decoding_length, 0, -1):
indices, = np.nonzero(decoding_masks[i, 1:])
indices = set(indices)
flag = True
for ss in sets:
if len(indices - ss) == 0:
flag = False
break
if flag:
sets.append(indices)
sets.reverse()
count = 0
max_decoding_length = true_decoding_length
branches = []
for indices in sets:
indices = sorted(list(indices))
rest_count = max_decoding_length - count
indices = indices[:rest_count]
count += len(indices)
branch = []
for i in indices:
branch.append(output_ids[i + 1])
branches.append(branch)
if count >= max_decoding_length:
break
ids = [output_ids[0]]
masks = np.tril(np.ones((count + 1, count + 1)), 0)
count = 1
for branch in branches:
ids.extend(branch)
length = len(branch)
masks[count:count + length, 1:count] = 0
count += length
return ids, masks, [count - 1]
def one_get(self, token_ids, decoding_length=64, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',
idx=0):
assert mode in ('input', 'output', 'mix')
decoding_masks = self.default_mask
if decoding_length <= 1 or branch_length == 0:
return token_ids[-1:], decoding_masks, []
decoding_ids = None
sizes = [0, 0]
for i, t in enumerate(token_ids):
tree = self.mem.get(t, None)
if tree is not None:
ids = token_ids[i + 1:]
if t in self.stop_words and len(ids) == 0:
continue
decoding_ids, decoding_masks, sizes = tree.get_one_branch(ids,
max_length=branch_length,
mode=mode,
idx=idx)
s = len(decoding_ids)
# token count is enough, no need to retrieve again
if s >= branch_length // 2:
break
print(f'{decoding_ids=}')
if decoding_ids is None:
decoding_ids = token_ids[-1:]
return decoding_ids, decoding_masks, sizes
def bat_get(self, token_id_list, decoding_length=64, branch_length=8, decoding_cursors=None, mode='output',
indices=None, decoding_mode='hier'):
assert mode in ('input', 'output', 'mix')
assert decoding_mode in ('hier', 'one')
bs = len(token_id_list)
assert bs == len(decoding_cursors) and bs == len(indices), f'{bs=} {len(decoding_cursors)=} {len(indices)=}'
decoding_id_list = []
decoding_mask_list = []
size_list = []
min_cur = min(decoding_cursors)
max_cur = max(decoding_cursors)
bs = len(decoding_cursors)
for sub_idx, token_ids in enumerate(token_id_list):
update_decoding_length = decoding_length // bs
min_input_size = 0
min_output_size = max(update_decoding_length // 2, 1)
method_name = decoding_mode + '_get'
decoding_ids, decoding_masks, sizes = getattr(self, method_name)(token_ids,
decoding_length=update_decoding_length,
branch_length=branch_length,
min_input_size=min_input_size,
min_output_size=min_output_size,
mode=mode,
idx=indices[sub_idx])
decoding_id_list.append(decoding_ids)
decoding_mask_list.append(decoding_masks)
size_list.append(sizes)
bs = len(token_id_list)
max_size = max([len(x) for x in decoding_id_list])
decoding_masks = np.zeros((bs, max_size, max_cur - min_cur + max_size), dtype=np.int64)
for i, decoding_ids in enumerate(decoding_id_list):
org_size = len(decoding_ids)
gap = max_size - org_size
if gap > 0:
decoding_ids.extend([self.eos] * gap)
cur = decoding_cursors[i]
decoding_masks[i, :org_size, cur - min_cur:cur - min_cur + org_size] = decoding_mask_list[i]
decoding_masks[i, :, :cur - min_cur + 1] = 1
return decoding_id_list, decoding_masks, size_list
def fresh(self):
self.mem = {}
def reset_input_freqs(self):
if len(self._update_input_trees) > 0:
for c in self._update_input_trees:
c.reset_input_freq()
self._update_input_trees.clear()
def squeeze_branch_counts(self):
if len(self._update_trees) >= 1024:
for c in self._update_trees:
c.squeeze()
self._update_trees.clear()
def save_mem(self, save_mem_dir):
cache_mem = self.mem
serialized_object = pickle.dumps(cache_mem)
json_string = json.dumps(serialized_object.decode('latin-1'))
with open(save_mem_dir, 'w') as f:
json.dump(json_string, f)
def load_mem(self, load_mem_dir):
with open(load_mem_dir, 'r') as f:
json_string = json.load(f)
deserialized_object = pickle.loads(json.loads(json_string).encode('latin-1'))
cache_mem = deserialized_object
self.mem = cache_mem
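As a side note on the save_mem/load_mem pair above: the cache is pickled, the bytes are decoded as latin-1, and the result is JSON-encoded twice before being written to disk. The snippet below is a minimal, self-contained sketch of that round trip, using a plain dict in place of the real {token_id: Tree} memory; the file name and the dummy data are illustrative assumptions, not part of the repository.

import json
import pickle

cache_mem = {101: ["just", "an", "illustration"]}  # stands in for {token_id: Tree}

# save_mem: pickle -> latin-1 string -> JSON string -> file
serialized_object = pickle.dumps(cache_mem)
json_string = json.dumps(serialized_object.decode('latin-1'))
with open('lookahead_mem.json', 'w') as f:
    json.dump(json_string, f)

# load_mem: file -> JSON string -> latin-1 bytes -> unpickle
with open('lookahead_mem.json', 'r') as f:
    json_string = json.load(f)
restored = pickle.loads(json.loads(json_string).encode('latin-1'))
assert restored == cache_mem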
# Path: pia/lookahead/common/lookahead_generation_utils.py
class GenerationMode(ExplicitEnum):
"""
Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.
"""
# Non-beam methods
CONTRASTIVE_SEARCH = "contrastive_search"
GREEDY_SEARCH = "greedy_search"
LOOKAHEAD_GENERATION = "lookahead_generation"
SAMPLE = "sample"
ASSISTED_GENERATION = "assisted_generation"
# Beam methods
BEAM_SEARCH = "beam_search"
BEAM_SAMPLE = "beam_sample"
CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
GROUP_BEAM_SEARCH = "group_beam_search"
# Path: pia/lookahead/common/lookahead_generation_utils.py
class LookaheadDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for
each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: torch.LongTensor = None
scores: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
kwargs: Optional[Dict] = None
# Path: pia/lookahead/common/pretrained_model_batch.py
import copy
import inspect
import time
import warnings
import numpy as np
import torch
import torch.distributed as dist
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from transformers import PreTrainedModel
from transformers.generation.beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation.beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer
from transformers.generation.logits_process import (
LogitsProcessorList,
MinLengthLogitsProcessor,
)
from transformers.generation.stopping_criteria import (
MaxLengthCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
from transformers.generation.utils import (
GreedySearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput)
from transformers.generation.utils import (
GreedySearchOutput,
GenerateOutput)
from transformers.utils import ModelOutput, logging
from transformers.generation.configuration_utils import GenerationConfig
from pia.lookahead.common.lookahead_cache import LookaheadCache
from pia.lookahead.common.lookahead_generation_utils import GenerationMode, LookaheadDecoderOnlyOutput
lengths = np.sum(decoding_masks[ib, :, cur - min_cur:], axis=1) - 1
larr = np.concatenate([lengths[:-1][(lengths[1:] - lengths[:-1]) <= 0], lengths[-1:]], axis=0)
ls = ','.join(larr.astype(np.int32).astype(np.str_))
decoding_qids = decoding_kwargs['decoding_qids'][ib]
size_str = ','.join([str(x) for x in decoding_lengths[ib]])
print(
f'batch_index:{ib}/{bs} decoding_length:{len(decoding_ids_)} accept_length:{max_match_count} '
f'query:{decoding_qids} source:{size_str} lengths:{ls} index:{max_match_index} '
f'branch_token:{max_decoding_ids_slice} next_token:{max_next_token_slice}')
model_kwargs['next_tokens'] = torch.tensor(update_next_token_list, device=device)
model_kwargs['next_token_list'] = update_next_token_list
model_kwargs['next_tokens_scores'] = []
model_kwargs['input_ids'] = input_ids
decoding_kwargs['decoding_cursors'] = decoding_cursors
decoding_kwargs['dls'].extend(dls)
decoding_kwargs['edls'].extend(edls)
model_kwargs['decoding_kwargs'] = decoding_kwargs
return model_kwargs
def _early_stop(self,
unfinished_sequences,
output_ids,
batch_indices,
model_kwargs):
decoding_kwargs = model_kwargs['decoding_kwargs']
input_ids = model_kwargs['input_ids']
unfinished_sequence_list = unfinished_sequences.tolist()
unfinished_index_list = []
for i, (seq,) in enumerate(unfinished_sequence_list):
if seq == 0:
idx = batch_indices[i]
output_ids[idx, :input_ids.size(-1)] = input_ids[i]
else:
unfinished_index_list.append(i)
output_batch_indices = [batch_indices[i] for i in unfinished_index_list]
bs = input_ids.size(0)
finished_count = bs - len(unfinished_index_list)
if finished_count > 0 and bs > 1 and finished_count != bs:
unfinished_indices = torch.tensor(unfinished_index_list, device=unfinished_sequences.device)
unfinished_sequences = unfinished_sequences[unfinished_indices]
model_kwargs['input_ids'] = input_ids[unfinished_indices]
position_ids = model_kwargs.get('position_ids', None)
if position_ids is not None:
position_ids = position_ids[unfinished_indices]
model_kwargs['position_ids'] = position_ids
model_kwargs['attention_mask'] = model_kwargs['attention_mask'][unfinished_indices]
decoding_kwargs = model_kwargs['decoding_kwargs']
decoding_cursors = decoding_kwargs['decoding_cursors']
decoding_kwargs['decoding_cursors'] = [decoding_cursors[i] for i in unfinished_index_list]
batch_indices = decoding_kwargs['batch_indices']
decoding_kwargs['batch_indices'] = [batch_indices[i] for i in unfinished_index_list]
past_key_values = []
for kv in model_kwargs['past_key_values']:
k, v = kv
k = k[unfinished_indices]
v = v[unfinished_indices]
past_key_values.append((k, v))
model_kwargs['past_key_values'] = tuple(past_key_values)
return unfinished_sequences, output_ids, output_batch_indices, model_kwargs
def _update_cache(self, past_key_values, batch_idx, kv_idx, prefix_and_next_count=None, max_match_count=None,
max_match_index=None):
for k, v in past_key_values:
k[batch_idx, :, prefix_and_next_count:prefix_and_next_count + max_match_count] = k[batch_idx, :, kv_idx]
v[batch_idx, :, prefix_and_next_count:prefix_and_next_count + max_match_count] = v[batch_idx, :, kv_idx]
def lookahead_generation(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: bool = False,
streamer: Optional["BaseStreamer"] = None,
**model_kwargs,
) -> Union[GreedySearchOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be
used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()
instead. For an overview of generation strategies and code examples, check the [following
guide](../generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
| output_attentions (`bool`, *optional*, defaults to `False`): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Its-Haze/league-rpc-linux
# Path: league_rpc_linux/colors.py
class Colors:
"""
Dataclass, storing the different colors that is used in the program.
"""
dred = "\033[31m"
dgreen = "\033[32m"
yellow = "\033[33m"
dblue = "\033[34m"
dmagenta = "\033[35m"
dcyan = "\033[36m"
lgrey = "\033[37m"
dgray = "\033[90m"
red = "\033[91m"
green = "\033[92m"
orange = "\033[93m"
blue = "\033[94m"
magenta = "\033[95m"
cyan = "\033[96m"
white = "\033[97m"
reset = "\033[0m"
@property
def logo(self) -> str:
"""Just prints the LEAGUE rpc logo, in your favorite Terminal Emulator."""
return rf"""
{self.yellow} _ {self.dblue} _____ _____ _____ {self.reset}
{self.yellow} | | {self.dblue}| __ \| __ \ / ____|{self.reset}
{self.yellow} | | ___ __ _ __ _ _ _ ___ {self.dblue}| |__) | |__) | | {self.reset}
{self.yellow} | | / _ \/ _` |/ _` | | | |/ _ \ {self.dblue}| _ /| ___/| | {self.reset}
{self.yellow} | |___| __/ (_| | (_| | |_| | __/ {self.dblue}| | \ \| | | |____ {self.reset}
{self.yellow} |______\___|\__,_|\__, |\__,_|\___| {self.dblue}|_| \_\_| \_____|{self.reset}
{self.yellow} __/ | {self.reset}
{self.yellow} |___/ {self.reset}
"""
# Path: league_rpc_linux/lcu_api/base_data.py
async def gather_base_data(connection: Connection, module_data: ModuleData):
data = module_data.client_data
# Epoch time from which league client was started.
await gather_telemetry_data(connection, data)
await gather_summoner_data(connection, data)
# get Online/Away status
await gather_chat_status_data(connection, data)
await gather_ranked_data(connection, data)
await gather_gameflow_data(connection, data)
if data.gameflow_phase == GameFlowPhase.IN_PROGRESS:
# In Game
return
if data.gameflow_phase == GameFlowPhase.NONE:
# In Client
return
await gather_lobby_data(connection, data)
if data.queue_id == -1:
# custom game / practice tool / tutorial lobby
data.gamemode = "PRACTICETOOL"
data.map_id = 11
if data.is_practice:
data.queue = "Practice Tool"
data.max_players = 1
else:
data.queue = "Custom Game"
return
await gather_queue_data(connection, data)
# Path: league_rpc_linux/models/client_data.py
class ClientData:
# Path: league_rpc_linux/models/lcu/current_chat_status.py
class LolChatUser:
AVAILABILITY = "availability"
GAME_NAME = "gameName"
GAME_TAG = "gameTag"
ICON = "icon"
ID = "id"
LAST_SEEN_ONLINE_TIMESTAMP = "lastSeenOnlineTimestamp"
LOL = "lol" # This is a dictionary with unspecified key-value pairs
NAME = "name"
PATCHLINE = "patchline"
PID = "pid"
PLATFORM_ID = "platformId"
PRODUCT = "product"
PRODUCT_NAME = "productName"
PUUID = "puuid"
STATUS_MESSAGE = "statusMessage"
SUMMARY = "summary"
SUMMONER_ID = "summonerId"
TIME = "time"
AVAILABLE_CHAT_STATUSES = {CHAT := "chat", AWAY := "away", ONLINE := "online"}
# Path: league_rpc_linux/models/lcu/current_lobby.py
class LolLobbyLobbyDto:
CAN_START_ACTIVITY = "canStartActivity"
CHAT_ROOM_ID = "chatRoomId"
CHAT_ROOM_KEY = "chatRoomKey"
GAME_CONFIG = "gameConfig"
INVITATIONS = "invitations"
LOCAL_MEMBER = "localMember"
MEMBERS = "members"
PARTY_ID = "partyId"
PARTY_TYPE = "partyType"
RESTRICTIONS = "restrictions"
WARNINGS = "warnings"
# Path: league_rpc_linux/models/lcu/current_lobby.py
class LolLobbyLobbyGameConfigDto:
ALLOWABLE_PREMADE_SIZES = "allowablePremadeSizes"
CUSTOM_LOBBY_NAME = "customLobbyName"
CUSTOM_MUTATOR_NAME = "customMutatorName"
CUSTOM_REWARDS_DISABLED_REASONS = "customRewardsDisabledReasons"
CUSTOM_SPECTATOR_POLICY = "customSpectatorPolicy"
CUSTOM_SPECTATORS = "customSpectators"
CUSTOM_TEAM100 = "customTeam100"
CUSTOM_TEAM200 = "customTeam200"
GAME_MODE = "gameMode"
IS_CUSTOM = "isCustom"
IS_LOBBY_FULL = "isLobbyFull"
IS_TEAM_BUILDER_MANAGED = "isTeamBuilderManaged"
MAP_ID = "mapId"
MAX_HUMAN_PLAYERS = "maxHumanPlayers"
MAX_LOBBY_SIZE = "maxLobbySize"
MAX_TEAM_SIZE = "maxTeamSize"
PICK_TYPE = "pickType"
PREMADE_SIZE_ALLOWED = "premadeSizeAllowed"
QUEUE_ID = "queueId"
SHOW_POSITION_SELECTOR = "showPositionSelector"
# Path: league_rpc_linux/models/lcu/current_queue.py
class LolGameQueuesQueue:
ALLOWABLE_PREMADE_SIZES = "allowablePremadeSizes"
ARE_FREE_CHAMPIONS_ALLOWED = "areFreeChampionsAllowed"
ASSET_MUTATOR = "assetMutator"
CATEGORY = "category"
CHAMPIONS_REQUIRED_TO_PLAY = "championsRequiredToPlay"
DESCRIPTION = "description"
DETAILED_DESCRIPTION = "detailedDescription"
GAME_MODE = "gameMode"
GAME_TYPE_CONFIG = "gameTypeConfig"
ID = "id"
IS_RANKED = "isRanked"
IS_TEAM_BUILDER_MANAGED = "isTeamBuilderManaged"
IS_TEAM_ONLY = "isTeamOnly"
LAST_TOGGLED_OFF_TIME = "lastToggledOffTime"
LAST_TOGGLED_ON_TIME = "lastToggledOnTime"
MAP_ID = "mapId"
MAX_DIVISION_FOR_PREMADE_SIZE2 = "maxDivisionForPremadeSize2"
MAX_LEVEL = "maxLevel"
MAX_SUMMONER_LEVEL_FOR_FIRST_WIN_OF_THE_DAY = "maxSummonerLevelForFirstWinOfTheDay"
MAX_TIER_FOR_PREMADE_SIZE2 = "maxTierForPremadeSize2"
MAXIMUM_PARTICIPANT_LIST_SIZE = "maximumParticipantListSize"
MIN_LEVEL = "minLevel"
MINIMUM_PARTICIPANT_LIST_SIZE = "minimumParticipantListSize"
NAME = "name"
NUM_PLAYERS_PER_TEAM = "numPlayersPerTeam"
QUEUE_AVAILABILITY = "queueAvailability"
QUEUE_REWARDS = "queueRewards"
REMOVAL_FROM_GAME_ALLOWED = "removalFromGameAllowed"
REMOVAL_FROM_GAME_DELAY_MINUTES = "removalFromGameDelayMinutes"
SHORT_NAME = "shortName"
SHOW_POSITION_SELECTOR = "showPositionSelector"
SPECTATOR_ENABLED = "spectatorEnabled"
TYPE = "type"
# Path: league_rpc_linux/models/lcu/current_summoner.py
class Summoner:
ACCOUNT_ID = "accountId"
DISPLAY_NAME = "displayName"
INTERNAL_NAME = "internalName"
NAME_CHANGE_FLAG = "nameChangeFlag"
PERCENT_COMPLETE_FOR_NEXT_LEVEL = "percentCompleteForNextLevel"
PRIVACY = "privacy"
PROFILE_ICON_ID = "profileIconId"
PUUID = "puuid"
REROLL_POINTS = "rerollPoints"
CURRENT_POINTS = "currentPoints"
MAX_ROLL = "maxRolls"
NUMBER_OF_ROLLS = "numberOfRolls"
POINTS_COST_TO_ROLL = "pointsCostToRoll"
SUMMONER_ID = "summonerId"
SUMMONER_LEVEL = "summonerLevel"
UNNAMED = "unnamed"
XP_SINCE_LAST_LEVEL = "xpSinceLastLevel"
XP_UNTIL_NEXT_LEVEL = "xpUntilNextLevel"
# Path: league_rpc_linux/models/module_data.py
class ModuleData:
connector: Connector = field(default_factory=Connector)
client_data: ClientData = field(default_factory=ClientData)
rpc: Optional[Presence] = None
cli_args: Optional[Namespace] = None
# Path: league_rpc_linux/models/rpc_updater.py
class RPCUpdater:
scheduled_update: bool = False
def delay_update(self, module_data: ModuleData):
if not self.scheduled_update:
self.scheduled_update = True
Timer(1.0, self.update_rpc_and_reset_flag, args=(module_data,)).start()
def update_rpc_and_reset_flag(self, module_data: ModuleData):
self.update_rpc(module_data) # Assuming update_rpc is defined elsewhere
self.scheduled_update = False
@staticmethod
def in_client_rpc(
rpc: Presence,
module_data: ModuleData,
) -> None:
"""
Updates the RPC when the user is in the client.
"""
details = f"{module_data.client_data.availability}"
show_emojis: bool = module_data.cli_args.show_emojis # type:ignore
if show_emojis:
status_emojis = f"{'🟢' if module_data.client_data.availability == LolChatUser.ONLINE.capitalize() else ' 🔴'}"
# details = status_emojis + details
details = status_emojis + " " + details
rpc.update(
large_image=f"{PROFILE_ICON_BASE_URL}{module_data.client_data.summoner_icon}.png",
large_text="In Client",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
details=details,
state="In Client",
start=module_data.client_data.application_start_time,
)
@staticmethod
def in_lobby_rpc(
rpc: Presence,
module_data: ModuleData,
is_custom: bool,
) -> None:
if is_custom:
large_image = f"{PROFILE_ICON_BASE_URL}{str(module_data.client_data.summoner_icon)}.png"
large_text = f"{GAME_MODE_CONVERT_MAP.get(module_data.client_data.gamemode, module_data.client_data.gamemode)}"
small_image = BASE_MAP_ICON_URL.format(
map_name=MAP_ICON_CONVERT_MAP.get(module_data.client_data.map_id)
)
small_text = SMALL_TEXT
details = f"In Lobby: {module_data.client_data.queue}"
state = "Custom Lobby"
rpc.update(
large_image=large_image,
large_text=large_text,
small_image=small_image,
small_text=small_text,
details=details,
state=state,
start=module_data.client_data.application_start_time,
)
else:
large_image = f"{PROFILE_ICON_BASE_URL}{str(module_data.client_data.summoner_icon)}.png"
large_text = f"{GAME_MODE_CONVERT_MAP.get(module_data.client_data.gamemode, module_data.client_data.gamemode)}"
small_image = BASE_MAP_ICON_URL.format(
map_name=MAP_ICON_CONVERT_MAP.get(module_data.client_data.map_id)
)
small_text = SMALL_TEXT
details = f"{module_data.client_data.queue}"
state = f"In Lobby ({module_data.client_data.players}/{module_data.client_data.max_players})"
if module_data.cli_args.show_rank:
_large_text, _small_image, _small_text = RPCUpdater.show_ranked_data(
module_data
)
if all([_large_text, _small_image, _small_text]):
large_text, small_image, small_text = (
_large_text,
_small_image,
_small_text,
)
rpc.update(
large_image=large_image,
large_text=large_text,
small_image=small_image,
small_text=f"{small_text}",
details=details,
state=state,
start=module_data.client_data.application_start_time,
)
@staticmethod
def show_ranked_data(
module_data: ModuleData,
) -> tuple[str, ...]:
large_text = small_text = small_image = ""
match RANKED_TYPE_MAPPER.get(module_data.client_data.queue_type):
case "Ranked Solo/Duo":
summoner_rank = module_data.client_data.summoner_rank
if summoner_rank.tier:
(
small_text,
small_image,
) = summoner_rank.rpc_info
large_text = SMALL_TEXT
case "Ranked Flex":
summoner_rank = module_data.client_data.summoner_rank_flex
if summoner_rank.tier:
(
small_text,
small_image,
) = summoner_rank.rpc_info
large_text = SMALL_TEXT
case "Teamfight Tactics (Ranked)":
summoner_rank = module_data.client_data.tft_rank
if summoner_rank.tier:
(
small_text,
small_image,
) = summoner_rank.rpc_info
large_text = SMALL_TEXT
case "Arena":
summoner_rank = module_data.client_data.arena_rank
if summoner_rank.tier:
(
small_text,
small_image,
) = summoner_rank.rpc_info
large_text = SMALL_TEXT
case _:
...
return large_text, small_image, small_text
@staticmethod
def in_queue_rpc(rpc: Presence, module_data: ModuleData) -> None:
large_image = (
f"{PROFILE_ICON_BASE_URL}{module_data.client_data.summoner_icon}.png"
)
large_text = f"{GAME_MODE_CONVERT_MAP.get(module_data.client_data.gamemode, module_data.client_data.gamemode)}"
small_image = BASE_MAP_ICON_URL.format(
map_name=MAP_ICON_CONVERT_MAP.get(module_data.client_data.map_id)
)
small_text = SMALL_TEXT
if module_data.cli_args.show_rank:
_large_text, _small_image, _small_text = RPCUpdater.show_ranked_data(
module_data
)
if all([_large_text, _small_image, _small_text]):
large_text, small_image, small_text = (
_large_text,
_small_image,
_small_text,
)
rpc.update(
large_image=large_image,
large_text=large_text,
small_image=small_image,
small_text=small_text,
details=f"{module_data.client_data.queue}",
state="In Queue",
start=int(time.time()),
)
@staticmethod
def in_champ_select_rpc(rpc: Presence, module_data: ModuleData) -> None:
large_image = (
f"{PROFILE_ICON_BASE_URL}{module_data.client_data.summoner_icon}.png"
)
large_text = f"{GAME_MODE_CONVERT_MAP.get(module_data.client_data.gamemode, module_data.client_data.gamemode)}"
small_image = BASE_MAP_ICON_URL.format(
map_name=MAP_ICON_CONVERT_MAP.get(module_data.client_data.map_id)
)
small_text = SMALL_TEXT
if module_data.cli_args.show_rank:
_large_text, _small_image, _small_text = RPCUpdater.show_ranked_data(
module_data
)
if all([_large_text, _small_image, _small_text]):
large_text, small_image, small_text = (
_large_text,
_small_image,
_small_text,
)
rpc.update(
large_image=large_image,
large_text=large_text,
small_image=small_image,
small_text=small_text,
details=f"{module_data.client_data.queue}",
state="In Champ Select",
start=int(time.time()),
)
# The function that updates discord rich presence, depending on the data
def update_rpc(self, module_data: ModuleData):
data = module_data.client_data
rpc = module_data.rpc
if not isinstance(rpc, Presence):
# Only continue if rpc is of type Presence.
return
match data.gameflow_phase:
# This value will be set by "/lol-gameflow/v1/gameflow-phase"
case GameFlowPhase.IN_PROGRESS:
# Handled by the "inGame" flow in __main__.py
return
case GameFlowPhase.READY_CHECK:
# When the READY check comes, we simply ignore it (the IN_QUEUE RPC will still show).
return
case GameFlowPhase.NONE | GameFlowPhase.WAITING_FOR_STATS | GameFlowPhase.PRE_END_OF_GAME | GameFlowPhase.END_OF_GAME:
self.in_client_rpc(rpc, module_data)
return
case GameFlowPhase.CHAMP_SELECT | GameFlowPhase.GAME_START:
# In Champ Select
self.in_champ_select_rpc(rpc, module_data)
return
case GameFlowPhase.MATCHMAKING | GameFlowPhase.READY_CHECK:
# In Queue
self.in_queue_rpc(rpc, module_data)
return
case GameFlowPhase.LOBBY:
# In Lobby
if data.is_custom or data.is_practice:
self.in_lobby_rpc(rpc, module_data, is_custom=True)
else:
# matchmaking lobby
self.in_lobby_rpc(rpc, module_data, is_custom=False)
return
case _:
# other unhandled gameflow phases
print(f"Unhandled Gameflow Phase: {data.gameflow_phase}")
rpc.update(
large_image=f"{PROFILE_ICON_BASE_URL}{str(data.summoner_icon)}.png",
large_text=f"{data.gameflow_phase}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
details=f"{data.gameflow_phase}",
state="Unhandled Gameflow Phase",
start=module_data.client_data.application_start_time,
)
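RPCUpdater.delay_update above debounces presence updates with a one-second threading.Timer, so a burst of websocket events triggers a single Discord update. The snippet below is a stripped-down sketch of that pattern, with a print() call standing in for the real rpc.update(); the class name and event payloads are made up for illustration.

from threading import Timer

class Debouncer:
    def __init__(self):
        self.scheduled = False

    def delay(self, payload):
        if not self.scheduled:
            self.scheduled = True
            Timer(1.0, self._fire, args=(payload,)).start()

    def _fire(self, payload):
        print(f"update fired once for: {payload}")  # stands in for rpc.update(...)
        self.scheduled = False

d = Debouncer()
for i in range(5):  # a burst of five events collapses into a single update
    d.delay(f"event {i}")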
# Path: league_rpc_linux/lcu_api/lcu_connector.py
from argparse import Namespace
from typing import Any, Optional
from lcu_driver.connection import Connection
from lcu_driver.events.responses import WebsocketEventResponse
from pypresence import Presence
from league_rpc_linux.colors import Colors
from league_rpc_linux.lcu_api.base_data import gather_base_data
from league_rpc_linux.models.client_data import ArenaStats, RankedStats, TFTStats
from league_rpc_linux.models.lcu.current_chat_status import LolChatUser
from league_rpc_linux.models.lcu.current_lobby import (
LolLobbyLobbyDto,
LolLobbyLobbyGameConfigDto,
)
from league_rpc_linux.models.lcu.current_queue import LolGameQueuesQueue
from league_rpc_linux.models.lcu.current_summoner import Summoner
from league_rpc_linux.models.module_data import ModuleData
from league_rpc_linux.models.rpc_updater import RPCUpdater
module_data = ModuleData()
rpc_updater = RPCUpdater()
## WS Events ##
@module_data.connector.ready # type:ignore
async def connect(connection: Connection):
print(
f"{Colors.green}Successfully connected to the League Client API.{Colors.reset}"
)
print(f"\n{Colors.orange}Gathering base data.{Colors.reset}")
await gather_base_data(connection, module_data)
print(f"{Colors.green}Successfully gathered base data.{Colors.reset}")
print(f"\n{Colors.orange}Updating Discord rpc with base data{Colors.reset}")
rpc_updater.delay_update(module_data)
print(f"{Colors.green}Discord RPC successfully updated{Colors.reset}")
print(f"\n{Colors.cyan}LeagueRPC is ready{Colors.reset}")
@module_data.connector.close # type:ignore
async def disconnect(_: Connection):
print(f"{Colors.red}Disconnected from the League Client API.{Colors.reset}")
@module_data.connector.ws.register( # type:ignore
"/lol-summoner/v1/current-summoner", event_types=("UPDATE",)
)
async def summoner_updated(_: Connection, event: WebsocketEventResponse) -> None:
data = module_data.client_data
event_data: dict[str, Any] = event.data # type:ignore
data.summoner_name = event_data[Summoner.DISPLAY_NAME]
data.summoner_level = event_data[Summoner.SUMMONER_LEVEL]
data.summoner_id = event_data[Summoner.SUMMONER_ID]
data.summoner_icon = event_data[Summoner.PROFILE_ICON_ID]
rpc_updater.delay_update(module_data)
@module_data.connector.ws.register( # type:ignore
| "/lol-chat/v1/me", event_types=("UPDATE",) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lipku/metahuman-stream
# Path: nerf_triplane/utils.py
def custom_meshgrid(*args):
# ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid
if pver.parse(torch.__version__) < pver.parse('1.10'):
return torch.meshgrid(*args)
else:
return torch.meshgrid(*args, indexing='ij')
# Path: nerf_triplane/utils.py
def get_audio_features(features, att_mode, index):
if att_mode == 0:
return features[[index]]
elif att_mode == 1:
left = index - 8
pad_left = 0
if left < 0:
pad_left = -left
left = 0
auds = features[left:index]
if pad_left > 0:
# pad may be longer than auds, so do not use zeros_like
auds = torch.cat([torch.zeros(pad_left, *auds.shape[1:], device=auds.device, dtype=auds.dtype), auds], dim=0)
return auds
elif att_mode == 2:
left = index - 4
right = index + 4
pad_left = 0
pad_right = 0
if left < 0:
pad_left = -left
left = 0
if right > features.shape[0]:
pad_right = right - features.shape[0]
right = features.shape[0]
auds = features[left:right]
if pad_left > 0:
auds = torch.cat([torch.zeros_like(auds[:pad_left]), auds], dim=0)
if pad_right > 0:
auds = torch.cat([auds, torch.zeros_like(auds[:pad_right])], dim=0) # [8, 16]
return auds
else:
raise NotImplementedError(f'wrong att_mode: {att_mode}')
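To make the att_mode=2 branch of get_audio_features above concrete: it slices an 8-frame window centred on index and zero-pads whichever side falls outside the feature array. The check below replays that logic with a random tensor; the (100, 29, 16) shape is an illustrative assumption.

import torch

features = torch.randn(100, 29, 16)
index = 2
left, right = index - 4, index + 4
pad_left = max(0, -left)
pad_right = max(0, right - features.shape[0])
auds = features[max(left, 0):min(right, features.shape[0])]
if pad_left > 0:
    auds = torch.cat([torch.zeros_like(auds[:pad_left]), auds], dim=0)
if pad_right > 0:
    auds = torch.cat([auds, torch.zeros_like(auds[:pad_right])], dim=0)
assert auds.shape == (8, 29, 16) and torch.all(auds[:pad_left] == 0)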
# Path: nerf_triplane/utils.py
@torch.cuda.amp.autocast(enabled=False)
def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str='XYZ') -> torch.Tensor:
"""
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians as tensor of shape (..., 3).
convention: Convention string of three uppercase letters from
{"X", "Y", and "Z"}.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
# print(euler_angles, euler_angles.dtype)
if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
raise ValueError("Invalid input euler angles.")
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = [
_axis_angle_rotation(c, e)
for c, e in zip(convention, torch.unbind(euler_angles, -1))
]
return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2])
# Path: nerf_triplane/utils.py
@torch.cuda.amp.autocast(enabled=False)
def convert_poses(poses):
# poses: [B, 4, 4]
# return [B, 3], 4 rot, 3 trans
out = torch.empty(poses.shape[0], 6, dtype=torch.float32, device=poses.device)
out[:, :3] = matrix_to_euler_angles(poses[:, :3, :3])
out[:, 3:] = poses[:, :3, 3]
return out
# Path: nerf_triplane/renderer.py
import math
import trimesh
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
from .utils import custom_meshgrid, get_audio_features, euler_angles_to_matrix, convert_poses
# cascading
for cas in range(self.cascade):
bound = min(2 ** cas, self.bound)
half_grid_size = bound / self.grid_size
# scale to current cascade's resolution
cas_xyzs = xyzs * (bound - half_grid_size)
# add noise in [-hgs, hgs]
cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size
# query density
aud_norms = self.density(cas_xyzs.to(tmp_grid.dtype), enc_a, eye)['ambient_aud'].reshape(-1).detach().to(tmp_grid.dtype)
# assign
tmp_grid[cas, indices] = aud_norms
# dilate the density_grid (less aggressive culling)
tmp_grid = raymarching.morton3D_dilation(tmp_grid)
return tmp_grid
# # ema update
# valid_mask = (self.density_grid >= 0) & (tmp_grid >= 0)
# self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])
@torch.no_grad()
def get_eye_grid(self, S=128):
# call before each epoch to update extra states.
if not self.cuda_ray:
return
# use random auds (different expressions should have similar density grid...)
rand_idx = random.randint(0, self.aud_features.shape[0] - 1)
auds = get_audio_features(self.aud_features, self.att, rand_idx).to(self.density_bitfield.device)
# encode audio
enc_a = self.encode_audio(auds)
tmp_grid = torch.zeros_like(self.density_grid)
# use a random eye area based on training dataset's statistics...
if self.exp_eye:
eye = self.eye_area[[rand_idx]].to(self.density_bitfield.device) # [1, 1]
else:
eye = None
# full update
X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S)
for xs in X:
for ys in Y:
for zs in Z:
# construct points
xx, yy, zz = custom_meshgrid(xs, ys, zs)
coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)
indices = raymarching.morton3D(coords).long() # [N]
xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]
# cascading
for cas in range(self.cascade):
bound = min(2 ** cas, self.bound)
half_grid_size = bound / self.grid_size
# scale to current cascade's resolution
cas_xyzs = xyzs * (bound - half_grid_size)
# add noise in [-hgs, hgs]
cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size
# query density
eye_norms = self.density(cas_xyzs.to(tmp_grid.dtype), enc_a, eye)['ambient_eye'].reshape(-1).detach().to(tmp_grid.dtype)
# assign
tmp_grid[cas, indices] = eye_norms
# dilate the density_grid (less aggressive culling)
tmp_grid = raymarching.morton3D_dilation(tmp_grid)
return tmp_grid
# # ema update
# valid_mask = (self.density_grid >= 0) & (tmp_grid >= 0)
# self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])
def render(self, rays_o, rays_d, auds, bg_coords, poses, staged=False, max_ray_batch=4096, **kwargs):
# rays_o, rays_d: [B, N, 3], assumes B == 1
# auds: [B, 29, 16]
# eye: [B, 1]
# bg_coords: [1, N, 2]
# return: pred_rgb: [B, N, 3]
_run = self.run_cuda
B, N = rays_o.shape[:2]
device = rays_o.device
# never stage when cuda_ray
if staged and not self.cuda_ray:
# not used
raise NotImplementedError
else:
results = _run(rays_o, rays_d, auds, bg_coords, poses, **kwargs)
return results
def render_torso(self, rays_o, rays_d, auds, bg_coords, poses, staged=False, max_ray_batch=4096, **kwargs):
# rays_o, rays_d: [B, N, 3], assumes B == 1
# auds: [B, 29, 16]
# eye: [B, 1]
# bg_coords: [1, N, 2]
# return: pred_rgb: [B, N, 3]
_run = self.run_torso
B, N = rays_o.shape[:2]
device = rays_o.device
# never stage when cuda_ray
if staged and not self.cuda_ray:
# not used
raise NotImplementedError
else:
| results = _run(rays_o, bg_coords, poses, **kwargs) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mattcar15/bambu-connect
# Path: bambu_connect/CameraClient.py
class CameraClient:
def __init__(self, hostname, access_code, port=6000):
self.hostname = hostname
self.port = port
self.username = "bblp"
self.auth_packet = self.__create_auth_packet__(self.username, access_code)
self.streaming = False
self.stream_thread = None
def __create_auth_packet__(self, username, access_code):
d = bytearray()
d += struct.pack("IIL", 0x40, 0x3000, 0x0)
d += username.encode("ascii").ljust(32, b"\x00")
d += access_code.encode("ascii").ljust(32, b"\x00")
return d
def __find_jpeg__(self, buf, start_marker, end_marker):
start = buf.find(start_marker)
end = buf.find(end_marker, start + len(start_marker))
if start != -1 and end != -1:
return buf[start : end + len(end_marker)], buf[end + len(end_marker) :]
return None, buf
def capture_frame(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
jpeg_start = bytearray.fromhex("ff d8 ff e0")
jpeg_end = bytearray.fromhex("ff d9")
read_chunk_size = 1024
with socket.create_connection((self.hostname, self.port)) as sock:
with ctx.wrap_socket(sock, server_hostname=self.hostname) as ssock:
ssock.write(self.auth_packet)
buf = bytearray()
while True:
dr = ssock.recv(read_chunk_size)
if not dr:
break
buf += dr
img, buf = self.__find_jpeg__(buf, jpeg_start, jpeg_end)
if img:
return bytes(img)
def capture_stream(self, img_callback):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
jpeg_start = bytearray.fromhex("ff d8 ff e0")
jpeg_end = bytearray.fromhex("ff d9")
read_chunk_size = 1024
with socket.create_connection((self.hostname, self.port)) as sock:
with ctx.wrap_socket(sock, server_hostname=self.hostname) as ssock:
ssock.write(self.auth_packet)
buf = bytearray()
while self.streaming:
dr = ssock.recv(read_chunk_size)
if not dr:
break
buf += dr
img, buf = self.__find_jpeg__(buf, jpeg_start, jpeg_end)
if img:
img_callback(bytes(img))
def start_stream(self, img_callback):
if self.streaming:
print("Stream already running.")
return
self.streaming = True
self.stream_thread = threading.Thread(
target=self.capture_stream, args=(img_callback,)
)
self.stream_thread.start()
def stop_stream(self):
if not self.streaming:
print("Stream is not running.")
return
self.streaming = False
self.stream_thread.join()
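The camera stream above is parsed by scanning a byte buffer for the JPEG start (ff d8 ff e0) and end (ff d9) markers. The following self-contained check reproduces the __find_jpeg__ logic on synthetic bytes instead of the TLS socket; the "noise"/"payload" content is made up.

jpeg_start = bytearray.fromhex("ff d8 ff e0")
jpeg_end = bytearray.fromhex("ff d9")

def find_jpeg(buf, start_marker, end_marker):
    start = buf.find(start_marker)
    end = buf.find(end_marker, start + len(start_marker))
    if start != -1 and end != -1:
        return buf[start:end + len(end_marker)], buf[end + len(end_marker):]
    return None, buf

buf = bytearray(b"noise") + jpeg_start + b"payload" + jpeg_end + b"rest"
img, remainder = find_jpeg(buf, jpeg_start, jpeg_end)
assert img[:4] == jpeg_start and img[-2:] == jpeg_end
assert remainder == b"rest"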
# Path: bambu_connect/WatchClient.py
class WatchClient:
def __init__(self, hostname: str, access_code: str, serial: str):
self.hostname = hostname
self.access_code = access_code
self.serial = serial
self.client = self.__setup_mqtt_client()
self.values = {}
self.printerStatus = None
self.message_callback = None
def __setup_mqtt_client(self) -> mqtt.Client:
client = mqtt.Client()
client.username_pw_set("bblp", self.access_code)
client.tls_set(tls_version=ssl.PROTOCOL_TLS, cert_reqs=ssl.CERT_NONE)
client.tls_insecure_set(True)
client.on_connect = self.on_connect
client.on_message = self.on_message
return client
def on_connect(self, client: mqtt.Client, userdata: Any, flags: Any, rc: int):
client.subscribe(f"device/{self.serial}/report")
if self.on_connect_callback:
self.on_connect_callback()
def start(
self,
message_callback: Optional[Callable[[PrinterStatus], None]] = None,
on_connect_callback: Optional[Callable[[], None]] = None,
):
self.message_callback = message_callback
self.on_connect_callback = on_connect_callback
self.client.connect(self.hostname, 8883, 60)
self.client.loop_start()
def stop(self):
self.client.loop_stop()
self.client.disconnect()
def on_message(self, client, userdata, msg):
doc = json.loads(msg.payload)
try:
if not doc:
return
self.values = dict(self.values, **doc["print"])
self.printerStatus = PrinterStatus(**self.values)
if self.message_callback:
self.message_callback(self.printerStatus)
except KeyError:
pass
# Path: bambu_connect/ExecuteClient.py
class ExecuteClient:
def __init__(self, hostname: str, access_code: str, serial: str):
self.hostname = hostname
self.access_code = access_code
self.serial = serial
self.client = self.__setup_mqtt_client()
def __setup_mqtt_client(self):
client = mqtt.Client()
client.username_pw_set("bblp", self.access_code)
client.tls_set(tls_version=ssl.PROTOCOL_TLS, cert_reqs=ssl.CERT_NONE)
client.tls_insecure_set(True)
client.connect(self.hostname, 8883, 60)
return client
def disconnect(self):
self.client.disconnect()
def send_command(self, payload):
self.client.loop_start()
self.client.publish(f"device/{self.serial}/request", payload)
self.client.loop_stop()
def send_gcode(self, gcode):
payload = f'{{"print": {{"command": "gcode_line", "sequence_id": 2006, "param": "{gcode} \n"}}, "user_id":"1234567890"}}'
self.send_command(payload)
# This dumps all the printer stats; for minor print updates the printer will send them automatically.
def dump_info(self):
payload = f'{{"pushing": {{ "sequence_id": 1, "command": "pushall"}}, "user_id":"1234567890"}}'
self.send_command(payload)
# When using this, choose the "send to printer" option in the Bambu or Cura slicer. Provide the file name (no path).
def start_print(self, file):
payload = json.dumps(
{
"print": {
"sequence_id": 13,
"command": "project_file",
"param": "Metadata/plate_1.gcode",
"subtask_name": f"{file}",
"url": f"ftp://{file}",
"bed_type": "auto",
"timelapse": False,
"bed_leveling": True,
"flow_cali": False,
"vibration_cali": True,
"layer_inspect": False,
"use_ams": False,
"profile_id": "0",
"project_id": "0",
"subtask_id": "0",
"task_id": "0",
}
}
)
self.send_command(payload)
# Path: bambu_connect/FileClient.py
class FileClient:
def __init__(self, hostname: str, access_code: str, serial: str):
self.hostname = hostname
self.access_code = access_code
self.serial = serial
def get_files(self, directory="/", extension=".3mf"):
command = [
"curl",
"--ftp-pasv",
"--insecure",
f"ftps://{self.hostname}{directory}",
"--user",
f"bblp:{self.access_code}",
]
result = subprocess.run(command, capture_output=True, text=True)
filtered_files = []
for line in result.stdout.split("\n"):
if line.strip():
parts = re.split(r"\s+", line, maxsplit=8)
filename = parts[-1]
if filename.endswith(extension):
filtered_files.append(filename)
return filtered_files
def download_file(self, remote_path: str, local_path: str, verbose=True):
if not os.path.exists(local_path):
os.makedirs(local_path)
local_file_path = os.path.join(local_path, os.path.basename(remote_path))
command = [
"curl",
"-o",
local_file_path,
"--ftp-pasv",
"--insecure",
f"ftps://{self.hostname}{remote_path}",
"--user",
f"bblp:{self.access_code}",
]
if verbose:
result = subprocess.run(command)
else:
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
if verbose:
print(result.stderr.decode())
return False
return True
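FileClient.get_files above shells out to curl and then splits each listing line on whitespace with maxsplit=8, taking the last field as the file name (so names containing spaces stay intact). A small offline check of that parse, using a made-up listing line in curl's usual FTP output format:

import re

line = "-rw-r--r--   1 user  group   123456 Jan 01 12:00 benchy.3mf"
parts = re.split(r"\s+", line, maxsplit=8)
filename = parts[-1]
assert filename == "benchy.3mf" and filename.endswith(".3mf")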
# Path: bambu_connect/utils/models.py
class PrinterStatus:
upload: Optional[Upload] = None
nozzle_temper: Optional[float] = None
nozzle_target_temper: Optional[float] = None
bed_temper: Optional[float] = None
bed_target_temper: Optional[float] = None
chamber_temper: Optional[float] = None
mc_print_stage: Optional[str] = None
heatbreak_fan_speed: Optional[str] = None
cooling_fan_speed: Optional[str] = None
big_fan1_speed: Optional[str] = None
big_fan2_speed: Optional[str] = None
mc_percent: Optional[int] = None
mc_remaining_time: Optional[int] = None
ams_status: Optional[int] = None
ams_rfid_status: Optional[int] = None
hw_switch_state: Optional[int] = None
spd_mag: Optional[int] = None
spd_lvl: Optional[int] = None
print_error: Optional[int] = None
lifecycle: Optional[str] = None
wifi_signal: Optional[str] = None
gcode_state: Optional[str] = None
gcode_file_prepare_percent: Optional[str] = None
queue_number: Optional[int] = None
queue_total: Optional[int] = None
queue_est: Optional[int] = None
queue_sts: Optional[int] = None
project_id: Optional[str] = None
profile_id: Optional[str] = None
task_id: Optional[str] = None
subtask_id: Optional[str] = None
subtask_name: Optional[str] = None
gcode_file: Optional[str] = None
stg: Optional[List[Any]] = None
stg_cur: Optional[int] = None
print_type: Optional[str] = None
home_flag: Optional[int] = None
mc_print_line_number: Optional[str] = None
mc_print_sub_stage: Optional[int] = None
sdcard: Optional[bool] = None
force_upgrade: Optional[bool] = None
mess_production_state: Optional[str] = None
layer_num: Optional[int] = None
total_layer_num: Optional[int] = None
s_obj: Optional[List[Any]] = None
fan_gear: Optional[int] = None
hms: Optional[List[Any]] = None
online: Optional[Online] = None
ams: Optional[AMS] = None
ipcam: Optional[IPCam] = None
vt_tray: Optional[VTTray] = None
lights_report: Optional[List[LightsReport]] = None
upgrade_state: Optional[UpgradeState] = None
command: Optional[str] = None
msg: Optional[int] = None
sequence_id: Optional[str] = None
def __init__(self, **data):
self.upload = Upload(**data["upload"]) if "upload" in data else None
self.nozzle_temper = data.get("nozzle_temper")
self.nozzle_target_temper = data.get("nozzle_target_temper")
self.bed_temper = data.get("bed_temper")
self.bed_target_temper = data.get("bed_target_temper")
self.chamber_temper = data.get("chamber_temper")
self.mc_print_stage = data.get("mc_print_stage")
self.heatbreak_fan_speed = data.get("heatbreak_fan_speed")
self.cooling_fan_speed = data.get("cooling_fan_speed")
self.big_fan1_speed = data.get("big_fan1_speed")
self.big_fan2_speed = data.get("big_fan2_speed")
self.mc_percent = data.get("mc_percent")
self.mc_remaining_time = data.get("mc_remaining_time")
self.ams_status = data.get("ams_status")
self.ams_rfid_status = data.get("ams_rfid_status")
self.hw_switch_state = data.get("hw_switch_state")
self.spd_mag = data.get("spd_mag")
self.spd_lvl = data.get("spd_lvl")
self.print_error = data.get("print_error")
self.lifecycle = data.get("lifecycle")
self.wifi_signal = data.get("wifi_signal")
self.gcode_state = data.get("gcode_state")
self.gcode_file_prepare_percent = data.get("gcode_file_prepare_percent")
self.queue_number = data.get("queue_number")
self.queue_total = data.get("queue_total")
self.queue_est = data.get("queue_est")
self.queue_sts = data.get("queue_sts")
self.project_id = data.get("project_id")
self.profile_id = data.get("profile_id")
self.task_id = data.get("task_id")
self.subtask_id = data.get("subtask_id")
self.subtask_name = data.get("subtask_name")
self.gcode_file = data.get("gcode_file")
self.stg = data.get("stg", [])
self.stg_cur = data.get("stg_cur")
self.print_type = data.get("print_type")
self.home_flag = data.get("home_flag")
self.mc_print_line_number = data.get("mc_print_line_number")
self.mc_print_sub_stage = data.get("mc_print_sub_stage")
self.sdcard = data.get("sdcard", False)
self.force_upgrade = data.get("force_upgrade", False)
self.mess_production_state = data.get("mess_production_state")
self.layer_num = data.get("layer_num")
self.total_layer_num = data.get("total_layer_num")
self.s_obj = data.get("s_obj", [])
self.fan_gear = data.get("fan_gear")
self.hms = data.get("hms", [])
self.online = Online(**data["online"]) if "online" in data else None
self.ams = AMS(**data["ams"]) if "ams" in data else None
self.ipcam = IPCam(**data["ipcam"]) if "ipcam" in data else None
self.vt_tray = VTTray(**data["vt_tray"]) if "vt_tray" in data else None
self.lights_report = [LightsReport(**lr) for lr in data.get("lights_report", [])]
self.upgrade_state = UpgradeState(**data["upgrade_state"]) if "upgrade_state" in data else None
self.command = data.get("command")
self.msg = data.get("msg")
self.sequence_id = data.get("sequence_id")
# Path: bambu_connect/BambuClient.py
from .CameraClient import CameraClient
from .WatchClient import WatchClient
from .ExecuteClient import ExecuteClient
from .FileClient import FileClient
from .utils.models import PrinterStatus
from typing import Callable, Dict, Optional, Any
class BambuClient:
def __init__(self, hostname: str, access_code: str, serial: str):
self.cameraClient = CameraClient(hostname, access_code)
self.watchClient = WatchClient(hostname, access_code, serial)
self.executeClient = ExecuteClient(hostname, access_code, serial)
self.fileClient = FileClient(hostname, access_code, serial)
def __del__(self):
self.executeClient.disconnect()
############# Camera Wrappers #############
def start_camera_stream(self, img_callback):
self.cameraClient.start_stream(img_callback)
def stop_camera_stream(self):
self.cameraClient.stop_stream()
def capture_camera_frame(self):
return self.cameraClient.capture_frame()
############# WatchClient Wrappers #############
def start_watch_client(
self,
message_callback: Optional[Callable[[PrinterStatus], None]] = None,
on_connect_callback: Optional[Callable[[], None]] = None,
):
| self.watchClient.start(message_callback, on_connect_callback) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: linyq2117/TagCLIP
# Path: utils.py
def scoremap2bbox(scoremap, threshold, multi_contour_eval=False):
height, width = scoremap.shape
scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)
_, thr_gray_heatmap = cv2.threshold(
src=scoremap_image,
thresh=int(threshold * np.max(scoremap_image)),
maxval=255,
type=cv2.THRESH_BINARY)
contours = cv2.findContours(
image=thr_gray_heatmap,
mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_SIMPLE)[_CONTOUR_INDEX]
if len(contours) == 0:
return np.asarray([[0, 0, 0, 0]]), 1
if not multi_contour_eval:
contours = [max(contours, key=cv2.contourArea)]
estimated_boxes = []
for contour in contours:
x, y, w, h = cv2.boundingRect(contour)
x0, y0, x1, y1 = x, y, x + w, y + h
x1 = min(x1, width - 1)
y1 = min(y1, height - 1)
estimated_boxes.append([x0, y0, x1, y1])
return np.asarray(estimated_boxes), len(contours)
# Path: utils.py
def parse_xml_to_dict(xml):
"""
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if len(xml) == 0:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
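For context, parse_xml_to_dict above turns a VOC-style annotation tree into nested dicts, collecting repeated <object> nodes into a list. A brief usage sketch, assuming the function defined above is in scope and using a made-up annotation:

from lxml import etree

xml_bytes = (b"<annotation><filename>000001.jpg</filename>"
             b"<object><name>dog</name></object>"
             b"<object><name>person</name></object></annotation>")
data = parse_xml_to_dict(etree.fromstring(xml_bytes))["annotation"]
assert data["filename"] == "000001.jpg"
assert [o["name"] for o in data["object"]] == ["dog", "person"]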
# Path: utils.py
def _convert_image_to_rgb(image):
return image.convert("RGB")
# Path: utils.py
def compute_AP(predictions, labels):
num_class = predictions.size(1)
ap = torch.zeros(num_class).to(predictions.device)
empty_class = 0
for idx_cls in range(num_class):
prediction = predictions[:, idx_cls]
label = labels[:, idx_cls]
#mask = label.abs() == 1
if (label > 0).sum() == 0:
empty_class += 1
continue
binary_label = torch.clamp(label, min=0, max=1)
sorted_pred, sort_idx = prediction.sort(descending=True)
sorted_label = binary_label[sort_idx]
tmp = (sorted_label == 1).float()
tp = tmp.cumsum(0)
fp = (sorted_label != 1).float().cumsum(0)
num_pos = binary_label.sum()
rec = tp/num_pos
prec = tp/(tp+fp)
ap_cls = (tmp*prec).sum()/num_pos
ap[idx_cls].copy_(ap_cls)
return ap
# Path: utils.py
def compute_F1(predictions, labels, mode_F1, k_val, use_relative=False):
if k_val >= 1:
idx = predictions.topk(dim=1, k=k_val)[1]
predictions.fill_(0)
predictions.scatter_(dim=1, index=idx, src=torch.ones(predictions.size(0), k_val, dtype=predictions.dtype).to(predictions.device))
else:
if use_relative:
ma = predictions.max(dim=1)[0]
mi = predictions.min(dim=1)[0]
step = ma - mi
thres = mi + k_val * step
for i in range(predictions.shape[0]):
predictions[i][predictions[i] > thres[i]] = 1
predictions[i][predictions[i] <= thres[i]] = 0
else:
predictions[predictions > k_val] = 1
predictions[predictions <= k_val] = 0
if mode_F1 == 'overall':
predictions = predictions.bool()
labels = labels.bool()
TPs = ( predictions & labels).sum()
FPs = ( predictions & ~labels).sum()
FNs = (~predictions & labels).sum()
eps = 1.e-9
Ps = TPs / (TPs + FPs + eps)
Rs = TPs / (TPs + FNs + eps)
p = Ps.mean()
r = Rs.mean()
f1 = 2*p*r/(p+r)
elif mode_F1 == 'category':
# calculate P and R
predictions = predictions.bool()
labels = labels.bool()
TPs = ( predictions & labels).sum(axis=0)
FPs = ( predictions & ~labels).sum(axis=0)
FNs = (~predictions & labels).sum(axis=0)
eps = 1.e-9
Ps = TPs / (TPs + FPs + eps)
Rs = TPs / (TPs + FNs + eps)
p = Ps.mean()
r = Rs.mean()
f1 = 2*p*r/(p+r)
elif mode_F1 == 'sample':
# calculate P and R
predictions = predictions.bool()
labels = labels.bool()
TPs = ( predictions & labels).sum(axis=1)
FPs = ( predictions & ~labels).sum(axis=1)
FNs = (~predictions & labels).sum(axis=1)
eps = 1.e-9
Ps = TPs / (TPs + FPs + eps)
Rs = TPs / (TPs + FNs + eps)
p = Ps.mean()
r = Rs.mean()
f1 = 2*p*r/(p+r)
return f1, p, r
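compute_F1 above (and the classify.py code further down in this prompt) binarizes scores with a per-sample relative threshold, thres = min + k_val * (max - min), when use_relative is set. A small numeric sketch with made-up scores:

import torch

predictions = torch.tensor([[0.10, 0.80, 0.30, 0.95]])
k_val = 0.5
ma = predictions.max(dim=1)[0]            # 0.95
mi = predictions.min(dim=1)[0]            # 0.10
thres = mi + k_val * (ma - mi)            # 0.525
binary = (predictions > thres.unsqueeze(1)).float()
print(binary)                              # tensor([[0., 1., 0., 1.]])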
# Path: utils.py
def _transform_resize(h, w):
return Compose([
#Resize(n_px, interpolation=BICUBIC),
Resize((h,w), interpolation=BICUBIC),
#CenterCrop(n_px),
#RandomHorizontalFlip(1.0),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
# Path: clip_text.py
BACKGROUND_CATEGORY_VOC = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','keyboard','helmet',
'cloud','house','mountain','ocean','road','rock','street','valley','bridge','sign',
]
BACKGROUND_CATEGORY_COCO = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','helmet',
'cloud','house','mountain','ocean','road','rock','street','valley','bridge',
]
# Path: classify.py
import clip
import torch
import cv2
import numpy as np
import pickle
import os
import math
import torch.nn.functional as F
import os
import argparse
import warnings
from PIL import Image
from tqdm import tqdm
from lxml import etree
from utils import scoremap2bbox, parse_xml_to_dict, _convert_image_to_rgb, compute_AP, compute_F1, _transform_resize
from clip_text import class_names_voc, BACKGROUND_CATEGORY_VOC, class_names_coco, BACKGROUND_CATEGORY_COCO, class_names_coco_stuff182_dict, coco_stuff_182_to_27
gt_label_id = []
with torch.no_grad():
text_features = clip.encode_text_with_prompt_ensemble(model, class_names, device)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
for im_idx, im in enumerate(tqdm(image_list)):
image_path = os.path.join(args.img_root, im)
label_id_list = all_label_list[im_idx]
label_id_list = [int(lid) for lid in label_id_list]
if args.dataset == 'cocostuff':
label_id_list = [coco_stuff_182_to_171[int(lid)] for lid in label_id_list]
gt_label_id.append(label_id_list)
pil_img = Image.open(image_path)
array_img = np.array(pil_img)
ori_height, ori_width = array_img.shape[:2]
if len(array_img.shape) == 2:
array_img = np.stack([array_img, array_img, array_img], axis=2)
pil_img = Image.fromarray(np.uint8(array_img))
if model_type == 'clip':
patch_size = 16
preprocess = _transform_resize(int(np.ceil(int(ori_height) / patch_size) * patch_size), int(np.ceil(int(ori_width) / patch_size) * patch_size))
image = preprocess(pil_img).unsqueeze(0).to(device)
with torch.no_grad():
# Extract image features
h, w = image.shape[-2], image.shape[-1]
image_features, attn_weight_list = model.encode_image_tagclip(image, h, w, attn_mask=1)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
attn_weight = [aw[:, 1:, 1:] for aw in attn_weight_list]
attn_vote = torch.stack(attn_weight, dim=0).squeeze()
thres0 = attn_vote.reshape(attn_vote.shape[0], -1)
thres0 = torch.mean(thres0, dim=-1).reshape(attn_vote.shape[0], 1, 1)
thres0 = thres0.repeat(1, attn_vote.shape[1], attn_vote.shape[2])
if args.dataset == 'cocostuff':
attn_weight = torch.stack(attn_weight, dim=0)[:-1]
else:
attn_weight = torch.stack(attn_weight, dim=0)[8:-1]
attn_cnt = attn_vote > thres0
attn_cnt = attn_cnt.float()
attn_cnt = torch.sum(attn_cnt, dim=0)
attn_cnt = attn_cnt >= 4
attn_weight = torch.mean(attn_weight, dim=0)[0]
attn_weight = attn_weight * attn_cnt.float()
logit_scale = model.logit_scale.exp()
logits = logit_scale * image_features @ text_features.t()#torch.Size([1, 197, 81])
logits = logits[:, 1:, :]
logits = logits.softmax(dim=-1)
logits_coarse = logits.squeeze()
logits = torch.matmul(attn_weight, logits)
logits = logits.squeeze()
logits = mask_attn(logits_coarse, logits, h, w, attn_weight)
logits_max = torch.max(logits, dim=0)[0]
logits_max = logits_max[:NUM_CLASSES]
logits_max = cwr(logits, logits_max, h, w, image, text_features)
logits_max = logits_max.cpu().numpy()
pred_label_id.append(logits_max)
else:
raise NotImplementedError()
gt_one_hot = np.zeros((len(gt_label_id), NUM_CLASSES))
for i in range(len(gt_label_id)):
gt_ids = gt_label_id[i]
for gt_id in gt_ids:
gt_one_hot[i][gt_id] = 1
predictions = torch.tensor(pred_label_id)
labels = torch.tensor(gt_one_hot)
# compute AP
ap = compute_AP(predictions, labels)
print('================================================')
print('mAP: %.6f' % torch.mean(ap))
# compute F1, P, R with specific relative threshold
ma = predictions.max(dim=1)[0]
mi = predictions.min(dim=1)[0]
step = ma - mi
if args.dataset == 'cocostuff':
thres_abs = 0.1
else:
thres_abs = 0.5
F1, P, R = compute_F1(predictions.clone(), labels.clone(), 'overall', thres_abs, use_relative=True)
print('F1: %.6f, Precision: %.6f, Recall: %.6f' % (torch.mean(F1), torch.mean(P), torch.mean(R)))
print('================================================\n')
#save class labels
if args.save_file:
save_path = './output/{}_val_tagclip.txt'.format(args.dataset)
print('>>>writing to {}'.format(save_path))
thres_rel = mi + thres_abs * step
with open(save_path, 'w') as f:
for im_idx, im in enumerate(image_list):
line = im.replace('.jpg','')
for index, value in enumerate(pred_label_id[im_idx]):
if value > thres_rel[im_idx]:
line += " {}".format(index)
if line == im.replace('.jpg',''):
line += " {}".format(np.argmax(pred_label_id[im_idx]))
line += "\n"
f.writelines(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
| parser.add_argument('--dataset', type=str, default='voc2007', choices=['voc2007', 'voc2012', 'coco2014', 'coco2017', 'cocostuff']) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: video-db/videodb-python
# Path: videodb/_constants.py
class ApiPath:
collection = "collection"
upload = "upload"
video = "video"
stream = "stream"
thumbnail = "thumbnail"
upload_url = "upload_url"
transcription = "transcription"
index = "index"
search = "search"
compile = "compile"
workflow = "workflow"
# Path: videodb/collection.py
class Collection:
def __init__(self, _connection, id: str, name: str = None, description: str = None):
self._connection = _connection
self.id = id
self.name = name
self.description = description
def get_videos(self) -> list[Video]:
videos_data = self._connection.get(path=f"{ApiPath.video}")
return [Video(self._connection, **video) for video in videos_data.get("videos")]
def get_video(self, video_id: str) -> Video:
video_data = self._connection.get(path=f"{ApiPath.video}/{video_id}")
return Video(self._connection, **video_data)
def delete_video(self, video_id: str) -> None:
"""Delete the video
:param str video_id: The id of the video to be deleted
:raises InvalidRequestError: If the delete fails
:return: None if the delete is successful
:rtype: None
"""
return self._connection.delete(path=f"{ApiPath.video}/{video_id}")
def search(
self,
query: str,
type: Optional[str] = SearchType.semantic,
result_threshold: Optional[int] = None,
score_threshold: Optional[int] = None,
dynamic_score_percentage: Optional[int] = None,
) -> SearchResult:
search = SearchFactory(self._connection).get_search(type)
return search.search_inside_collection(
self.id,
query,
result_threshold,
score_threshold,
dynamic_score_percentage,
)
def upload(
self,
file_path: str = None,
url: Optional[str] = None,
name: Optional[str] = None,
description: Optional[str] = None,
callback_url: Optional[str] = None,
) -> Video:
upload_data = upload(
self._connection,
file_path,
url,
name,
description,
callback_url,
)
return Video(self._connection, **upload_data) if upload_data else None
# Path: videodb/_utils/_http_client.py
class HttpClient:
"""Http client for making requests"""
def __init__(
self,
api_key: str,
base_url: str,
max_retries: Optional[int] = HttpClientDefaultValues.max_retries,
) -> None:
"""Create a new http client instance
:param str api_key: The api key to use for authentication
:param str base_url: The base url to use for the api
:param int max_retries: (optional) The maximum number of retries to make for a request
"""
self.session = requests.Session()
retries = Retry(
total=max_retries,
backoff_factor=HttpClientDefaultValues.backoff_factor,
status_forcelist=HttpClientDefaultValues.status_forcelist,
)
adapter = HTTPAdapter(max_retries=retries)
self.session.mount("http://", adapter)
self.session.mount("https://", adapter)
self.session.headers.update(
{"x-access-token": api_key, "Content-Type": "application/json"}
)
self.base_url = base_url
logger.debug(f"Initialized http client with base url: {self.base_url}")
def _make_request(
self,
method: Callable[..., requests.Response],
path: str,
base_url: Optional[str] = None,
headers: Optional[dict] = None,
**kwargs,
):
"""Make a request to the api
:param Callable method: The method to use for the request
:param str path: The path to make the request to
:param str base_url: (optional) The base url to use for the request
:param dict headers: (optional) The headers to use for the request
:param kwargs: The keyword arguments to pass to the request method
:return: json response from the request
"""
try:
url = f"{base_url or self.base_url}/{path}"
timeout = kwargs.pop("timeout", HttpClientDefaultValues.timeout)
request_headers = {**self.session.headers, **(headers or {})}
response = method(url, headers=request_headers, timeout=timeout, **kwargs)
response.raise_for_status()
return self._parse_response(response)
except requests.exceptions.RequestException as e:
self._handle_request_error(e)
def _handle_request_error(self, e: requests.exceptions.RequestException) -> None:
"""Handle request errors"""
if isinstance(e, requests.exceptions.HTTPError):
try:
error_message = e.response.json().get("message", "Unknown error")
except ValueError:
error_message = e.response.text
if e.response.status_code == 401:
raise AuthenticationError(
f"Error: {error_message}", e.response
) from None
else:
raise InvalidRequestError(
f"Invalid request: {error_message}", e.response
) from None
elif isinstance(e, requests.exceptions.RetryError):
raise InvalidRequestError(
"Invalid request: Max retries exceeded", e.response
) from None
elif isinstance(e, requests.exceptions.Timeout):
raise InvalidRequestError(
"Invalid request: Request timed out", e.response
) from None
elif isinstance(e, requests.exceptions.ConnectionError):
raise InvalidRequestError(
"Invalid request: Connection error", e.response
) from None
else:
raise InvalidRequestError(
f"Invalid request: {str(e)}", e.response
) from None
@backoff.on_exception(backoff.expo, Exception, max_time=500, logger=None)
def _get_output(self, url: str):
"""Get the output from an async request"""
response_json = self.session.get(url).json()
if (
response_json.get("status") == Status.in_progress
or response_json.get("status") == Status.processing
):
logger.debug("Waiting for processing to complete")
raise Exception("Stuck on processing status") from None
return response_json.get("response") or response_json
def _parse_response(self, response: requests.Response):
"""Parse the response from the api"""
try:
response_json = response.json()
if (
response_json.get("status") == Status.processing
and response_json.get("request_type", "sync") == "async"
):
return None
elif (
response_json.get("status") == Status.processing
and response_json.get("request_type", "sync") == "sync"
):
response_json = self._get_output(
response_json.get("data").get("output_url")
)
if response_json.get("success"):
return response_json.get("data")
else:
raise InvalidRequestError(
f"Invalid request: {response_json.get('message')}", response
) from None
elif response_json.get("success"):
return response_json.get("data")
else:
raise InvalidRequestError(
f"Invalid request: {response_json.get('message')}", response
) from None
except ValueError:
raise InvalidRequestError(
f"Invalid request: {response.text}", response
) from None
def get(self, path: str, **kwargs) -> requests.Response:
"""Make a get request"""
return self._make_request(self.session.get, path, **kwargs)
def post(self, path: str, data=None, **kwargs) -> requests.Response:
"""Make a post request"""
return self._make_request(self.session.post, path, json=data, **kwargs)
def put(self, path: str, data=None, **kwargs) -> requests.Response:
"""Make a put request"""
return self._make_request(self.session.put, path, json=data, **kwargs)
def delete(self, path: str, **kwargs) -> requests.Response:
"""Make a delete request"""
return self._make_request(self.session.delete, path, **kwargs)
def patch(self, path: str, data=None, **kwargs) -> requests.Response:
"""Make a patch request"""
return self._make_request(self.session.patch, path, json=data, **kwargs)
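# Illustrative sketch (hypothetical, not from the original file): minimal use of
# the HttpClient above; the api key, base url and paths are placeholders.
def _example_http_client_usage():
    client = HttpClient(api_key="YOUR_API_KEY", base_url="https://api.example.com/v1")
    collection_data = client.get(path="collection/default")   # parsed "data" payload
    upload_data = client.post(path="upload", data={"url": "https://example.com/a.mp4"})
    return collection_data, upload_data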
# Path: videodb/video.py
class Video:
def __init__(self, _connection, id: str, collection_id: str, **kwargs) -> None:
self._connection = _connection
self.id = id
self.collection_id = collection_id
self.stream_url = kwargs.get("stream_url", None)
self.player_url = kwargs.get("player_url", None)
self.name = kwargs.get("name", None)
self.description = kwargs.get("description", None)
self.thumbnail_url = kwargs.get("thumbnail_url", None)
self.length = float(kwargs.get("length", 0.0))
self.transcript = kwargs.get("transcript", None)
self.transcript_text = kwargs.get("transcript_text", None)
def __repr__(self) -> str:
return (
f"Video("
f"id={self.id}, "
f"collection_id={self.collection_id}, "
f"stream_url={self.stream_url}, "
f"player_url={self.player_url}, "
f"name={self.name}, "
f"description={self.description}, "
f"thumbnail_url={self.thumbnail_url}, "
f"length={self.length})"
)
def __getitem__(self, key):
return self.__dict__[key]
def search(
self,
query: str,
search_type: Optional[str] = SearchType.semantic,
result_threshold: Optional[int] = None,
score_threshold: Optional[int] = None,
dynamic_score_percentage: Optional[int] = None,
) -> SearchResult:
search = SearchFactory(self._connection).get_search(search_type)
return search.search_inside_video(
self.id,
query,
result_threshold,
score_threshold,
dynamic_score_percentage,
)
def delete(self) -> None:
"""Delete the video
:raises InvalidRequestError: If the delete fails
:return: None if the delete is successful
:rtype: None
"""
self._connection.delete(path=f"{ApiPath.video}/{self.id}")
def generate_stream(self, timeline: Optional[list[tuple[int, int]]] = None) -> str:
"""Generate the stream url of the video
:param list timeline: The timeline of the video to be streamed. Defaults to None.
:raises InvalidRequestError: If the get_stream fails
:return: The stream url of the video
:rtype: str
"""
if not timeline and self.stream_url:
return self.stream_url
stream_data = self._connection.post(
path=f"{ApiPath.video}/{self.id}/{ApiPath.stream}",
data={
"timeline": timeline,
"length": self.length,
},
)
return stream_data.get("stream_url", None)
def generate_thumbnail(self):
if self.thumbnail_url:
return self.thumbnail_url
thumbnail_data = self._connection.get(
path=f"{ApiPath.video}/{self.id}/{ApiPath.thumbnail}"
)
self.thumbnail_url = thumbnail_data.get("thumbnail_url")
return self.thumbnail_url
def _fetch_transcript(self, force: bool = False) -> None:
if self.transcript and not force:
return
transcript_data = self._connection.get(
path=f"{ApiPath.video}/{self.id}/{ApiPath.transcription}",
params={"force": "true" if force else "false"},
)
self.transcript = transcript_data.get("word_timestamps", [])
self.transcript_text = transcript_data.get("text", "")
def get_transcript(self, force: bool = False) -> list[dict]:
self._fetch_transcript(force)
return self.transcript
def get_transcript_text(self, force: bool = False) -> str:
self._fetch_transcript(force)
return self.transcript_text
def index_spoken_words(self) -> None:
"""Semantic indexing of spoken words in the video
:raises InvalidRequestError: If the video is already indexed
:return: None if the indexing is successful
:rtype: None
"""
self._fetch_transcript()
self._connection.post(
path=f"{ApiPath.video}/{self.id}/{ApiPath.index}",
data={
"index_type": IndexType.semantic,
},
)
def add_subtitle(self) -> str:
subtitle_data = self._connection.post(
path=f"{ApiPath.video}/{self.id}/{ApiPath.workflow}",
data={
"type": Workflows.add_subtitles,
},
)
return subtitle_data.get("stream_url", None)
def insert_video(self, video, timestamp: float) -> str:
"""Insert a video into another video
:param Video video: The video to be inserted
:param float timestamp: The timestamp where the video should be inserted
:raises InvalidRequestError: If the insert fails
:return: The stream url of the inserted video
:rtype: str
"""
if timestamp > float(self.length):
timestamp = float(self.length)
pre_shot = Shot(self._connection, self.id, timestamp, "", 0, timestamp)
inserted_shot = Shot(
self._connection, video.id, video.length, "", 0, video.length
)
post_shot = Shot(
self._connection,
self.id,
self.length - timestamp,
"",
timestamp,
self.length,
)
all_shots = [pre_shot, inserted_shot, post_shot]
compile_data = self._connection.post(
path=f"{ApiPath.compile}",
data=[
{
"video_id": shot.video_id,
"collection_id": self.collection_id,
"shots": [(float(shot.start), float(shot.end))],
}
for shot in all_shots
],
)
return compile_data.get("stream_url", None)
def play(self) -> str:
"""Open the player url in the browser/iframe and return the stream url
:return: The stream url
:rtype: str
"""
return play_stream(self.stream_url)
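# Illustrative sketch (hypothetical, not from the original file): typical calls on
# a Video object; `collection` and the video id are placeholders and assume a
# configured connection.
def _example_video_usage(collection):
    video = collection.get_video("v-123")
    clipped_stream = video.generate_stream(timeline=[(0, 10), (30, 40)])
    transcript_text = video.get_transcript_text()
    return clipped_stream, transcript_text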
# Path: videodb/_upload.py
def upload(
_connection,
file_path: str = None,
url: str = None,
name: Optional[str] = None,
description: Optional[str] = None,
callback_url: Optional[str] = None,
) -> dict:
if not file_path and not url:
raise VideodbError("Either file_path or url is required")
if file_path and url:
raise VideodbError("Only one of file_path or url is allowed")
if file_path:
try:
name = file_path.split("/")[-1].split(".")[0] if not name else name
upload_url_data = _connection.get(
path=f"{ApiPath.collection}/{_connection.collection_id}/{ApiPath.upload_url}",
params={"name": name},
)
upload_url = upload_url_data.get("upload_url")
with open(file_path, "rb") as file:
files = {"file": (name, file)}
response = requests.post(upload_url, files=files)
response.raise_for_status()
url = upload_url
except FileNotFoundError as e:
raise VideodbError("File not found", cause=e)
except HTTPError as e:
raise VideodbError("Error while uploading file", cause=e)
upload_data = _connection.post(
path=f"{ApiPath.collection}/{_connection.collection_id}/{ApiPath.upload}",
data={
"url": url,
"name": name,
"description": description,
"callback_url": callback_url,
},
)
return upload_data
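# Illustrative sketch (hypothetical, not from the original file): the two mutually
# exclusive ways to call upload(); `conn`, the file path and the url are placeholders.
def _example_upload(conn):
    from_file = upload(conn, file_path="/tmp/clip.mp4", name="clip")
    from_url = upload(conn, url="https://example.com/clip.mp4")
    return from_file, from_url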
# Path: videodb/client.py
import logging
from typing import (
Optional,
)
from videodb._constants import (
ApiPath,
)
from videodb.collection import Collection
from videodb._utils._http_client import HttpClient
from videodb.video import Video
from videodb._upload import (
upload,
)
logger = logging.getLogger(__name__)
class Connection(HttpClient):
def __init__(self, api_key: str, base_url: str) -> None:
self.api_key = api_key
self.base_url = base_url
self.collection_id = "default"
super().__init__(api_key, base_url)
def get_collection(self, collection_id: Optional[str] = "default") -> Collection:
collection_data = self.get(path=f"{ApiPath.collection}/{collection_id}")
self.collection_id = collection_data.get("id", "default")
return Collection(
self,
self.collection_id,
collection_data.get("name"),
collection_data.get("description"),
)
def upload(
self,
file_path: str = None,
url: str = None,
name: Optional[str] = None,
| description: Optional[str] = None, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: IDEA-CCNL/Real-Gemini
# Path: real_gemini/tools/gpt4v_tool.py
class GPT4VTool(object):
_name_ = "GPT-4-Vision"
_description_ = "这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}."
_return_direct_ = False
def __init__(self):
self._gpt4v = ChatOpenAI(
model="gpt-4-vision-preview",
max_tokens=256)
self.max_dialog_turn = 3
self.history = ChatMessageHistory()
self.history.add_message(
SystemMessage(
content=[
{"type": "text", "text": _OPEN_AI_SYSTEM_PROMPT}
]
)
)
def inference(self, input_str: str):
input_dict = json.loads(input_str)
image_path = input_dict["image_input"]
if os.path.isdir(image_path):
image_paths = [
os.path.join(image_path, path) for path in os.listdir(image_path)]
else:
image_paths = [image_path]
base64_images = []
for image_path in image_paths:
base64_image = image2base64(load_image(image_path))
base64_images.append(f"data:image/jpeg;base64,{base64_image}")
human_contents = []
human_contents.append({"type": "text", "text": input_dict["question"]})
for base64_image in base64_images:
human_contents.append({
"type": "image_url",
"image_url": {"url": base64_image}
}) # images
self.history.add_message(HumanMessage(content=human_contents))
response_msg = self._gpt4v.invoke(self.history.messages)
# Keep only one image in the history
self.history.messages.pop()
human_contents = []
human_contents.append({"type": "text", "text": input_dict["question"]})
human_contents.append({"type": "image_url", "image_url": {"url": base64_images[-1]}})
self.history.add_message(HumanMessage(content=human_contents))
self.history.add_message(response_msg)
# Keep only the last self.max_dialog_turn dialogue turns
if len(self.history.messages) > 1 + 2 * self.max_dialog_turn:
self.history.messages = [self.history.messages[0]] + self.history.messages[-2 * self.max_dialog_turn: ]
# print(self.history.messages)
return response_msg.content
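# Illustrative sketch (hypothetical, not from the original file): the JSON input
# contract described in _description_; the image path is a placeholder and a valid
# OPENAI_API_KEY is assumed.
def _example_gpt4v_call():
    tool = GPT4VTool()
    payload = json.dumps({
        "question": "What objects are visible in this picture?",
        "image_input": "./test/outputs/frame_0001.png",
    })
    return tool.inference(payload)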
# Path: real_gemini/tools/image_generation_tool.py
class TaiyiGeneralTool(object):
_name_ = "taiyi general image generation"
_description_ = "Taiyi General的API,用于从文本生成图像。当你需要从文本描述生成图像时非常有用。输入应该是文本,即图像描述。A wrapper around Taiyi General API for text to image generation. Useful for when you need to generate images from a text description. Input should be text, i.e, an image description."
_return_direct_ = True
def __init__(self):
self.prompter = ChatOpenAI(
model="gpt-3.5-turbo",
max_tokens=256)
self.host = os.getenv("IMAGE_GENERATION_SERVER_HOST")
self.port = os.getenv("IMAGE_GENERATION_SERVER_PORT")
def _upgrade_prompt(self, prompt):
messages = []
messages.append(
SystemMessage(
content=[
{"type": "text", "text": "我正在使用一个Stable Diffusion的AI图像生成工具,我想让你充当我的prompt优化生成器。在我想生成的主题后,请帮我添加各种关键词,使得我的主题的描述更加详细,添加的关键词包括:主体、背景效果、风格、拍摄方式。例如,如果我输入“跑车”,你将生成关键词,如:“跑车,高清,4k,真实细致的跑车摄影,速度动态模糊,赛车场,城市环境,风景道路,戏剧性的天空”"}
]
)
)
messages.append(HumanMessage(content=prompt))
response_msg = self.prompter.invoke(messages)
new_prompt = response_msg.content
return new_prompt
def inference(self, inputs):
url = f"http://{self.host}:{self.port}/taiyi_xl_general_base64/"
headers = {"Content-Type": "application/json"}
new_prompt = self._upgrade_prompt(inputs)
print("new prompt:", new_prompt)
data = {"prompt": new_prompt}
response = requests.post(url, headers=headers, data=json.dumps(data))
response = response.json()
b64_image = response["image_base64"]
# write to file
save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
save_dir = os.path.join(save_dir, "test", "outputs")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
md5 = hashlib.md5()
md5.update(inputs.encode('utf-8'))
filename = os.path.join(save_dir, md5.hexdigest() + ".png")
save_or_show_image(b64_image, filename)
print("image filename:", filename)
result = {"text": "好的,我用太乙为你生成了一张图片。", "image": filename}
return json.dumps(result, ensure_ascii=False)
# Path: real_gemini/tools/music_tool.py
class Text2MusicTool(object):
_name_ = "Text2Music"
_description_ = "这个工具是从文本生成音乐的调用接口,它可以根据一段文字,生成符合这段文字内容的音乐风格。本工具的输入是一段文本指令。This tool is an API that generates music from text. It can create music that matches the style of the given text content. The input for this tool is a text command."
_return_direct_ = True
def __init__(self):
self.translator = ChatOpenAI(
model="gpt-3.5-turbo",
max_tokens=256)
self.host = os.getenv("MUSIC_SERVER_HOST")
self.port = os.getenv("MUSIC_SERVER_PORT")
def inference(self, input_str: str):
messages = []
messages.append(
SystemMessage(
content=[
{"type": "text", "text": "你是一个翻译专家,请将我输入的中文翻译成英文。"}
]
)
)
messages.append(HumanMessage(content=input_str))
response_msg = self.translator.invoke(messages)
input_str_en = response_msg.content
# print(input_str_en)
url = f"http://{self.host}:{self.port}/text_to_music"
data = {"text": input_str_en}
music_response = requests.post(url, data=data)
music_response = music_response.json()
# write to file
save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
save_dir = os.path.join(save_dir, "test", "outputs")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
md5 = hashlib.md5()
md5.update(input_str_en.encode('utf-8'))
filename = os.path.join(save_dir, md5.hexdigest() + ".wav")
raw_data = music_response["audio"]
sampling_rate = music_response["sampling_rate"]
scipy.io.wavfile.write(
filename,
rate=sampling_rate,
data=np.frombuffer(base64.b64decode(raw_data), np.float32),
)
print("music filename:", filename)
result = {"text": "好的,为你生成了一段音乐。", "audio": filename}
return json.dumps(result, ensure_ascii=False)
# Path: real_gemini/tools/controlnet_tool.py
class Image2PoseTool(object):
_name_="Pose-Detection-On-Image"
_description_="""当你想要检测图片中的人体姿态时很有用。比如:生成这张图片中的人体姿态,或者从这张图片中生成人体姿态。输入到这个工具的应该是一个字符串,表示图片的路径或者图片所在的文件夹路径。useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_dir or image_paths."""
_return_direct_ = False
def __init__(self):
self.host = "0.0.0.0"
self.port = 6679
def inference(self, image_input: str):
url = f"http://{self.host}:{self.port}/image_to_pose"
data = {"image_input": image_input}
pose_response = requests.post(url, data=data)
return pose_response.text
# Path: real_gemini/tools/sam_tool.py
class SegmentingTool(object):
_name_="Segment the Image"
_description_="当你想要对图片中的所有物体进行分割,但是不想对某个物体进行分割时很有用。比如:分割这张图片中的所有物体,或者在这张图片上生成分割结果,或者对这张图片进行分割,或者分割这张图片中的所有物体。输入到这个工具的应该是一个字符串,表示图片的路径或者图片所在的文件夹路径。useful when you want to segment all the part of the image, but not segment a certain object.like: segment all the object in this image, or generate segmentations on this image, or segment the image, or perform segmentation on this image, or segment all the object in this image. The input to this tool should be a string, representing the image_dir or image_paths."
_return_direct_ = False
def __init__(self):
# self.host = "localhost"
self.host = "0.0.0.0"
self.port = 6680
def inference(self,image_input: str):
url = f"http://{self.host}:{self.port}/segmenting"
data = {"image_input": image_input}
seg_response = requests.post(url, data=data)
return seg_response.text
# Path: real_gemini/tools/dino_tool.py
class Text2BoxTool(object):
_name_="Detect the Give Object"
_description_="""当你只想检测或者找出图片中的某个物体时很有用。输入到这个工具的应该是一个被逗号分隔成两部分的字符串,分别表示图片的路径或者图片所在的文件夹路径和要检测的物体。useful when you only want to detect or find out given objects in the picture. The input to this tool should be a comma separated string of two, representing the image_dir or image_paths and the object to be detected, respectively."""
_return_direct_ = False
def __init__(self):
self.host = "0.0.0.0"
self.port = 6681
def inference(self, inputs):
url = f"http://{self.host}:{self.port}/text_to_box"
data = {"inputs": inputs}
box_response = requests.post(url, data=data)
return box_response.text
# Path: real_gemini/tools/imageediting_tool.py
class ImageRemoveTool(object):
_name_ = "Remove Something From The Image"
_description_ = "当你想要从图片中移除某个物体或者某个物体的某个部分时很有用。输入到这个工具的应该是一个被逗号分隔成两部分的字符串,分别表示图片的路径或者图片所在的文件夹路径和要移除的物体。useful when you want to remove the object or something from the photofrom its description or location. The input to this tool should be a comma separated string of two, representing the image_dir or image_paths and the object need to be removed."
_return_direct_ = False
def __init__(self):
self.host = "0.0.0.0"
# self.host = "localhost"
self.port = 6682
def inference(self, inputs):
url = f"http://{self.host}:{self.port}/image_remove"
data = {"input": inputs}
update_response = requests.post(url, data=data)
return update_response.text
# Path: real_gemini/tools/imageediting_tool.py
class ImageReplaceTool(object):
_name_="Replace Something From The Image"
_description_="当你想要用另一个物体替换图片中的某个物体或者某个物体的某个部分时很有用。输入到这个工具的应该是一个被逗号分隔成三部份的字符串,分别表示图片的路径或者图片所在的文件夹路径,要替换的物体以及要替换成的物体。\nuseful when you want to replace an object from the object description or location with another object from its description.\nThe input to this tool should be a comma separated string of three, representing the image_dir or image_paths, the object to be replaced, the object to be replaced with."
_return_direct_ = False
def __init__(self):
self.host = "0.0.0.0"
# self.host = "localhost"
self.port = 6682
def inference(self, inputs):
url = f"http://{self.host}:{self.port}/image_replace"
data = {"input": inputs}
update_response = requests.post(url, data=data)
return update_response.text
# Path: real_gemini/tools/weather_tool.py
class WeatherTool(object):
_name_ = "WeatherAPI"
_description_ = "这个工具是查询当前和未来天气的调用接口,它可以根据一段文字,这个文字包含一个城市,这个接口可以查询这个城市的天气,注意,本工具的输入是一个字符串。This tool is a weather query API that can retrieve the current and future weather based on a given text, which includes a city name. The API is capable of querying the weather for the specified city. Please note that the input for this tool is a string."
_return_direct_ = False
def __init__(self):
self.gaode_api_key = os.getenv("GAODE_API_KEY")
def inference(self, input_str: str):
city_name = input_str
district_name = input_str
params = self._get_params(city_name, district_name)
return self._process_response(self._results(params))
def _get_params(self, city_name: str, district_name: str) -> Dict[str, str]:
"""Get parameters for GaoDeAPI."""
adcode = self._get_adcode(city_name, district_name)
params = {
"api_key": self.gaode_api_key,
"adcode": adcode
}
print(params)
return params
def _results(self, params: dict) -> dict:
"""Run query through GaoDeAPI and return the raw result."""
# # with HiddenPrints():
response = requests.get("https://restapi.amap.com/v3/weather/weatherInfo?", {
"key": self.gaode_api_key,
"city": params["adcode"],
"extensions": "all",
"output": "JSON"
})
res = json.loads(response.content)
return res
def _process_response(self, res: dict) -> str:
"""Process response from GaoDeAPI."""
if res["status"] == '0':
return "输入的城市信息可能有误或未提供城市信息"
if res["forecasts"] is None or len(res["forecasts"]) == 0:
return "输入的城市信息可能有误或未提供城市信息"
res["currentTime"] = datetime.datetime.now()
return json.dumps(res["forecasts"], ensure_ascii=False)
def _get_adcode(self, city_name: str, district_name: str) -> str:
"""Obtain the regional code of a city based on its name and district/county name."""
# Read the Excel file
work_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
df = pd.read_excel(
os.path.join(work_dir, "test/AMap_adcode_citycode.xlsx"), sheet_name="sheet1"
)
# print(df)
# Drop any rows containing NaN values
df = df.dropna()
if district_name is not None and district_name != '':
# Look up rows whose '中文名' column contains the district name
result = df[df['中文名'].str.contains(district_name)]
json_data = result.to_json(orient='records', force_ascii=False)
# Parse the JSON data
json_array = json.loads(json_data)
# If the district name is empty, query by city name instead
if (district_name is None or district_name == '') and city_name != '':
# Look up rows whose '中文名' column contains the city name
result = df[df['中文名'].str.contains(city_name)]
json_data = result.to_json(orient='records', force_ascii=False)
# Parse the JSON data
json_array = json.loads(json_data)
# If nothing was found, fall back to searching by city name
if len(json_array) == 0:
# Look up rows whose '中文名' column contains the city name
result = df[df['中文名'].str.contains(city_name)]
json_data = result.to_json(orient='records', force_ascii=False)
# Parse the JSON data
json_array = json.loads(json_data)
# If there is exactly one match, return its adcode directly
if len(json_array) == 1:
return json_array[0]['adcode']
# If there are multiple matches, filter them further by district_name
if len(json_array) > 1:
for obj in json_array:
if district_name is not None and district_name != '' and district_name in obj['中文名']:
return obj['adcode']
if city_name in obj['district_name']:
return obj['adcode']
return "输入的城市信息可能有误或未提供城市信息"
# Path: real_gemini/utils/output_parser.py
class ConvoOutputParser(AgentOutputParser):
"""Output parser for the conversational agent."""
ai_prefix: str = "AI"
"""Prefix to use before AI output."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if f"{self.ai_prefix}:" in text:
return AgentFinish(
{"output": text.split(f"{self.ai_prefix}:")[-1].strip()}, text
)
if "###" in text:
return AgentFinish(
{"output": "###"}, text
)
regex = r"Action: (.*?)[\n]*Action Input: ([\s\S]*)"
match = re.search(regex, text, re.DOTALL)
if not match:
raise OutputParserException(f"Could not parse LLM output: `{text}`")
action = match.group(1)
action_input = match.group(2)
return AgentAction(action.strip(), action_input.strip(" ").strip('"'), text)
@property
def _type(self) -> str:
return "conversational"
# Path: real_gemini/utils/agent_prompt.py
PREFIX = """Assistant is a large language model trained by IDEA-CCNL.
Please note, the scenario that the assistant is facing is: a user is interacting with the assistant through a camera for Q&A. The system will first convert the user's voice into text and then input it into the assistant. At the same time, the system will save keyframe images to a directory for image understanding. The assistant needs to conduct multimodal Q&A and tool invocation based on the images and text.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text or images, audio, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific multimodal question or just want to have a conversation about a particular topic, Assistant is here to assist.
TOOLS:
------
Assistant has access to the following tools:"""
# Path: real_gemini/utils/agent_prompt.py
FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No, and {ai_prefix}: [your response here]
```
MAKE SURE your response MUST in Chinese.
Since the text you input is obtained from the ASR service, there may be some recognition noise. If your input is some meaningless text or incoherent sentences, please do not call the tool and directly return "###".
由于你输入的文字是由ASR服务得到的,所以可能存在一些识别噪音。假如你的输入是一些没有意义的文字或者不通顺的句子时,请不要调用工具,并直接返回\"###\"。
Your input is an text instruction and key frame images captured by an AI multimodal assistant. Please answer my questions based on these images. Please note that the images and questions may not be related, and you need to make your own judgment.
"""
# Path: real_gemini/utils/agent_prompt.py
SUFFIX = """Begin!
Previous conversation history:
{chat_history}
New input: {input}
{agent_scratchpad}"""
# Path: real_gemini/agent.py
import os
import re
import json
from langchain.chat_models import ChatOpenAI
from langchain.agents.tools import Tool
from langchain.agents import initialize_agent, load_tools, AgentType
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from .tools.gpt4v_tool import GPT4VTool
from .tools.image_generation_tool import TaiyiGeneralTool
from .tools.music_tool import Text2MusicTool
from .tools.controlnet_tool import Image2PoseTool
from .tools.sam_tool import SegmentingTool
from .tools.dino_tool import Text2BoxTool
from .tools.imageediting_tool import ImageRemoveTool, ImageReplaceTool
from .tools.weather_tool import WeatherTool
from .utils.output_parser import ConvoOutputParser
from .utils.agent_prompt import PREFIX, FORMAT_INSTRUCTIONS, SUFFIX
#encoding=utf8
REGISTERED_TOOL_CLASSES = [
GPT4VTool,
TaiyiGeneralTool,
Text2MusicTool,
SegmentingTool,
Image2PoseTool,
Text2BoxTool,
ImageRemoveTool,
ImageReplaceTool,
WeatherTool,
]
class ReActAgent(object):
def __init__(self):
# self.llm = ChatOpenAI(model_name="gpt-4", temperature=0.5)
self.llm = ChatOpenAI(model_name="gpt-4-1106-preview", temperature=0.5)
# self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.5)
# self.llm = ChatOpenAI(model_name="deepseek", temperature=0.5, openai_api_base="http://192.168.81.13:6767/v1/")
# self.tools = load_tools(["dalle-image-generator"])
self.tools = []
for tool_cls in REGISTERED_TOOL_CLASSES:
custom_tool = tool_cls()
self.tools.append(
Tool(
name=custom_tool._name_,
description=custom_tool._description_,
func=custom_tool.inference,
return_direct=custom_tool._return_direct_
)
)
memory = ConversationBufferMemory(
return_messages=True,
memory_key="chat_history", output_key='output')
ai_prefix = "REAL-GEMINI"
output_parser = ConvoOutputParser()
output_parser.ai_prefix = ai_prefix
self.agent = initialize_agent(
tools=self.tools,
llm=self.llm,
| memory=memory, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aiim-research/GRETEL
# Path: src/core/factory_base.py
def get_instance_kvargs(kls, param):
GLogger.getLogger().info("Instantiating: "+kls)
return get_class(kls)(**param)
# Path: src/core/oracle_base.py
class Oracle(Trainable,metaclass=ABCMeta):
def __init__(self, context:Context, local_config) -> None:
super().__init__(context, local_config)
self._call_counter = 0
@final
def predict(self, data_instance):
"""predicts the label of a given data instance
-------------
INPUT:
data_instance : The instance whose class is going to be predicted
-------------
OUTPUT:
The predicted label for the data instance
"""
self._call_counter += 1
return self._real_predict(data_instance)
@final
def predict_proba(self, data_instance):
"""predicts the probability estimates for a given data instance
-------------
INPUT:
data_instance : The instance whose class is going to be predicted
-------------
OUTPUT:
The predicted probability estimates for the data instance
"""
self._call_counter += 1
return self._real_predict_proba(data_instance)
@final
def get_calls_count(self):
return self._call_counter
@final
def reset_call_count(self):
self._call_counter = 0
@final
def predict_list(self, dataset: Dataset, fold_id=0):
sptest = dataset.get_split_indices()[fold_id]['test']
result = [self.predict(dataset.get_instance(i)) for i in sptest]
return result
'''@abstractmethod'''  # TODO: needs to be reactivated and implemented. It may be removed, in accordance with Mario's and GRETEL's philosophy.
def evaluate(self, dataset: Dataset, fold_id=0):
pass
@abstractmethod
def _real_predict(self, data_instance):
pass
@abstractmethod
def _real_predict_proba(self, data_instance):
pass
# Path: src/dataset/dataset_base.py
class Dataset(Savable):
def init(self):
super().init()
################### PREAMBLE ###################
self.instances: List[DataInstance] = []
self.node_features_map = {}
self.edge_features_map = {}
self.graph_features_map = {}
self.splits = []
self._torch_repr = None
self._class_indices = {}
self._num_nodes = None
self._num_nodes_values = None
#################################################
def create(self):
self.generator = get_instance_kvargs(self.local_config['parameters']['generator']['class'],
{
"context": self.context,
"local_config": self.local_config['parameters']['generator'],
"dataset": self
})
self._inject_dataset()
self.manipulators: List[BaseManipulator] = self._create_manipulators()
self.generate_splits(n_splits=self.local_config['parameters']['n_splits'],
shuffle=self.local_config['parameters']['shuffle'])
def _inject_dataset(self):
for instance in self.instances:
instance._dataset = self
def _create_manipulators(self):
manipulator_instances = []
for manipulator in self.local_config['parameters']['manipulators']:
self.context.logger.info("Apply: "+manipulator['class'])
manipulator_instances.append(get_instance_kvargs(manipulator['class'],
{
"context": self.context,
"local_config": manipulator,
"dataset": self
}))
return manipulator_instances
def __len__(self):
return len(self.get_data())
def get_data(self):
return self.instances
def get_instance(self, i: int):
return self.instances[i]
def get_instances_by_class(self,cls):
if not getattr(self, '_inst_by_cls', None):
self._inst_by_cls = {}
if cls not in self._inst_by_cls:
idx = self.class_indices()[cls]
self._inst_by_cls[cls] = [self.instances[i] for i in idx]
return self._inst_by_cls[cls]
def num_node_features(self):
return len(self.node_features_map)
def num_edge_features(self):
return len(self.edge_features_map)
def num_graph_features(self):
return len(self.graph_features_map)
def class_indices(self):
if not self._class_indices:
for i, inst in enumerate(self.instances):
self._class_indices[inst.label] = self._class_indices.get(inst.label, []) + [i]
return self._class_indices
def manipulate(self, instance: DataInstance):
for manipulator in self.manipulators:
manipulator._process_instance(instance)
@property
def num_classes(self):
return len(self.class_indices())
@property
def num_nodes(self):
if not self._num_nodes:
self._num_nodes = np.min(self.num_nodes_values)
return self._num_nodes
@property
def num_nodes_values(self):
if not self._num_nodes_values:
self._num_nodes_values = []
for inst in self.instances:
self._num_nodes_values.append(len(inst.data))
return self._num_nodes_values
def get_split_indices(self, fold_id=-1):
if fold_id == -1:
# NOTE: I am a bit worried that it might be weak if you have sparse indices
return {'train': list(range(0, len(self.instances))), 'test': list(range(0, len(self.instances))) }
else:
return self.splits[fold_id]
def generate_splits(self, n_splits=10, shuffle=True):
kf = StratifiedKFold(n_splits=n_splits, shuffle=shuffle)
spl = kf.split([g for g in self.instances], [g.label for g in self.instances])
for train_index, test_index in spl:
self.splits.append({'train': train_index.tolist(), 'test': test_index.tolist()})
def get_torch_loader(self,
fold_id: int=-1,
batch_size: int=4,
usage: str='train',
kls: int=-1,
dataset_kls: str='src.dataset.utils.dataset_torch.TorchGeometricDataset',
**kwargs):
"""
Retrieves a DataLoader for Torch instances, facilitating batch processing.
Parameters:
- fold_id (int, optional): The fold identifier. Defaults to -1, indicating all folds.
- batch_size (int, optional): The number of instances in each batch. Defaults to 4.
- usage (str, optional): The usage type, e.g., 'train' or 'test'. Defaults to 'train'.
- kls (int, optional): The class identifier. Defaults to -1, indicating all classes.
- dataset_kls (str, optional): The class name of the (torch) Dataset or (torch_geometric) Dataset to use.
Defaults to 'src.dataset.utils.dataset_torch.TorchGeometricDataset'.
- **kwargs: Additional keyword arguments to be passed when creating the Dataset instance.
Returns:
torch_geometric.loader.DataLoader: A DataLoader configured for the specified Torch instances.
Usage Example:
```
dataloader = get_torch_loader(fold_id=0, batch_size=32, usage='train', kls=1, dataset_kls='{path-to-dataset-class}')
```
Notes:
- The DataLoader is created using instances obtained from the `get_torch_instances` method.
- Batching is performed with the specified `batch_size`.
- Data shuffling and dropping the last incomplete batch are enabled by default for training.
- The function leverages the `DataLoader` class from the `torch_geometric.loader` module.
"""
instances = self.get_torch_instances(fold_id=fold_id, usage=usage, kls=kls, dataset_kls=dataset_kls)
return DataLoader(instances, batch_size=batch_size, shuffle=True, drop_last=True)
def get_torch_instances(self,
fold_id: int=-1,
usage: str='train',
kls: int=-1,
dataset_kls: str='src.dataset.utils.dataset_torch.TorchGeometricDataset',
**kwargs):
"""
Retrieves a subset of Torch instances for a specific fold, usage, and dataset class.
Parameters:
- fold_id (int, optional): The fold identifier. Defaults to -1, indicating all folds.
- usage (str, optional): The usage type, e.g., 'train' or 'test'. Defaults to 'train'.
- kls (int, optional): The class identifier. Defaults to -1, indicating all classes.
- dataset_kls (str, optional): The class name of the (torch) Dataset or (torch_geometric) Dataset to use.
Defaults to 'src.dataset.utils.dataset_torch.TorchGeometricDataset'.
- **kwargs: Additional keyword arguments to be passed when creating the Dataset instance.
Returns:
torch.utils.data.Subset: A subset of Torch instances based on the specified parameters.
Usage Example:
```
subset = get_torch_instances(fold_id=0, usage='train', kls=1, dataset_kls='{path-to-dataset-class}')
```
Notes:
- If `kls` is specified, the instances belonging to that class are removed from the returned subset.
- The function relies on the presence of `get_split_indices` and `class_indices` methods in the parent class.
- The Dataset instance is created using the specified class name (`dataset_kls`) and additional arguments (`**kwargs`).
"""
# Retrieve the indices for the specified fold and usage
indices = self.get_split_indices(fold_id)[usage]
# If a specific class (kls) is provided, remove the instances of that class from the selection
if kls != -1:
indices = list(set(indices).difference(set(self.class_indices()[kls])))
# Create an instance of the specified dataset class with the given instances and additional arguments
self._torch_repr = get_class(dataset_kls)(self.instances, **kwargs)
# Return a Subset of the dataset instances based on the filtered indices
return Subset(self._torch_repr.instances, indices)
def read(self):
if self.saved():
store_path = self.context.get_path(self)
with open(store_path, 'rb') as f:
dump = pickle.load(f)
self.instances = dump['instances']
self.splits = dump['splits']
#self.local_config = dump['config']
self.node_features_map = dump['node_features_map']
self.edge_features_map = dump['edge_features_map']
self.graph_features_map = dump['graph_features_map']
self._num_nodes = dump['num_nodes']
self._class_indices = dump['class_indices']
self.manipulators = dump['manipulators']
#TODO: Attach the dataset back to all the instances
self._inject_dataset()
def write(self):
store_path = self.context.get_path(self)
dump = {
"instances" : self.instances,
"splits": self.splits,
"config": clean_cfg(self.local_config),
"node_features_map": self.node_features_map,
"edge_features_map": self.edge_features_map,
"graph_features_map": self.graph_features_map,
"num_nodes": self._num_nodes,
"class_indices": self._class_indices,
"manipulators": self.manipulators
}
with open(store_path, 'wb') as f:
pickle.dump(dump, f)
def check_configuration(self):
super().check_configuration()
local_config = self.local_config
if 'generator' not in local_config['parameters']:
raise ValueError(f'''The "generator" parameter needs to be specified in {self}''')
if 'manipulators' not in local_config['parameters']: # or not len(local_config['parameters']['manipulators']):
local_config['parameters']['manipulators'] = []
#local_config['parameters']['manipulators'].append(build_default_config_obj("src.dataset.manipulators.centralities.NodeCentrality"))
#local_config['parameters']['manipulators'].append(build_default_config_obj("src.dataset.manipulators.weights.EdgeWeights"))
local_config['parameters']['n_splits'] = local_config['parameters'].get('n_splits', 10)
local_config['parameters']['shuffle'] = local_config['parameters'].get('shuffle', True)
@property
def name(self):
alias = get_class( self.local_config['parameters']['generator']['class'] ).__name__
return self.context.get_name(self,alias=alias)
# Path: src/core/configurable.py
class Configurable(Base,metaclass=ABCMeta):
def __init__(self, context: Context, local_config):
super().__init__(context)
self.local_config = local_config#copy.deepcopy(local_config)
self.check_configuration()
self.init()
def check_configuration(self):
self.local_config['parameters'] = self.local_config.get('parameters',{})
@abstractmethod
def init(self):
pass
# Path: src/dataset/instances/base.py
class DataInstance:
def __init__(self, id, label, data, dataset=None):
self.id = id
self.data = data
self.label = label #TODO: Refactoring to have a one-hot encoding of labels!
self._dataset = dataset
# Path: src/dataset/instances/graph.py
class GraphInstance(DataInstance):
def __init__(self, id, label, data, node_features=None, edge_features=None, edge_weights=None, graph_features=None, dataset=None):
super().__init__(id, label, data, dataset=dataset)
self.node_features = self.__init_node_features(node_features).astype(np.float32)
self.edge_features = self.__init_edge_features(edge_features).astype(np.float32)
self.edge_weights = self.__init_edge_weights(edge_weights).astype(np.float32)
self.graph_features = graph_features
self._nx_repr = None
def __deepcopy__(self, memo):
# Fields that are being shallow copied
_dataset = self._dataset
# Fields that are deep copied
_new_id = deepcopy(self.id, memo)
_new_label = deepcopy(self.label, memo)
_data = deepcopy(self.data, memo)
_node_features = deepcopy(self.node_features, memo)
_edge_features = deepcopy(self.edge_features, memo)
_edge_weights = deepcopy(self.edge_weights, memo)
_graph_features = deepcopy(self.graph_features, memo)
return GraphInstance(_new_id, _new_label, _data, _node_features, _edge_features, _edge_weights, _graph_features)
def get_nx(self):
if not self._nx_repr:
self._nx_repr = self._build_nx()
return deepcopy(self._nx_repr)
def __init_node_features(self, node_features):
return np.zeros((self.data.shape[0], 1)) if isinstance(node_features, (str, type(None))) else node_features
def __init_edge_features(self, edge_features):
edges = np.nonzero(self.data)
return np.ones((len(edges[0]), 1)) if isinstance(edge_features, (str, type(None))) else edge_features
def __init_edge_weights(self, edge_weights):
edges = np.nonzero(self.data)
return np.ones(len(edges[0])) if edge_weights is None else edge_weights
def _build_nx(self):
nx_repr = nx.from_numpy_array(self.data)
nx_repr.add_nodes_from([node, {'node_features': self.node_features[node]}] for node in nx_repr.nodes())
edges = list(nx_repr.edges)
nx_repr.add_edges_from([(edge[0], edge[1], {'edge_features': self.edge_features[i], 'weight': self.edge_weights[i]}) for i, edge in enumerate(edges)])
return nx_repr
@property
def num_edges(self):
nx_repr = self.get_nx()
return nx_repr.number_of_edges()
@property
def num_nodes(self):
return len(self.data)
def nodes(self):
return [ i for i in range(self.data.shape[0])]
def neighbors(self, node):
return [i for i in self.data[node,:] if i != 0]
def degree(self,node):
return len(self.neighbors(node))
def degrees(self):
return [ len(self.neighbors(y)) for y in self.nodes()]
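# Illustrative sketch (hypothetical, not from the original file): building a tiny
# GraphInstance from a 2-node adjacency matrix; node/edge features and weights fall
# back to the defaults initialised above.
def _example_graph_instance():
    tiny_adj = np.array([[0, 1],
                         [1, 0]])
    g = GraphInstance(id=0, label=1, data=tiny_adj)
    return g.num_nodes, g.num_edges   # -> (2, 1)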
# Path: src/explainer/ensemble/aggregators/nodes/base.py
class NodeFeatureAggregator(Configurable):
def init(self):
self.dataset: Dataset = retake_dataset(self.local_config)
self.oracle: Oracle = retake_oracle(self.local_config)
super().init()
def aggregate(self, nodes, instances: List[DataInstance]):
pass
# Path: src/utils/cfg_utils.py
def init_dflts_to_of(snippet, key, kls, *args, **kwargs):
__add_dflts_to_of(snippet, key, kls, generate_default_for,*args, **kwargs)
# Path: src/utils/cfg_utils.py
def inject_dataset(cfg, dataset):
cfg['dataset']= dataset
# Path: src/utils/cfg_utils.py
def inject_oracle(cfg, oracle):
cfg['oracle']= oracle
# Path: src/utils/cfg_utils.py
def retake_oracle(cfg):
return cfg['oracle']
# Path: src/utils/cfg_utils.py
def retake_dataset(cfg):
return cfg['dataset']
# Path: src/explainer/ensemble/aggregators/base.py
from copy import deepcopy
from typing import List
from src.core.factory_base import get_instance_kvargs
from src.core.oracle_base import Oracle
from src.dataset.dataset_base import Dataset
from src.core.configurable import Configurable
from src.dataset.instances.base import DataInstance
from src.dataset.instances.graph import GraphInstance
from src.explainer.ensemble.aggregators.nodes.base import NodeFeatureAggregator
from src.utils.cfg_utils import init_dflts_to_of, inject_dataset, inject_oracle, retake_oracle, retake_dataset
import numpy as np
class ExplanationAggregator(Configurable):
def init(self):
self.dataset: Dataset = retake_dataset(self.local_config)
self.oracle: Oracle = retake_oracle(self.local_config)
inject_dataset(self.local_config['parameters']['node_feature_aggregator'], self.dataset)
inject_oracle(self.local_config['parameters']['node_feature_aggregator'], self.oracle)
self.node_feature_aggregator: NodeFeatureAggregator = get_instance_kvargs(self.local_config['parameters']['node_feature_aggregator']['class'],
{'context':self.context,'local_config': self.local_config['parameters']['node_feature_aggregator']})
super().init()
def aggregate(self, instance: DataInstance, explanations: List[DataInstance]):
aggregated_instance = self.real_aggregate(instance, explanations)
# we need to combine:
# 1) node features
# 2) edge features
# 3) graph features
adj = aggregated_instance.data
edges = np.nonzero(adj)
# if there's at least one edge that the aggregator produced
# then get the features of the incident nodes
if edges[0].size:
node_features = self.node_feature_aggregator.aggregate(
np.array(list(range(adj.shape[0]))),
explanations
)
cf_candidate = GraphInstance(id=instance.id,
label=1-instance.label,
| data=adj, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: modelscope/scepter
# Path: scepter/modules/data/dataset/base_dataset.py
class BaseDataset(Dataset, metaclass=ABCMeta):
para_dict = {
'MODE': {
'value': 'train',
'description': 'solver phase, select from [train, test, eval]'
},
'FILE_SYSTEM': {},
'TRANSFORMS': [{
'ImageToTensor': {
'PADDING': {
'value': None,
'description': 'padding'
},
'PAD_IF_NEEDED': {
'value': False,
'description': 'pad if needed'
},
'FILL': {
'value': 0,
'description': 'fill'
},
'PADDING_MODE': {
'value': 'constant',
'description': 'padding mode'
}
}
}]
}
def __init__(self, cfg, logger=None):
mode = cfg.get('MODE', 'train')
pipeline = cfg.get('TRANSFORMS', [])
super(BaseDataset, self).__init__()
self.mode = mode
self.logger = logger
self.worker_logger = get_logger(name='datasets')
self.pipeline = build_pipeline(pipeline,
TRANSFORMS,
logger=self.worker_logger)
self.file_systems = cfg.get('FILE_SYSTEM', None)
if isinstance(self.file_systems, list):
for file_sys in self.file_systems:
self.fs_prefix = FS.init_fs_client(file_sys,
logger=self.logger,
overwrite=False)
elif self.file_systems is not None:
self.fs_prefix = FS.init_fs_client(self.file_systems,
logger=self.logger,
overwrite=False)
self.local_we = we.get_env()
if old_python_version:
self.file_systems.logger = None
def __getitem__(self, index: int):
item = self._get(index)
return self.pipeline(item)
def worker_init_fn(self, worker_id, num_workers=1):
if isinstance(self.file_systems, list):
for file_sys in self.file_systems:
self.fs_prefix = FS.init_fs_client(file_sys,
logger=self.logger,
overwrite=False)
elif self.file_systems is not None:
self.fs_prefix = FS.init_fs_client(self.file_systems,
logger=self.logger,
overwrite=False)
self.worker_id = worker_id
self.logger = self.worker_logger
set_random_seed(int(os.environ.get('ES_SEED', 2023)))
we.set_env(self.local_we)
@abstractmethod
def _get(self, index: int):
pass
def __repr__(self) -> str:
return f'{self.__class__.__name__}: mode={self.mode}, len={len(self)}'
@staticmethod
def get_config_template():
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
:return:
'''
return dict_to_yaml('DATASETS',
__class__.__name__,
BaseDataset.para_dict,
set_name=True)
# Path: scepter/modules/data/dataset/registry.py
DATASETS = Registry('DATASETS',
common_para=DataObject.para_dict,
build_func=build_dataset_config)
# Path: scepter/modules/utils/config.py
def dict_to_yaml(module_name, name, json_config, set_name=False):
'''
{ "ENV" :
{ "description" : "",
"A" : {
"value": 1.0,
"description": ""
}
}
}
convert std dict to yaml
:param module_name:
:param json_config:
:return:
'''
def convert_yaml_style(level=1,
name='ENV',
description='ENV PARA',
default='',
type_name='',
is_sys=False):
new_line = ''
new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\n'.format(
'\t' * (level - 1), name.upper(), description, type_name,
f'\'{default}\'' if isinstance(default, str) else default)
if is_sys:
if name == '-':
new_line += '{}{}\n'.format('\t' * (level - 1), name.upper())
else:
new_line += '{}{}:\n'.format('\t' * (level - 1), name.upper())
else:
# if isinstance(default, str):
# default = f'\'{default}\''
if default is None:
new_line += '{}# {}: {}\n'.format('\t' * (level - 1),
name.upper(), default)
else:
new_line += '{}{}: {}\n'.format('\t' * (level - 1),
name.upper(), default)
return new_line
def parse_dict(json_config,
level_num,
parent_key,
set_name=False,
name='',
parent_type='dict'):
yaml_str = ''
# print(level_num, json_config)
if isinstance(json_config, dict):
if 'value' in json_config:
value = json_config['value']
if isinstance(value, dict):
assert len(value) < 1
value = None
description = json_config.get('description', '')
yaml_str += convert_yaml_style(level=level_num - 1,
name=parent_key,
description=description,
default=value,
type_name=type(value).__name__)
return True, yaml_str
else:
if len(json_config) < 1:
yaml_str += convert_yaml_style(level=level_num,
name='NAME',
description='',
default='',
type_name='')
level_num += 1
for k, v in json_config.items():
if k == 'description':
continue
if isinstance(v, dict):
is_final, new_yaml_str = parse_dict(v,
level_num,
k,
parent_type='dict')
if not is_final and parent_type == 'dict':
description = v.get('description', '')
yaml_str += convert_yaml_style(
level=level_num - 1,
name=k,
description=description,
default='',
type_name='',
is_sys=True)
if not is_final and parent_type == 'list':
yaml_str += convert_yaml_style(level=level_num,
name='NAME',
description='',
default=k,
type_name='')
yaml_str += new_yaml_str
elif isinstance(v, list):
base_yaml_str = convert_yaml_style(level=level_num - 1,
name=k,
description='',
default='',
type_name='',
is_sys=True)
yaml_str += base_yaml_str
for tup in v:
is_final, new_yaml_str = parse_dict(
tup, level_num, '-', parent_type='list')
if not is_final:
yaml_str += convert_yaml_style(level=level_num,
name='-',
description='',
default='',
type_name='',
is_sys=True)
yaml_str += new_yaml_str
else:
raise KeyError(
f'json config {json_config} must be a dict or a list'
)
elif isinstance(json_config, list):
level_num += 1
for tup in json_config:
is_final, new_yaml_str = parse_dict(tup, level_num, '-')
if not is_final:
yaml_str += convert_yaml_style(level=level_num - 1,
name='-',
description='',
default='',
type_name='',
is_sys=True)
if set_name:
yaml_str += convert_yaml_style(level=level_num,
name='NAME',
description='',
default=name,
type_name='')
yaml_str += new_yaml_str
else:
raise KeyError(f'json config {json_config} must be a dict')
return False, yaml_str
if isinstance(json_config, dict):
first_dict, sec_dict, third_dict = {}, {}, {}
for key, value in json_config.items():
if isinstance(value, dict) and len(value) > 0:
first_dict[key] = value
elif isinstance(value, dict) and len(value) == 0:
sec_dict[key] = value
elif isinstance(value, list):
third_dict[key] = value
else:
raise ValueError(f'Config {json_config} is illegal')
json_config = {}
json_config.update(first_dict)
json_config.update(sec_dict)
json_config.update(third_dict)
yaml_str = f'[{module_name}] module yaml examples:\n'
level_num = 1
base_yaml_str = convert_yaml_style(level=level_num,
name=module_name,
description='',
default='',
type_name='',
is_sys=True)
level_num += 1
is_final, new_yaml_str = parse_dict(json_config,
level_num,
module_name,
set_name=isinstance(json_config, list)
and set_name,
name=name)
if not is_final:
yaml_str += base_yaml_str
if set_name and not isinstance(json_config, list):
yaml_str += convert_yaml_style(level=level_num,
name='NAME',
description='',
default=name,
type_name='')
yaml_str += new_yaml_str
else:
yaml_str += new_yaml_str[1:]
return yaml_str
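# Illustrative sketch (hypothetical, not from the original file): rendering a tiny
# para_dict through dict_to_yaml; the module and class names are invented.
def _example_dict_to_yaml():
    para = {
        'MODE': {
            'value': 'train',
            'description': 'solver phase, select from [train, test, eval]'
        }
    }
    return dict_to_yaml('DATASETS', 'ToyDataset', para, set_name=True)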
# Path: scepter/modules/utils/distribute.py
def set_random_seed(seed):
def get_dist_info():
def gather_data(data):
def gather_list(data):
def gather_picklable(data):
def _gather_picklable_custom(data):
def gather_gpu_tensors(tensor, all_recv=False, is_cat=True):
def broadcast(tensor, src, group=None, **kwargs):
def barrier():
def get_global_gloo_group():
def reduce_scatter(output,
input_list,
op=dist.ReduceOp.SUM,
group=None,
**kwargs):
def all_reduce(tensor, op=dist.ReduceOp.SUM, group=None, **kwargs):
def reduce(tensor, dst, op=dist.ReduceOp.SUM, group=None, **kwargs):
def _serialize_to_tensor(data):
def _unserialize_from_tensor(recv_data):
def send(tensor, dst, group=None, **kwargs):
def recv(tensor, src=None, group=None, **kwargs):
def isend(tensor, dst, group=None, **kwargs):
def irecv(tensor, src=None, group=None, **kwargs):
def scatter(data, scatter_list=None, src=0, group=None, **kwargs):
def shared_random_seed():
def mp_worker(gpu, ngpus_per_node, cfg, fn, pmi_rank, world_size, work_env):
def __init__(self):
def init_env(self, config, fn, logger=None):
def get_env(self):
def set_env(self, we_env):
def __str__(self):
class Workenv(object):
# Path: scepter/modules/utils/file_system.py
FS = FileSystem()
# Path: scepter/modules/data/dataset/ms_dataset.py
import numbers
import os
import sys
from scepter.modules.data.dataset.base_dataset import BaseDataset
from scepter.modules.data.dataset.registry import DATASETS
from scepter.modules.utils.config import dict_to_yaml
from scepter.modules.utils.distribute import we
from scepter.modules.utils.file_system import FS
from modelscope import MsDataset
from modelscope.utils.constant import DownloadMode
@DATASETS.register_class()
class ImageTextPairMSDataset(BaseDataset):
para_dict = {
'MS_DATASET_NAME': {
'value': '',
'description': 'Modelscope dataset name.'
},
'MS_DATASET_NAMESPACE': {
'value': '',
'description': 'Modelscope dataset namespace.'
},
'MS_DATASET_SUBNAME': {
'value': '',
'description': 'Modelscope dataset subname.'
},
'MS_DATASET_SPLIT': {
'value': '',
'description':
'Modelscope dataset split set name, default is train.'
},
'MS_REMAP_KEYS': {
'value':
None,
'description':
'Modelscope dataset header of list file, the default is Target:FILE; '
            'If your file is not this header, please set this field, which is a map dict. '
            "For example, { 'Image:FILE': 'Target:FILE' } will replace the field Image:FILE with Target:FILE."
},
'MS_REMAP_PATH': {
'value':
None,
'description':
            'When the modelscope dataset name is not None, the dataset is loaded from modelscope; '
            'the default is None. If you want to use the data list from modelscope but load the '
            'files from your local device, set this field to the root path of your images. '
},
'TRIGGER_WORDS': {
'value':
'',
'description':
'The words used to describe the common features of your data, especially when you customize a '
            'tuner. Using these words, you can get what you want.'
},
'REPLACE_STYLE': {
'value':
False,
'description':
            'Whether to use the MS_DATASET_SUBNAME to replace the word in your description; default is False.'
},
'HIGHLIGHT_KEYWORDS': {
'value':
'',
'description':
            'The keywords you want to highlight in the prompt, which will be replaced by <HIGHLIGHT_KEYWORDS>.'
},
'KEYWORDS_SIGN': {
'value':
'',
'description':
'The keywords sign you want to add, which is like <{HIGHLIGHT_KEYWORDS}{KEYWORDS_SIGN}>'
},
'OUTPUT_SIZE': {
'value':
None,
'description':
            'If you use the FlexibleResize transforms, this field will output the image_size as [h, w], '
'which will be used to set the output size of images used to train the model.'
},
}
def __init__(self, cfg, logger=None):
super().__init__(cfg=cfg, logger=logger)
ms_dataset_name = cfg.get('MS_DATASET_NAME', None)
ms_dataset_namespace = cfg.get('MS_DATASET_NAMESPACE', None)
ms_dataset_subname = cfg.get('MS_DATASET_SUBNAME', None)
ms_dataset_split = cfg.get('MS_DATASET_SPLIT', 'train')
ms_remap_keys = cfg.get('MS_REMAP_KEYS', None)
ms_remap_path = cfg.get('MS_REMAP_PATH', None)
self.replace_style = cfg.get('REPLACE_STYLE', False)
self.trigger_words = cfg.get('TRIGGER_WORDS', '')
self.replace_keywords = cfg.get('HIGHLIGHT_KEYWORDS', '')
self.keywords_sign = cfg.get('KEYWORDS_SIGN', '')
self.output_size = cfg.get('OUTPUT_SIZE', None)
if self.output_size is not None:
if isinstance(self.output_size, numbers.Number):
self.output_size = [self.output_size, self.output_size]
# Use modelscope dataset
if not ms_dataset_name:
            raise ValueError(
                'You must set MS_DATASET_NAME as a modelscope dataset or as a local dataset organized '
                'in the modelscope dataset format.')
if FS.exists(ms_dataset_name):
ms_dataset_name = FS.get_dir_to_local_dir(ms_dataset_name)
ms_remap_path = ms_dataset_name
try:
self.data = MsDataset.load(str(ms_dataset_name),
namespace=ms_dataset_namespace,
subset_name=ms_dataset_subname,
split=ms_dataset_split)
except Exception:
self.logger.info(
"Load Modelscope dataset failed, retry with download_mode='force_redownload'."
)
try:
self.data = MsDataset.load(
str(ms_dataset_name),
namespace=ms_dataset_namespace,
subset_name=ms_dataset_subname,
split=ms_dataset_split,
download_mode=DownloadMode.FORCE_REDOWNLOAD)
except Exception as sec_e:
                raise RuntimeError(f'Load Modelscope dataset failed: {sec_e}.')
if ms_remap_keys:
self.data = self.data.remap_columns(ms_remap_keys.get_dict())
if ms_remap_path:
def map_func(example):
example['Target:FILE'] = os.path.join(ms_remap_path,
| example['Target:FILE']) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: YyzHarry/shortcut-ood-fairness
# Path: dataset/datasets.py
DATASETS = [
'MIMIC',
'CheXpert',
'NIH',
'PadChest',
'VinDr',
'SIIM',
'ISIC',
'ODIR'
]
CXR_DATASETS = [
'MIMIC',
'CheXpert',
'NIH',
'PadChest',
'VinDr',
'SIIM'
]
ATTRS = ['sex', 'ethnicity', 'age', 'sex_ethnicity']
TASKS = ['No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
'Cataract', 'Retinopathy']
N_STEPS = 5001 # Default, subclasses may override
CHECKPOINT_FREQ = 100 # Default, subclasses may override
N_WORKERS = 8 # Default, subclasses may override
INPUT_SHAPE = None # Subclasses should override
AVAILABLE_ATTRS = None # Subclasses should override
SPLITS = { # Default, subclasses may override
'tr': 0,
'va': 1,
'te': 2
}
EVAL_SPLITS = ['te'] # Default, subclasses may override
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age', 'ethnicity', 'sex_ethnicity']
TASKS = [
'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'
]
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age', 'ethnicity', 'sex_ethnicity']
TASKS = [
'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'
]
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age']
TASKS = [
'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'
]
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age']
TASKS = [
'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'
]
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age']
TASKS = [
'No Finding', 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema'
]
SPLITS = {
'te': 2
}
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age']
TASKS = [
'Pneumothorax'
]
SPLITS = {
'te': 2
}
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age']
TASKS = [
'No Finding'
]
N_STEPS = 30001
CHECKPOINT_FREQ = 1000
N_WORKERS = 16
INPUT_SHAPE = (3, 224, 224,)
AVAILABLE_ATTRS = ['sex', 'age']
TASKS = [
'No Finding', 'Cataract', 'Retinopathy'
]
def get_dataset_class(dataset_name):
def num_environments(dataset_name):
def __init__(self, root, split, metadata, transform, group_def='group', subsample_type=None, duplicates=None, subset_query=None):
def _count_groups(self):
def subsample(self, subsample_type):
def duplicate(self, duplicates):
def __getitem__(self, index):
def __len__(self):
def __init__(self, metadata, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def transform(self, x):
def __init__(self, dss):
def __getitem__(self, idx):
def __len__(self):
def __init__(self, ds, idxs):
def __getitem__(self, idx):
def __len__(self):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
def __init__(self, data_path, split, hparams, group_def='group', subsample_type=None, duplicates=None, override_attr=None, subset_query=None):
class SubpopDataset:
class BaseImageDataset(SubpopDataset):
class ConcatImageDataset(SubpopDataset):
class SubsetImageDataset(SubpopDataset):
class MIMIC(BaseImageDataset):
class CheXpert(BaseImageDataset):
class NIH(BaseImageDataset):
class PadChest(BaseImageDataset):
class VinDr(BaseImageDataset):
class SIIM(BaseImageDataset):
class ISIC(BaseImageDataset):
class ODIR(BaseImageDataset):
# Path: learning/algorithms.py
ALGORITHMS = [
'ERM',
'StratifiedERM',
# subgroup methods
'GroupDRO',
'IRM',
'CVaRDRO',
'JTT',
'LISA',
'DFR',
# data augmentation
'Mixup',
# domain generalization methods
'MMD',
'CORAL',
'DANN',
'CDANN',
# imbalanced learning methods
'ReSample',
'ReWeight',
'SqrtReWeight',
'CBLoss',
'Focal',
'LDAM',
'BSoftmax',
'CRT',
'ReWeightCRT',
'VanillaCRT',
# flat minima optimizer
'MA',
'SAM',
# attribute balancing
'GroupDROAttr',
'ReSampleAttr',
'ReWeightAttr',
]
D = self.my_cdist(x, y)
K = torch.zeros_like(D)
W = size[2]
H = size[3]
def get_algorithm_class(algorithm_name):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _init_model(self):
def _compute_loss(self, i, x, y, a, step):
def update(self, minibatch, step):
def return_feats(self, x):
def predict(self, x):
def return_groups(self, y, a):
def return_attributes(all_a):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _init_model(self):
def _compute_loss(self, i, x, y, a, step):
def update(self, minibatch, step):
def return_feats(self, x):
def predict(self, x):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None, group_def='group'):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def focal_loss(input_values, gamma):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _irm_penalty(logits, y):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes=None, attr_sizes=None, gaussian=False):
def my_cdist(x1, x2):
def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100, 1000]):
def mmd(self, x, y):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes=None, attr_sizes=None, conditional=False, class_balance=False):
def update(self, minibatch, step):
def return_feats(self, x):
def predict(self, x):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def update(self, minibatch, step):
def return_feats(self, x):
def predict(self, x):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step, stage1_model):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _to_ohe(self, y):
def _lisa_mixup_data(self, s, a, x, y, alpha):
def _rand_bbox(size, lam):
def _mix_up(alpha, x1, x2, y1, y2):
def _cut_mix_up(self, alpha, x1, x2, y1, y2):
def _compute_loss(self, i, x, y, a, step):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def _compute_loss(self, i, x, y, a, step):
def update(self, minibatch, step):
def predict(self, x):
def update_ma(self):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
def update(self, minibatch, step):
def norm(tensor_list, p=2):
class Algorithm(torch.nn.Module):
class ERM(Algorithm):
class GroupDRO(ERM):
class GroupDROAttr(ERM):
class StratifiedERM(ERM):
class ReSample(ERM):
class ReSampleAttr(ERM):
class ReWeightBase(ERM):
class ReWeight(ReWeightBase):
class ReWeightAttr(ReWeightBase):
class SqrtReWeight(ReWeight):
class CBLoss(ReWeight):
class Focal(ERM):
class LDAM(ERM):
class BSoftmax(ERM):
class CRT(ERM):
class ReWeightCRT(ReWeight):
class VanillaCRT(ERM):
class DFR(ERM):
class IRM(ERM):
class Mixup(ERM):
class AbstractMMD(ERM):
class MMD(AbstractMMD):
class CORAL(AbstractMMD):
class AbstractDANN(Algorithm):
class DANN(AbstractDANN):
class CDANN(AbstractDANN):
class CVaRDRO(ERM):
class AbstractTwoStage(Algorithm):
class JTT_Stage2(ERM):
class JTT(AbstractTwoStage):
class LISA(ERM):
class MA(ERM):
class SAM(ERM):
# Path: learning/early_stopping.py
class EarlyStopping:
def __init__(self, patience=5, lower_is_better=True):
def __call__(self, metric, step, state_dict, path):
def save_model(state_dict, path):
# Path: learning/swad_utils.py
class AveragedModel(Module):
class SWADBase:
class LossValley(SWADBase):
def __init__(self, model, device=None, avg_fn=None, rm_optimizer=False):
def avg_fn(averaged_model_parameter, model_parameter, num_averaged):
def forward(self, *args, **kwargs):
def predict(self, *args, **kwargs):
def network(self):
def update_parameters(self, model, step=None, start_step=None, end_step=None):
def clone(self):
def update_and_evaluate(self, segment_swa, val_loss):
def get_final_model(self):
def __init__(self, n_converge, n_tolerance, tolerance_ratio, **kwargs):
def get_smooth_loss(self, idx):
def is_converged(self):
def update_and_evaluate(self, segment_swa, val_loss):
def get_final_model(self):
Q = list(self.smooth_Q)[: converge_idx + 1]
# Path: utils/misc.py
def pickle_save(filename, obj):
def pickle_load(filename):
def mac_pickle_load(file_path):
def mac_pickle_dump(filename, obj):
def load_json(json_path):
def save_json(json_path, data):
def default(self, obj):
def format(cls, text, color='white'):
def log(text, color='white', style='normal', with_time=True, handle=None):
def print_yellow(text, with_time=True):
def print_cyan(text, with_time=True):
def print_green(text, with_time=True):
def prepare_folders(args):
def l2_between_dicts(dict_1, dict_2):
def __init__(self, ema, oneminusema_correction=True):
def update(self, dict_data):
def count_samples_per_class(targets, num_labels):
def make_balanced_weights_per_sample(targets):
def pdb():
def seed_hash(*args):
def print_separator():
def print_row(row, colwidth=10, latex=False):
def format_val(x):
def safe_load(parsed):
def __init__(self, underlying_dataset, keys):
def __getitem__(self, key):
def __len__(self):
def split_dataset(dataset, n, seed=0):
def random_pairs_of_minibatches(minibatches):
def mixup_data(x, y, alpha=1., device="cpu"):
def accuracy(network, loader, device):
def adjust_learning_rate(optimizer, lr, step, total_steps, schedule, cos=False):
def __init__(self, fname, mode="a"):
def write(self, message):
def flush(self):
def __init__(self, *args, **kwargs):
def _prototype(self, other, op):
def __add__(self, other):
def __rmul__(self, other):
def __neg__(self):
def __rsub__(self, other):
def __truediv__(self, other):
def make_grid(tensor, nrow=8, padding=2, normalize=False, ranges=None, scale_each=False, pad_value=0):
def norm_ip(img, min, max):
def norm_range(t, ranges):
def save_image(tensor, filename, nrow=8, padding=2, normalize=False, ranges=None, scale_each=False, pad_value=0):
class NumpyEncoder(json.JSONEncoder):
class TextFormat:
class MovingAverage:
class _SplitDataset(torch.utils.data.Dataset):
class Tee:
class ParamDict(OrderedDict):
# Path: utils/eval_helper.py
def predict_on_set(algorithm, loader, device):
def eval_metrics(algorithm, loader, device, thress=[0.5], thress_suffix=['_50'], add_arrays=False):
def binary_metrics(targets, preds, label_set=[0, 1], suffix='', return_arrays=False):
def prob_metrics(targets, preds, label_set, return_arrays=False):
CM = confusion_matrix(targets, preds, labels=label_set)
CM = confusion_matrix(targets, preds, labels=label_set)
# Path: dataset/fast_dataloader.py
class InfiniteDataLoader:
def __init__(self, dataset, weights, batch_size, num_workers):
super().__init__()
if weights is not None:
sampler = torch.utils.data.WeightedRandomSampler(
weights, replacement=True, num_samples=batch_size)
else:
sampler = torch.utils.data.RandomSampler(dataset, replacement=True)
batch_sampler = torch.utils.data.BatchSampler(
sampler,
batch_size=batch_size,
drop_last=True)
self._infinite_iterator = iter(torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=_InfiniteSampler(batch_sampler)
))
def __iter__(self):
while True:
yield next(self._infinite_iterator)
def __len__(self):
raise ValueError
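# --- Editor's aside (illustrative only, not part of the quoted repository) ---
# InfiniteDataLoader above wraps an (optionally weighted) random sampler in a
# never-ending iterator, so a training loop can draw batches by step count rather
# than by epoch. A minimal usage sketch; `dataset` and the numeric values here are
# assumptions, not taken from the repository:
#
#   loader = InfiniteDataLoader(dataset, weights=None, batch_size=32, num_workers=4)
#   batches = iter(loader)
#   for step in range(5001):
#       batch = next(batches)  # yields batches forever; there is no epoch boundary
# --- end aside ---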
# Path: train.py
import argparse
import collections
import json
import os
import random
import sys
import time
import numpy as np
import pandas as pd
import PIL
import torch
import torchvision
import torch.utils.data
import pickle
import hparams_registry
import wandb
import hashlib
from tensorboard_logger import Logger
from pathlib import Path
from torch.utils.data import DataLoader
from dataset import datasets
from learning import algorithms, early_stopping, swad_utils
from utils import misc, eval_helper
from dataset.fast_dataloader import InfiniteDataLoader
from collections import OrderedDict
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Shortcut Learning in Chest X-rays')
# training
parser.add_argument('--store_name', type=str, default='debug')
parser.add_argument('--dataset', type=str, default=["MIMIC"], nargs='+')
parser.add_argument('--task', type=str, default="No Finding", choices=datasets.TASKS + datasets.ATTRS)
parser.add_argument('--attr', type=str, default="sex", choices=datasets.ATTRS)
parser.add_argument('--group_def', type=str, default="group", choices=['group', 'label'])
parser.add_argument('--algorithm', type=str, default="ERM", choices=algorithms.ALGORITHMS)
# others
parser.add_argument('--output_dir', type=str, default='output')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--hparams', type=str, help='JSON-serialized hparams dict')
parser.add_argument('--hparams_seed', type=int, default=0, help='Seed for random hparams (0 for "default hparams")')
parser.add_argument('--seed', type=int, default=0, help='Seed for everything else')
parser.add_argument('--steps', type=int, default=None)
parser.add_argument('--log_online', help='Log online using wandb', action='store_true')
parser.add_argument('--skip_ood_eval', help='skip evals on OOD datasets', action='store_true')
parser.add_argument('--log_all', help='Log all val metrics at each step to tb and wandb', action='store_true')
parser.add_argument('--stratified_erm_subset', type=int, default=None)
# two-stage related
parser.add_argument('--stage1_folder', type=str)
# early stopping
parser.add_argument('--use_es', action='store_true')
parser.add_argument('--es_strategy', choices=['metric'], default='metric')
parser.add_argument('--es_metric', type=str, default='min_group:accuracy')
parser.add_argument('--es_patience', type=int, default=5, help='Stop after this many checkpoints w/ no improvement')
# checkpoints
parser.add_argument('--resume', '-r', type=str, default='')
parser.add_argument('--checkpoint_freq', type=int, default=None, help='Checkpoint every N steps')
parser.add_argument('--skip_model_save', action='store_true')
parser.add_argument('--debug', action='store_true')
# architectures and pre-training sources
parser.add_argument('--image_arch', default='densenet_sup_in1k',
choices=['densenet_sup_in1k', 'resnet_sup_in1k', 'resnet_sup_in21k', 'resnet_simclr_in1k',
'resnet_barlow_in1k', 'vit_sup_in1k', 'vit_sup_in21k', 'vit_sup_swag', 'vit_clip_oai',
'vit_clip_laion', 'vit_dino_in1k', 'resnet_dino_in1k'])
# data augmentations
parser.add_argument('--aug', default='basic2',
choices=['none', 'basic', 'basic2', 'auto_aug', 'rand_aug', 'trivial_aug', 'augmix'])
args = parser.parse_args()
start_step = 0
misc.prepare_folders(args)
output_dir = os.path.join(args.output_dir, args.store_name)
if not args.debug:
sys.stdout = misc.Tee(os.path.join(output_dir, 'out.txt'))
sys.stderr = misc.Tee(os.path.join(output_dir, 'err.txt'))
tb_logger = Logger(logdir=output_dir, flush_secs=2)
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
| print("\tCUDA: {}".format(torch.version.cuda)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RomGai/BrainVis
# Path: dc_ldm/models/diffusion/plms.py
class PLMSSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
if ddim_eta != 0:
raise ValueError('ddim_eta must be 0 for PLMS')
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
print(f'Data shape for PLMS sampling is {size}')
samples, intermediates = self.plms_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
**kwargs
)
return samples, intermediates
@torch.no_grad()
def plms_sampling(self, cond, shape,
x_T=None, ddim_use_original_steps=False,
callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None, generator=None):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device, generator=generator)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
print(f"Running PLMS Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
old_eps = []
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
img = img_orig * mask + (1. - mask) * img
outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised, temperature=temperature,
noise_dropout=noise_dropout, score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
old_eps=old_eps, t_next=ts_next)
img, pred_x0, e_t = outs
old_eps.append(e_t)
if len(old_eps) >= 4:
old_eps.pop(0)
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
b, *_, device = *x.shape, x.device
def get_model_output(x, t):
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
return e_t
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
def get_x_prev_and_pred_x0(e_t, index):
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
e_t = get_model_output(x, t)
if len(old_eps) == 0:
# Pseudo Improved Euler (2nd order)
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
e_t_next = get_model_output(x_prev, t_next)
e_t_prime = (e_t + e_t_next) / 2
elif len(old_eps) == 1:
# 2nd order Pseudo Linear Multistep (Adams-Bashforth)
e_t_prime = (3 * e_t - old_eps[-1]) / 2
elif len(old_eps) == 2:
            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
elif len(old_eps) >= 3:
            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
return x_prev, pred_x0, e_t
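# --- Editor's aside (illustrative only, not part of the quoted repository) ---
# The branches above blend the current noise prediction e_t with up to three stored
# predictions using Adams-Bashforth multistep coefficients. A minimal sketch of just
# that coefficient logic; the function name is an assumption:
#
#   def plms_eps(e_t, old_eps):
#       if len(old_eps) == 0:
#           return e_t  # the real first step uses the Heun-style correction shown above
#       if len(old_eps) == 1:
#           return (3 * e_t - old_eps[-1]) / 2
#       if len(old_eps) == 2:
#           return (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
#       return (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
# --- end aside ---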
# Path: dc_ldm/util.py
def instantiate_from_config(config):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
# Path: dataset.py
class Dataset(Data.Dataset):
def __init__(self, device, mode, data, wave_len):
self.device = device
self.datas, self.label ,self.clip,self.clip_moreinf = data
self.mode = mode
self.wave_len = wave_len
self.__padding__()
def __padding__(self):
origin_len = self.datas[0].shape[0]
if origin_len % self.wave_len:
padding_len = self.wave_len - (origin_len % self.wave_len)
padding = np.zeros((len(self.datas), padding_len, self.datas[0].shape[1]), dtype=np.float32)
self.datas = np.concatenate([self.datas, padding], axis=-2)
def __len__(self):
return len(self.datas)
def __getitem__(self, item):
data = torch.tensor(self.datas[item]).to(self.device)
label = self.label[item]
clip=torch.tensor(self.clip[item]).to(self.device)
clip_moreinf = torch.tensor(self.clip_moreinf[item]).to(self.device)
return data, torch.tensor(label).to(self.device), clip,clip_moreinf
def shape(self):
return self.datas[0].shape
# Path: model/BrainVisModels.py
class TimeEncoder(nn.Module):
def __init__(self, args):
super(TimeEncoder, self).__init__()
d_model = args.d_model
self.d=d_model
self.momentum = args.momentum
self.linear_proba = True
self.nocliptune=True
self.device = args.device
self.data_shape = args.data_shape
self.max_len = int(self.data_shape[0] / args.wave_length)
print(self.max_len)
self.mask_len = int(args.mask_ratio * self.max_len)
self.position = PositionalEmbedding(self.max_len, d_model)
self.mask_token = nn.Parameter(torch.randn(d_model, ))
self.input_projection = nn.Conv1d(args.data_shape[1], d_model, kernel_size=args.wave_length,
stride=args.wave_length)
self.encoder = TransformerEncoder(args)
self.momentum_encoder = TransformerEncoder(args)
self.tokenizer = Tokenizer(d_model, args.vocab_size)
self.reg = Regressor(d_model, args.attn_heads, 4 * d_model, 1, args.reg_layers)
self.predict_head = nn.Linear(d_model, args.num_class)
self.channelmapping=ChannelMapping(self.max_len,77)
self.dimmapping = nn.Linear(d_model, 768)
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
xavier_normal_(module.weight.data)
if module.bias is not None:
constant_(module.bias.data, 0.1)
def copy_weight(self):
with torch.no_grad():
for (param_a, param_b) in zip(self.encoder.parameters(), self.momentum_encoder.parameters()):
param_b.data = param_a.data
def momentum_update(self):
with torch.no_grad():
for (param_a, param_b) in zip(self.encoder.parameters(), self.momentum_encoder.parameters()):
param_b.data = self.momentum * param_b.data + (1 - self.momentum) * param_a.data
def pretrain_forward(self, x):
x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()
tokens = self.tokenizer(x)
x += self.position(x)
rep_mask_token = self.mask_token.repeat(x.shape[0], x.shape[1], 1) + self.position(x)
index = np.arange(x.shape[1])
random.shuffle(index)
v_index = index[:-self.mask_len]
m_index = index[-self.mask_len:]
visible = x[:, v_index, :]
mask = x[:, m_index, :]
tokens = tokens[:, m_index]
rep_mask_token = rep_mask_token[:, m_index, :]
rep_visible = self.encoder(visible)
with torch.no_grad():
rep_mask = self.momentum_encoder(mask)
rep_mask_prediction = self.reg(rep_visible, rep_mask_token)
token_prediction_prob = self.tokenizer.center(rep_mask_prediction)
return [rep_mask, rep_mask_prediction], [token_prediction_prob, tokens]
def forward(self, x):
if self.linear_proba==True and self.nocliptune==True:
#with torch.no_grad():
x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()
x += self.position(x)
x = self.encoder(x)
return torch.mean(x, dim=1)
if self.linear_proba==False and self.nocliptune==True:
x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()
x += self.position(x)
x = self.encoder(x)
#lastrep=torch.mean(x, dim=1)
lastrep=x
xcls=self.predict_head(torch.mean(x, dim=1))
return lastrep, torch.mean(x, dim=1), xcls
if self.nocliptune == False: #CLIP
x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()
x += self.position(x)
x = self.encoder(x)
lastrep=torch.mean(x, dim=1)
x=self.channelmapping(x)
x = self.dimmapping(x)
return lastrep#,x
def get_tokens(self, x):
x = self.input_projection(x.transpose(1, 2)).transpose(1, 2).contiguous()
tokens = self.tokenizer(x)
return tokens
# Path: model/BrainVisModels.py
class AlignNet(nn.Module):
def __init__(self, input_size, freq_size, output_size,pretrained_model):
super(AlignNet, self).__init__()
self.pretrained_model = pretrained_model#TimeFreqEncoder
self.fc01=nn.Linear(input_size+freq_size+40, 4*input_size)
self.tanh = nn.Tanh()
self.fc02 = nn.Linear(4*input_size, input_size)
self.tanh = nn.Tanh()
self.fc03=nn.Linear(input_size, 4*input_size)
self.tanh = nn.Tanh()
self.fc04 = nn.Linear(4*input_size, input_size)
self.tanh = nn.Tanh()
self.fc05=nn.Linear(input_size, 4*input_size)
self.tanh = nn.Tanh()
self.fc6 = nn.Linear(4*input_size, output_size)
def forward(self, x):
lastrep,encoded,scores=self.pretrained_model(x)
x = torch.cat((encoded, scores), dim=1)
x = self.fc01(x)
x = self.tanh(x)
res_4is_1=x
x = self.fc02(x)
x = self.tanh(x)
res_is_2 = x
x = self.fc03(x)+res_4is_1
x = self.tanh(x)
res_4is_2 = x
x = self.fc04(x)+res_is_2
x = self.tanh(x)
x = self.fc05(x)+res_4is_2
x = self.tanh(x)
x = self.fc6(x)
return x
# Path: model/BrainVisModels.py
class TimeFreqEncoder(nn.Module):
def __init__(self, pretrained_model_time,pretrained_model_freq,args):
super(TimeFreqEncoder, self).__init__()
self.pretrained_model_time = pretrained_model_time
self.pretrained_model_time.nocliptune=True
self.pretrained_model_time.linear_proba=False
self.pretrained_model_freq=pretrained_model_freq
self.fc01 =nn.Linear( args.d_model+128, args.num_class)
def forward(self,x):
lastrep,time_feature,cls=self.pretrained_model_time(x)
lstmcls,freq_feature=self.pretrained_model_freq(x)
x = torch.cat((time_feature, freq_feature), dim=1)
lastrep = x
encoded=x
x = self.fc01(encoded)
scores=x
return lastrep,encoded,scores
# Path: model/BrainVisModels.py
class FreqEncoder(nn.Module):
def __init__(self, input_size=128, lstm_size=128, lstm_layers=1, output_size=128):
# Call parent
super().__init__()
# Define parameters
self.input_size = input_size
self.lstm_size = lstm_size
self.lstm_layers = lstm_layers
self.output_size = output_size
# Define internal modules
self.lstm = nn.LSTM(input_size, lstm_size, num_layers=lstm_layers, batch_first=True)
self.output = nn.Linear(lstm_size, output_size)
self.classifier = nn.Linear(output_size, 40)
def forward(self, x):
batch_size = x.size(0)
x = x.permute(0, 2, 1)
x = x.cpu()
fourier_transform = np.fft.fft(x, axis=2)
half_spectrum = fourier_transform[:, :, 1:440 // 2 + 1]
amplitude_spectrum = np.abs(half_spectrum)
amplitude_spectrum = torch.tensor(amplitude_spectrum).float()
x = amplitude_spectrum.permute(0, 2, 1)
x = x.to("cuda")
lstm_init = (torch.zeros(self.lstm_layers, batch_size, self.lstm_size),
torch.zeros(self.lstm_layers, batch_size, self.lstm_size))
        if x.is_cuda: lstm_init = (lstm_init[0].cuda(), lstm_init[1].cuda())
lstm_init = (Variable(lstm_init[0], volatile=x.volatile), Variable(lstm_init[1], volatile=x.volatile))
x = self.lstm(x, lstm_init)[0][:, -1, :]
reps = x
# Forward output
xa = F.relu(self.output(x))
x = self.classifier(xa)
return x, xa
# Path: args.py
# Path: cascade_diffusion.py
import torch
import os
import numpy as np
import torchvision.transforms as transforms
import argparse
from omegaconf import OmegaConf
from dc_ldm.models.diffusion.plms import PLMSSampler
from einops import rearrange, repeat
from dc_ldm.util import instantiate_from_config
from torch.utils.data import Dataset, DataLoader
from dataset import Dataset as selfdataset
from model.BrainVisModels import TimeEncoder, AlignNet,TimeFreqEncoder,FreqEncoder
from args import args, Test_data, Train_data_all, Train_data, Train_data_all_with_image_name, Train_data_with_image_name, Test_data_with_image_name
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image
propmt_dict = {'n02106662': 'german shepherd dog',
'n02124075': 'cat ',
'n02281787': 'lycaenid butterfly',
'n02389026': 'sorrel horse',
'n02492035': 'Cebus capucinus',
'n02504458': 'African elephant',
'n02510455': 'panda',
'n02607072': 'anemone fish',
'n02690373': 'airliner',
'n02906734': 'broom',
'n02951358': 'canoe or kayak',
'n02992529': 'cellular telephone',
'n03063599': 'coffee mug',
'n03100240': 'old convertible',
'n03180011': 'desktop computer',
'n03197337': 'digital watch',
'n03272010': 'electric guitar',
'n03272562': 'electric locomotive',
'n03297495': 'espresso maker',
'n03376595': 'folding chair',
'n03445777': 'golf ball',
'n03452741': 'grand piano',
'n03584829': 'smoothing iron',
'n03590841': 'Orange jack-o’-lantern',
'n03709823': 'mailbag',
'n03773504': 'missile',
'n03775071': 'mitten,glove',
'n03792782': 'mountain bike, all-terrain bike',
'n03792972': 'mountain tent',
'n03877472': 'pajama',
'n03888257': 'parachute',
'n03982430': 'pool table, billiard table, snooker table ',
'n04044716': 'radio telescope',
               'n04069434': 'reflex camera',
'n04086273': 'revolver, six-shooter',
'n04120489': 'running shoe',
'n07753592': 'banana',
'n07873807': 'pizza',
'n11939491': 'daisy',
'n13054560': 'bolete'
}
lable_number_dict={
'[12]': 'n02106662',
'[39]': 'n02124075',
'[11]': 'n02281787',
'[0]': 'n02389026',
'[21]': 'n02492035',
'[35]': 'n02504458',
'[8]': 'n02510455',
'[3]': 'n02607072',
'[36]': 'n02690373',
'[18]': 'n02906734',
'[10]': 'n02951358',
'[15]': 'n02992529',
'[5]': 'n03063599',
'[24]': 'n03100240',
'[17]': 'n03180011',
'[34]': 'n03197337',
'[28]': 'n03272010',
'[37]': 'n03272562',
'[4]': 'n03297495',
'[25]': 'n03376595',
'[16]': 'n03445777',
'[30]': 'n03452741',
'[2]': 'n03584829',
'[14]': 'n03590841',
'[23]': 'n03709823',
'[20]': 'n03773504',
'[27]': 'n03775071',
'[6]': 'n03792782',
'[31]': 'n03792972',
'[26]': 'n03877472',
'[1]': 'n03888257',
'[22]': 'n03982430',
'[38]': 'n04044716',
'[29]': 'n04069434',
'[7]': 'n04086273',
'[13]': 'n04120489',
'[32]': 'n07753592',
'[19]': 'n07873807',
'[9]': 'n11939491',
'[33]': 'n13054560'
}
parser = argparse.ArgumentParser(description="Template")
parser.add_argument('-mp','--model_params', default='', nargs='*', help='list of key=value pairs of model options')
opt = parser.parse_args()
# Path
datapath='data/EEG_Feature_Label/'
img_file_type='.JPEG'
device = "cuda"
test_img_names_file=datapath+'test_image_names.pth'
test_seq_file=datapath+'test_seqs.pth'
dff_model_path = "pretrained_model/v1-5-pruned-emaonly.ckpt"
dff_yaml_path = "pretrained_model/config15.yaml"
test_pred_file=datapath+'test_pred.pth'
output_path="picture"
| logger=None
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Rajeshwaran2001/DRM-Media-Tool
# Path: key_getter.py
class KeyGeter(QWidget):
def __init__(self, debug_logger, info_logger):
super().__init__()
self.debug_logger = debug_logger
self.info_logger = info_logger
self.init_ui()
def init_ui(self):
# Create layout
layout = QVBoxLayout()
# Create labels and input fields
label1 = QLabel('PSSH:')
label2 = QLabel('Licence URL:')
label3 = QLabel('Name:')
self.input1 = QLineEdit()
self.input2 = QLineEdit()
self.input3 = QLineEdit()
        # To have input and label on the same line
row_layout1 = QHBoxLayout()
row_layout1.addWidget(label1)
row_layout1.addWidget(self.input1)
layout.addLayout(row_layout1)
row_layout2 = QHBoxLayout()
row_layout2.addWidget(label2)
row_layout2.addWidget(self.input2)
layout.addLayout(row_layout2)
row_layout3 = QHBoxLayout()
row_layout3.addWidget(label3)
row_layout3.addWidget(self.input3)
layout.addLayout(row_layout3)
# Create a button
button = QPushButton('Submit')
# Add labels, input fields, and button to the layout
layout.addWidget(button)
# Set the layout for the main window
self.setLayout(layout)
# Connect the button to a function (e.g., handle_button_click)
button.clicked.connect(self.handle_button_click)
# Create a text browser to display the API response
self.response_browser = QTextBrowser()
# Add the text browser to the layout
layout.addWidget(self.response_browser)
self.show()
def handle_button_click(self):
self.info_logger.info("Submit Button Clicked")
# Get user input from the input fields
pssh = self.input1.text()
license_url = self.input2.text()
name = self.input3.text()
# Check if any field is empty
if not name:
self.info_logger.info("Name Field is Empty")
if not pssh:
self.info_logger.info("pssh Field is Empty")
if not license_url:
self.info_logger.info("license_url Field is Empty")
conn = sqlite3.connect('db.db')
self.info_logger.info("DB Connected Succesfully")
cursor = conn.cursor()
# Create a table with columns if it doesn't exist
cursor.execute('''CREATE TABLE IF NOT EXISTS pssh (
pssh TEXT,
license_url TEXT,
movie_name TEXT
)''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS keys (
key_id INTEGER PRIMARY KEY,
key TEXT,
pssh_id INTEGER,
FOREIGN KEY (pssh_id) REFERENCES pssh (pssh_id)
)
''')
# Insert the values into the table
cursor.execute("INSERT INTO pssh (pssh, license_url, movie_name) VALUES (?, ?, ?)",
(pssh, license_url, name))
conn.commit()
pssh_id = cursor.lastrowid
# Construct the API request
api_url = os.getenv("API_URL")
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (Ktesttemp, like Gecko) Chrome/90.0.4430.85 Safari/537.36",
"Content-Type": "application/json",
"X-API-Key": os.getenv("X_API_KEY"),
}
# Check if either pssh or license_url is empty
if not pssh or not license_url:
error_message = "Both 'pssh' and 'license_url' must be provided."
show_error_message(self, error_message)
self.debug_logger.debug(error_message)
else:
payload = {
"license_url": license_url,
"pssh": pssh,
}
# Make the API request
response = requests.post(api_url, headers=headers, json=payload)
self.info_logger.info(response)
data = json.loads(response.text)
key = None
# print(data)
self.info_logger.info("API response is: %s", response)
if response.status_code in [200, 302]:
if "keys" in data:
keys = data["keys"]
if isinstance(keys, list):
if len(keys) == 1:
for key_info in keys:
if isinstance(key_info, str):
key = key_info
elif isinstance(key_info, dict) and "key" in key_info:
key = key_info["key"]
else:
print('error')
continue
cursor.execute(
"INSERT INTO keys (key, pssh_id) VALUES (?, ?)", (key, pssh_id))
# print("One key found")
self.info_logger.info("Single key found")
else:
# key_strings = keys
# key_string = ', '.join(key_strings)
# part = key_string.replace(
# '[', '').replace(']', '').replace("'", "")
# key_parts = part.split(', ')
# key = "\n".join(key_parts)
# print(key)
# print("Multiple keys found")
self.info_logger.info("Multiple keys found")
key_strings = keys
for key_string in key_strings:
key = key_string.replace(
'[', '').replace(']', '').replace("'", "")
cursor.execute(
"INSERT INTO keys (key, pssh_id) VALUES (?, ?)", (key, pssh_id))
else:
key = keys
cursor.execute(
"INSERT INTO keys (key, pssh_id) VALUES (?, ?)", (key, pssh_id))
self.info_logger.info("Keys Found")
else:
error_message = "No 'key' or 'keys' found in the JSON data."
show_error_message(self, error_message)
self.debug_logger.debug(error_message)
elif response.status_code == 400:
try:
error_message = response.json()["message"]
show_error_message(self, error_message)
self.info_logger.info(error_message)
except Exception as e:
self.debug_logger.debug(e)
else:
error_message = "API ERROR."
show_error_message(self, error_message)
self.info_logger.info(error_message)
current_datetime = datetime.now().strftime("%Y-%m-%d %I:%M:%S %p")
event_data = {
'pssh': pssh,
'license_url': license_url,
'movie_name': name,
'keys': keys if "keys" in data else [],
'datetime': current_datetime,
}
# 'events' is the name of the collection
events_ref = db.collection('events')
events_ref.add(event_data)
self.info_logger.info("Key Added to Globa Db")
# Display the API response in the text browser
conn.commit()
# Close the database connection
conn.close()
if key is not None:
key_str = json.dumps(keys)
self.response_browser.setText(key_str)
# Clear the input fields
self.input1.clear()
self.input2.clear()
self.input3.clear()
else:
error_message = "No keys to display." # Customize this message as needed
# show_error_message(self, error_message)
self.debug_logger.debug(error_message)
# Path: decrypter.py
class Decrypter(QWidget):
def __init__(self, debug_logger, info_logger):
super().__init__()
self.init_ui()
self.conn = None # Database connection
self.cursor = None # Database cursor
self.debug_logger = debug_logger
self.info_logger = info_logger
self.create_database()
def init_ui(self):
layout = QVBoxLayout()
# Create a horizontal layout for the "Select Folder" and folder path
select_folder_layout = QHBoxLayout()
select_folder_label = QLabel("Select Folder:")
select_button = QPushButton("Select Folder")
select_button.clicked.connect(self.browse_folder)
self.folder_path_lineedit = QLineEdit()
select_folder_layout.addWidget(select_folder_label)
select_folder_layout.addWidget(select_button)
select_folder_layout.addWidget(self.folder_path_lineedit)
layout.addLayout(select_folder_layout)
# Create horizontal layout for buttons (Check Folder, GetKeys, Decrypt)
buttons_layout = QHBoxLayout()
check_folder_button = QPushButton("Check Folder")
check_folder_button.clicked.connect(self.check_folder_existence)
buttons_layout.addWidget(check_folder_button)
get_keys_button = QPushButton("Get Keys from DB")
get_keys_button.clicked.connect(self.get_keys_from_db)
buttons_layout.addWidget(get_keys_button)
decrypt_button = QPushButton("Decrypt")
decrypt_button.clicked.connect(self.decrypt_files)
buttons_layout.addWidget(decrypt_button)
merge_button = QPushButton("Media Merger")
merge_button.clicked.connect(self.merger)
buttons_layout.addWidget(merge_button)
layout.addLayout(buttons_layout)
# Create a QListWidget for displaying search results
layout.addWidget(QLabel("Search Results:"))
self.search_result_list = QListWidget()
layout.addWidget(self.search_result_list)
self.setLayout(layout)
# Add these methods to handle button clicks
def browse_folder(self):
folder_path = QFileDialog.getExistingDirectory(self, "Select Folder")
if folder_path:
self.folder_path_lineedit.setText(folder_path)
# self.search_database(folder_path)
def check_folder_existence(self):
folder_path = self.folder_path_lineedit.text()
if os.path.exists(folder_path):
show_success_message(self, "Folder exists.")
self.info_logger.info("Folder exists.")
else:
show_error_message(self, "Folder does not exist.")
self.debug_logger.debug("Folder does not exist.")
def get_keys_from_db(self):
folder_path = self.folder_path_lineedit.text()
if os.path.exists(folder_path):
keys_found = self.search_database(folder_path)
# print(keys_found)
if keys_found:
success_message = "Keys retrieved successfully."
show_success_message(self, success_message)
self.info_logger.info(success_message)
else:
# Customize this message as needed
error_message = "No keys found in the database."
show_error_message(self, error_message)
self.debug_logger.debug(error_message)
else:
show_error_message(self, "No Folder Found.")
self.debug_logger.debug("No Folder Found.")
def decrypt_files(self):
folder_path = self.folder_path_lineedit.text()
if os.path.exists(folder_path):
decrypt = self.decrypt_file(folder_path)
if decrypt:
success_message = "Decryption successfully."
show_success_message(self, success_message)
self.info_logger.info(success_message)
else:
# Customize this message as needed
error_message = "Decryption Failed."
show_error_message(self, error_message)
self.debug_logger.debug(error_message)
else:
show_error_message(self, "No Folder Selected.")
self.debug_logger.debug("No Folder Selected.")
def merger(self):
folder_path = self.folder_path_lineedit.text()
if os.path.exists(folder_path):
merge = self.file_merger(folder_path)
self.info_logger.info("Files Merged Succesfully")
else:
show_error_message(self, "No Folder Selected.")
self.debug_logger.debug("No Folder Selected.")
def create_database(self):
self.conn = sqlite3.connect('db.db')
self.cursor = self.conn.cursor()
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS pssh (
pssh_id INTEGER PRIMARY KEY,
pssh TEXT,
license_url TEXT,
movie_name TEXT
)
''')
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS keys (
key_id INTEGER PRIMARY KEY,
key TEXT,
pssh_id INTEGER,
FOREIGN KEY(pssh_id) REFERENCES pssh(pssh_id)
)
''')
self.conn.commit()
def search_database(self, folder_name):
self.search_result_list.clear()
# Search DB for entries with a movie_name that matches the folder_name
query = "SELECT rowid, movie_name FROM pssh WHERE movie_name = ?"
pattern = os.path.basename(folder_name)
self.cursor.execute(query, (pattern,))
results = self.cursor.fetchall()
        keys_found = False  # track whether any keys were retrieved
for result in results:
rowid, movie_name = result
self.search_result_list.addItem(
f"rowid: {rowid}, Movie Name: {movie_name}")
# Search for keys based on the pssh_id
keys_query = "SELECT key FROM keys WHERE pssh_id = ?"
self.cursor.execute(keys_query, (rowid,))
keys = self.cursor.fetchall()
if keys:
keys_found = True # Set the flag if keys are found
for key in keys:
self.search_result_list.addItem(f" Key: {key[0]}")
if not keys_found:
# Customize this message as needed
error_message = "No keys found in DB."
show_error_message(self, error_message)
self.debug_logger.debug(error_message)
else:
self.info_logger.info("Keys Found in Database")
return keys_found
def decrypt_file(self, folder_name):
self.search_result_list.clear()
# Search DB for entries with a movie_name that matches the folder_name
query = "SELECT rowid, movie_name FROM pssh WHERE movie_name = ?"
pattern = os.path.basename(folder_name)
self.cursor.execute(query, (pattern,))
results = self.cursor.fetchall()
keys = []
for result in results:
rowid, movie_name = result
self.search_result_list.addItem(
f"rowid: {rowid}, Movie Name: {movie_name}")
# Search for keys based on the pssh_id
keys_query = "SELECT key FROM keys WHERE pssh_id = ?"
self.cursor.execute(keys_query, (rowid,))
keys = self.cursor.fetchall()
# Get video and audio files in the selected folder
video_audio_formats = ['.mp4', '.avi', '.webm',
'.mkv', '.m4a', '.wav', '.flac', '.mp3']
files = [f for f in os.listdir(folder_name) if os.path.isfile(
os.path.join(folder_name, f))]
video_audio_files = [f for f in files if os.path.splitext(
f)[1].lower() in video_audio_formats]
# self.search_result_list.addItem("\nVideo and Audio Files:")
# print(files)
for file in video_audio_files:
# self.search_result_list.addItem(f" {file}")
# Decrypt the file using mp4decrypt
decrypted_file = os.path.splitext(
file)[0] + "_decrypted" + os.path.splitext(file)[1]
input_file_path = os.path.normpath(os.path.join(folder_name, file))
output_file_path = os.path.normpath(
os.path.join(folder_name, decrypted_file))
decrypt_command = ['mp4decrypt']
if not keys:
# Customize this message as needed
error_message = "No key found in DB."
show_error_message(self, error_message)
else:
for key in keys:
decrypt_command.extend(["--key", key[0]])
decrypt_command.extend([input_file_path, output_file_path])
try:
# print(decrypt_command)
subprocess.run(decrypt_command, shell=True, check=True)
self.search_result_list.addItem(
f" Decrypted File: {decrypted_file}")
# Remove the original input file
os.remove(input_file_path)
# Rename the decrypted file to the original file name
os.rename(output_file_path, input_file_path)
show_success_message(self, "Decryption successfully Completed")
                self.info_logger.info(
                    f"Decryption of {decrypted_file} is successfully Completed")
# # Ask the user if they want to delete the encrypted file
# reply = QMessageBox.question(
# self, 'Delete Encrypted File',
# 'Do you want to delete the encrypted file?',
# QMessageBox.Yes | QMessageBox.No, QMessageBox.No
# )
# if reply == QMessageBox.Yes:
# # Code to delete the encrypted file
# self.search_result_list.addItem(
# f" Deleted Encrypted File: {QMessageBox.Yes}")
# show_success_message(self,
# "Encrypted file deleted successfully")
except subprocess.CalledProcessError as e:
self.search_result_list.addItem(
f" Error decrypting file: {e}")
show_error_message(self, "Error decrypting file")
self.debug_logger.debug("Error: {e}")
return bool(keys)
def file_merger(self, folder_name):
file_merger_dialog = FileMergerDialog(
self.debug_logger, self.info_logger, folder_name)
file_merger_dialog.exec_()
# Path: logger.py
def setup_logging():
# Get the directory of the script or the executable
if getattr(sys, 'frozen', False): # if the application is frozen
current_dir = os.path.dirname(sys.executable)
else:
current_dir = os.path.dirname(os.path.abspath(__file__))
log_dir = os.path.join(current_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
info_log_file = os.path.join(log_dir, 'info.log')
debug_log_file = os.path.join(log_dir, 'debug.log')
# Configuration for info_logger
info_logger = logging.getLogger('info_logger')
info_handler = logging.FileHandler(info_log_file)
info_formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
info_handler.setFormatter(info_formatter)
info_logger.addHandler(info_handler)
info_logger.setLevel(logging.INFO)
# Configuration for debug_logger
debug_logger = logging.getLogger('debug_logger')
debug_handler = logging.FileHandler(debug_log_file)
debug_formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
debug_handler.setFormatter(debug_formatter)
debug_logger.addHandler(debug_handler)
debug_logger.setLevel(logging.DEBUG)
return info_logger, debug_logger
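# Usage sketch (not part of logger.py; it simply mirrors how main.py below consumes
# the function): setup_logging() returns the two configured loggers, one per log file.
#   info_logger, debug_logger = setup_logging()
#   info_logger.info("application started")           # -> logs/info.log
#   debug_logger.debug("verbose diagnostic details")  # -> logs/debug.log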
# Path: version.py
CHANNEL = 'Beta'
# Path: main.py
import sys
import platform
import webbrowser
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QTabWidget, QMessageBox, QAction, QMenu
from PyQt5.QtGui import QIcon
from key_getter import KeyGeter
from decrypter import Decrypter
from logger import setup_logging
from version import __version__, CHANNEL
info_logger, debug_logger = setup_logging()
current_dir = os.path.dirname(os.path.abspath(__file__))
icon = os.path.join(current_dir, 'assets', 'logo.ico')
git = os.path.join(current_dir, 'assets', 'github.png')
discord = os.path.join(current_dir, 'assets', 'discord.svg')
bug = os.path.join(current_dir, 'assets', 'bug.svg')
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowIcon(QIcon(icon))
self.init_ui()
def init_ui(self):
self.setWindowTitle(f"DRM & Media Tool {__version__} ({CHANNEL})")
self.setGeometry(100, 100, 650, 350)
# Create the tab widget
tab_widget = QTabWidget(self)
# Create the menu bar
menu_bar = self.menuBar()
# Create the Help menu
help_menu = menu_bar.addMenu('Help')
# Create "Tools Used" action
tools_used_action = QAction('Tools Used', self)
tools_used_action.triggered.connect(self.show_tools_used)
help_menu.addAction(tools_used_action)
# Create "About" action
about_action = QAction('About', self)
about_action.triggered.connect(self.show_about)
help_menu.addAction(about_action)
feature_bug_menu = QMenu('Feature/Bug', self)
request_feature_bug_action = QAction(
'Request a New Feature or Report Bug', self)
request_feature_bug_action.triggered.connect(
self.open_feature_bug_form)
feature_bug_menu.addAction(request_feature_bug_action)
menu_bar.addMenu(feature_bug_menu)
help_menu = menu_bar.addMenu('Discord')
open_discord_action = QAction('Open Discord', self)
open_discord_action.triggered.connect(self.open_discord)
help_menu.addAction(open_discord_action)
# Create tabs
hello_tab = KeyGeter(debug_logger, info_logger)
file_lister_tab = Decrypter(debug_logger, info_logger)
# Add tabs to the tab widget
        tab_widget.addTab(hello_tab, "Key Grabber")
tab_widget.addTab(file_lister_tab, "Decrypter & Merger")
# Set the central widget to be the tab widget
| self.setCentralWidget(tab_widget) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: gmum/ViewingDirectionGaussianSplatting
# Path: scene/colmap_loader.py
def read_extrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_text(path):
"""
Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
assert model == "PINHOLE", "While the loader support other types, the rest of the code assumes PINHOLE"
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
# Path: scene/colmap_loader.py
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
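# Note (inferred from the formula above): qvec is ordered (w, x, y, z), matching the
# COLMAP convention, so the unit quaternion maps to the identity rotation, e.g.
#   np.allclose(qvec2rotmat(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(3))  # True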
# Path: scene/colmap_loader.py
def read_extrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
# Path: scene/colmap_loader.py
def read_intrinsics_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
# Path: scene/colmap_loader.py
def read_points3D_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
for p_id in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
xyzs[p_id] = xyz
rgbs[p_id] = rgb
errors[p_id] = error
return xyzs, rgbs, errors
# Path: scene/colmap_loader.py
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
xyzs = None
rgbs = None
errors = None
num_points = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
num_points += 1
xyzs = np.empty((num_points, 3))
rgbs = np.empty((num_points, 3))
errors = np.empty((num_points, 1))
count = 0
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = np.array(float(elems[7]))
xyzs[count] = xyz
rgbs[count] = rgb
errors[count] = error
count += 1
return xyzs, rgbs, errors
# Path: utils/graphics_utils.py
def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
Rt = np.zeros((4, 4))
Rt[:3, :3] = R.transpose()
Rt[:3, 3] = t
Rt[3, 3] = 1.0
C2W = np.linalg.inv(Rt)
cam_center = C2W[:3, 3]
cam_center = (cam_center + translate) * scale
C2W[:3, 3] = cam_center
Rt = np.linalg.inv(C2W)
return np.float32(Rt)
# Path: utils/graphics_utils.py
def focal2fov(focal, pixels):
return 2*math.atan(pixels/(2*focal))
# Path: utils/graphics_utils.py
def fov2focal(fov, pixels):
return pixels / (2 * math.tan(fov / 2))
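# Worked example (illustrative numbers, not from the repo): for an 800 px wide image
# with a focal length of 800 px,
#   focal2fov(800, 800) = 2 * atan(800 / (2 * 800)) = 2 * atan(0.5) ~= 0.9273 rad (~53.13 deg)
#   fov2focal(0.9273, 800) ~= 800 px
# i.e. the two helpers are inverses of each other.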
# Path: utils/sh_utils.py
def SH2RGB(sh):
return sh * C0 + 0.5
# Path: scene/gaussian_model.py
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
def __init__(self, sh_degree : int):
def capture(self):
def restore(self, model_args, training_args):
def get_scaling(self):
def get_rotation(self):
def get_xyz(self):
def get_features(self):
def get_opacity(self):
def get_covariance(self, scaling_modifier = 1):
def oneupSHdegree(self):
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
def training_setup(self, training_args):
def update_learning_rate(self, iteration):
def construct_list_of_attributes(self):
def save_ply(self, path):
def reset_opacity(self):
def load_ply(self, path):
def replace_tensor_to_optimizer(self, tensor, name):
def _prune_optimizer(self, mask):
def prune_points(self, mask):
def cat_tensors_to_optimizer(self, tensors_dict):
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
def densify_and_clone(self, grads, grad_threshold, scene_extent):
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
def add_densification_stats(self, viewspace_point_tensor, update_filter):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
# Path: scene/dataset_readers.py
import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
if eval:
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
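# Split sketch (follows directly from the idx % llffhold test above): with eval=True
# and the default llffhold=8, cameras 0, 8, 16, ... become test views; e.g. for 16
# cameras the test split is [0, 8] and the remaining 14 cameras are used for training.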
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
| T = w2c[:3, 3] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Ruiyuan-Zhang/CCS
# Path: multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None):
super().__init__()
self.blockatt = blockatt
self.blockatt_memory = blockatt_memory
self.embed_dim = args.encoder_embed_dim
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.use_nfm = use_nfm
print('using nfm?', self.use_nfm)
self.nb = nb
self.norm_blocks = self.nb
self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. Then raise embed_dim in args
self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)
self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
print("SETUP TRANSFORMER LAYER", 'blocks', self.nb)
activation_dropout_p = getattr(args, "activation_dropout", 0)
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0)
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.encoder_normalize_before
self.fc1 = self.build_fc1(
self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size
)
self.fc2 = self.build_fc2(
args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size
)
self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)
if self.blockatt:
self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm)
self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)
if self.blockatt_memory:
memory_slots = 4
memory_head_size = 128
memory_num_heads = 1
gate_style = 'memory'
print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size)
self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim,
num_heads=memory_num_heads, num_blocks=1, forget_bias=1., input_bias=0.,
attention_mlp_layers=5, gate_style=gate_style)
#self.n_blocks_val * self.block_dim_val
#self.block_dim_val = dim_val // self.n_blocks_val
self.memory_attention = MemoryAttention(n_blocks_query=self.nb, n_blocks_val=8, dim_query=self.embed_dim, dim_val=memory_head_size*memory_num_heads*memory_slots)
self.self_mem_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)
#self.competition = GroupLinearLayer(self.embed_dim//self.nb, 1, self.nb, a=0.05)
#self.comp_sm = nn.Softmax(dim=2)
self.competition = None
if out_proj_dim is not None:
self.out_proj = GroupLinearLayer(self.embed_dim//self.nb, out_proj_dim//self.nb, self.nb)
else:
self.out_proj = None
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(GroupLinearLayer(input_dim//self.nb, output_dim//self.nb, self.nb), p=q_noise, block_size=qn_block_size)
#return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(GroupLinearLayer(input_dim//self.nb, output_dim//self.nb, self.nb), p=q_noise, block_size=qn_block_size)
#return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
nblocks=self.nb,
top_k_ratio = args.topk_ratio,
use_value_competition = False,
)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
seq_len, bsz, _ = x.shape
if self.competition is not None:
comp = self.competition(x)
comp = self.comp_sm(comp)
#comp = F.gumbel_softmax(comp, tau=0.5, hard=False, dim=2)
comp = comp.unsqueeze(-1).repeat(1,1,1,self.embed_dim//self.nb)
comp = comp.view((x.shape[0], x.shape[1], self.embed_dim))
else:
comp = None
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
x, _ = self.self_attn(
query=state if state is not None else x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.blockatt:
if self.normalize_before:
x = self.comm_norm(x)
residual = x
x, _ = self.comm(x)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.comm_norm(x)
if self.blockatt_memory:
if self.normalize_before:
x = self.self_mem_norm(x)
residual = x
T,bsz,nhid = x.shape
if comp is not None:
x_write = comp * x
else:
x_write = x*1.0
_, new_memory = self.memory_layer.forward_step(x_write.reshape((T*bsz, nhid)), self.memory_obj[0])
self.memory_obj[0] = new_memory
Tbs,num_slots,nhid_slot = new_memory.shape
mem_read = new_memory.reshape((T, bsz, num_slots*nhid_slot))
x,_ = self.memory_attention(x, mem_read)
x = residual + x
if not self.normalize_before:
x = self.self_mem_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
#print('fc1 on shape', x.shape, 'in encoder')
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
#print('fc2 on shape', x.shape, 'in encoder')
x = self.fc2(x)
x = self.dropout_module(x)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.out_proj is not None:
x = self.out_proj(x)
return x
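# Sketch of the two normalization orders described in the class docstring
# (illustrative shorthand; Attn, FFN and Drop stand for this layer's self-attention,
# fc1->fc2 and dropout sub-blocks, ignoring the optional block/memory attention):
#   pre-norm  (encoder_normalize_before=True):  x = x + Drop(Attn(LN(x))); x = x + Drop(FFN(LN(x)))
#   post-norm (default, as in the paper):       x = LN(x + Drop(Attn(x))); x = LN(x + Drop(FFN(x)))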
# Path: multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
class TransformerEncoderLayerVanilla(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, args, out_proj = None):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = self.build_self_attention(self.embed_dim, args)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)
self.dropout = args.dropout
self.activation_fn = utils.get_activation_fn(
activation=getattr(args, "activation_fn", "relu")
)
self.activation_dropout = getattr(args, "activation_dropout", 0)
if self.activation_dropout == 0:
# for backwards compatibility with models that use args.relu_dropout
self.activation_dropout = getattr(args, "relu_dropout", 0)
self.normalize_before = args.encoder_normalize_before
self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)
if out_proj is not None:
self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj)
else:
self.final_linear = None
def build_fc1(self, input_dim, output_dim):
return nn.Linear(input_dim, output_dim)
def build_fc2(self, input_dim, output_dim):
return nn.Linear(input_dim, output_dim)
def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=args.self_attention,
shared_memory_attention = args.shared_memory_attention,
use_topk = args.use_topk,
topk = args.topk,
num_steps = args.num_steps,
mem_slots = args.mem_slots,
null_attention = args.null_attention,
regressive = args.regressive
)
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None, memory = None):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, src_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where
T_tgt is the length of query, while T_src is the length of key,
though here both query and key is x here,
attn_mask[t_tgt, t_src] = 1 means when calculating embedding
for t_tgt, t_src is excluded (or masked out), =0 means it is
included in attention
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
# TODO: to formally solve this problem, we need to change fairseq's
# MultiheadAttention. We will do this later on.
#print(state is not None)
x, memory, _ = self.self_attn(
query=state if state is not None else x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
attn_mask=attn_mask,
memory = memory
)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = F.dropout(x, p=float(self.activation_dropout), training=self.training)
x = self.fc2(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.final_linear is not None:
x = self.final_linear(x)
return x, memory
# Path: multi_part_assembly/utils/wx_transformer_utilities/pos_enc.py
class PositionEncoder(nn.Module):
def __init__(self, d_model, max_seq_len = 300):
super().__init__()
self.d_model = d_model
# create constant 'pe' matrix with values dependant on
# pos and i
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
self.pos_emb_weight = nn.Parameter(torch.ones_like(pe))
def forward(self, x):
# make embeddings relatively larger
x = x.permute(1,0,2)
#x = x * math.sqrt(self.d_model)
#add constant to embedding
seq_len = x.size(1)
#width x channel
#pe_use = F.interpolate(self.pe.permute(0,2,1), size=seq_len).permute(0,2,1)
pe_use = Variable(self.pe[:,:seq_len] * F.sigmoid(self.pos_emb_weight[:,:seq_len]), requires_grad=False).cuda()
#bs x pos x nhid --> bs x nhid x pos --> bs x pos x nhid
x = x + pe_use
#Variable(pe_use, requires_grad=False).cuda()
x = x.permute(1,0,2)
return x
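# Shape note (hedged): forward expects x of shape (seq_len, batch, d_model) and returns
# the same shape, adding a sigmoid-gated slice of the fixed table, pe[:, :seq_len],
# broadcast over the batch dimension.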
# Path: multi_part_assembly/utils/wx_transformer_utilities/GroupLinearLayer.py
class GroupLinearLayer(nn.Module):
def __init__(self, din, dout, num_blocks, bias=True, a = None):
super(GroupLinearLayer, self).__init__()
self.nb = num_blocks
#din = din // num_blocks
#dout = dout // num_blocks
self.dout = dout
if a is None:
a = 1. / math.sqrt(dout)
self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))
self.bias = bias
if bias is True:
self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))
#self.bias = nn.Parameter(torch.zeros(dout*num_blocks))
else:
self.bias = None
def forward(self,x):
ts,bs,m = x.shape
#x = x.reshape((ts*bs, self.nb, m//self.nb))
x = x.permute(1,0,2)
x = torch.bmm(x,self.weight)
x = x.permute(1,0,2)
if not self.bias is None:
x = x + self.bias
#x = x.reshape((ts, bs, self.dout*self.nb))
return x
# Path: multi_part_assembly/models/wx_transformer/wx_transformers.py
import torch
import torch.nn as nn
import types
import math
import numpy as np
import math
import time
import time
from multi_part_assembly.utils.wx_transformer_utilities.transformer_layer import TransformerEncoderLayer, TransformerEncoderLayerVanilla
from multi_part_assembly.utils.wx_transformer_utilities.pos_enc import PositionEncoder
from multi_part_assembly.utils.wx_transformer_utilities.GroupLinearLayer import GroupLinearLayer
#from transformer import TransformerEncoder
args = types.SimpleNamespace()
args.use_module_communication = 'true'
args.encoder_embed_dim = 512
| args.encoder_attention_heads = 8 #was 8 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: uc-vision/taichi-splatting
# Path: taichi_splatting/misc/autograd.py
@contextmanager
def restore_grad(*tensors):
try:
grads = [tensor.grad if tensor.grad is not None else None
for tensor in tensors]
for tensor in tensors:
if tensor.requires_grad is True:
tensor.grad = torch.zeros_like(tensor)
yield
finally:
for tensor, grad in zip(tensors, grads):
tensor.grad = grad
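# Usage sketch (assumed names, not from the repo): restore_grad zeroes .grad on the
# tensors that require grad for the duration of the block and puts the original
# gradients back on exit, so a backward pass inside can be inspected in isolation.
#   with restore_grad(points, features):
#       loss(points, features).backward()
#       grad_points = points.grad.clone()  # gradients from this pass only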
# Path: taichi_splatting/perspective/params.py
class CameraParams:
T_image_camera: torch.Tensor # (3, 3) camera projection matrix
T_camera_world : torch.Tensor # (4, 4) camera view matrix
@property
def device(self):
return self.T_image_camera.device
@property
def T_image_world(self):
T_image_camera = torch.eye(4,
device=self.T_image_camera.device, dtype=self.T_image_camera.dtype)
T_image_camera[0:3, 0:3] = self.T_image_camera
return T_image_camera @ self.T_camera_world
near_plane: float
far_plane: float
image_size: Tuple[Integral, Integral]
def __repr__(self):
w, h = self.image_size
fx, fy = self.T_image_camera[0, 0], self.T_image_camera[1, 1]
cx, cy = self.T_image_camera[0, 2], self.T_image_camera[1, 2]
pos_str = ", ".join([f"{x:.3f}" for x in self.camera_position])
return f"CameraParams({w}x{h}, fx={fx:.4f}, fy={fy:.4f}, cx={cx:.4f}, cy={cy:.4f}, clipping={self.near_plane:.4f}-{self.far_plane:.4f}, position=({pos_str})"
@property
def camera_position(self):
T_world_camera = torch.inverse(self.T_camera_world)
return T_world_camera[0:3, 3]
def to(self, device=None, dtype=None):
return CameraParams(
T_image_camera=self.T_image_camera.to(device=device, dtype=dtype),
T_camera_world=self.T_camera_world.to(device=device, dtype=dtype),
near_plane=self.near_plane,
far_plane=self.far_plane,
image_size=self.image_size
)
def __post_init__(self):
assert self.T_image_camera.shape == (3, 3), f"Expected shape (3, 3), got {self.T_image_camera.shape}"
assert self.T_camera_world.shape == (4, 4), f"Expected shape (4, 4), got {self.T_camera_world.shape}"
assert len(self.image_size) == 2
assert self.near_plane > 0
assert self.far_plane > self.near_plane
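# Minimal construction sketch (all values below are made up for illustration):
#   params = CameraParams(
#       T_image_camera=torch.tensor([[500., 0., 320.],
#                                    [0., 500., 240.],
#                                    [0., 0., 1.]]),
#       T_camera_world=torch.eye(4),
#       near_plane=0.1, far_plane=100.0, image_size=(640, 480))
#   params.T_image_world  # (4, 4) world-to-image transform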
# Path: taichi_splatting/taichi_lib/generic.py
def make_library(dtype=ti.f32):
"""
This function returns a namespace containing all the functions and data types
that are used in the other modules. This is done to provide different precisions
for the same code. Primarily for enabling gradient (gradcheck) testing using f64.
"""
vec2 = ti.types.vector(2, dtype)
vec3 = ti.types.vector(3, dtype)
vec4 = ti.types.vector(4, dtype)
mat2 = ti.types.matrix(2, 2, dtype)
mat3 = ti.types.matrix(3, 3, dtype)
mat4 = ti.types.matrix(4, 4, dtype)
mat4x2 = ti.types.matrix(4, 2, dtype=dtype)
#
# Gaussian datatypes
#
@ti.dataclass
class Gaussian2D:
uv : vec2
uv_conic : vec3
alpha : dtype
@ti.dataclass
class Gaussian3D:
position : vec3
log_scaling : vec3
rotation : vec4
alpha_logit : dtype
@ti.func
def alpha(self):
return sigmoid(self.alpha_logit)
@ti.func
def scale(self):
return ti.math.exp(self.log_scaling)
vec_g2d = ti.types.vector(struct_size(Gaussian2D), dtype=dtype)
vec_g3d = ti.types.vector(struct_size(Gaussian3D), dtype=dtype)
@ti.func
def to_vec_g2d(uv:vec2, uv_conic:vec3, alpha:dtype) -> vec_g2d:
return vec_g2d(*uv, *uv_conic, alpha)
@ti.func
def to_vec_g3d(position:vec3, log_scaling:vec3, rotation:vec4, alpha_logit:dtype) -> vec_g3d:
return vec_g3d(*position, *log_scaling, *rotation, alpha_logit)
@ti.func
def unpack_vec_g3d(vec:vec_g3d) -> Gaussian3D:
return vec[0:3], vec[3:6], vec[6:10], vec[10]
@ti.func
def unpack_vec_g2d(vec:vec_g2d) -> Gaussian2D:
return vec[0:2], vec[2:5], vec[5]
@ti.func
def get_position_g3d(vec:vec_g3d) -> vec3:
return vec[0:3]
@ti.func
def from_vec_g3d(vec:vec_g3d) -> Gaussian3D:
return Gaussian3D(vec[0:3], vec[3:6], vec[6:10], vec[10])
@ti.func
def from_vec_g2d(vec:vec_g2d) -> Gaussian2D:
return Gaussian2D(vec[0:2], vec[2:5], vec[5])
@ti.func
def unpack_activate_g3d(vec:vec_g3d):
position, log_scaling, rotation, alpha_logit = unpack_vec_g3d(vec)
return position, ti.exp(log_scaling), ti.math.normalize(rotation), sigmoid(alpha_logit)
@ti.func
def bounding_sphere(vec:vec_g3d, gaussian_scale: ti.template()):
position, log_scaling = vec[0:3], vec[3:6]
return position, ti.exp(log_scaling).max() * gaussian_scale
# Taichi structs don't have static methods, but they can be added afterward
Gaussian2D.vec = vec_g2d
Gaussian2D.to_vec = to_vec_g2d
Gaussian2D.from_vec = from_vec_g2d
Gaussian2D.unpack = unpack_vec_g2d
Gaussian3D.vec = vec_g3d
Gaussian3D.to_vec = to_vec_g3d
Gaussian3D.from_vec = from_vec_g3d
Gaussian3D.unpack = unpack_vec_g3d
Gaussian3D.unpack_activate = unpack_activate_g3d
Gaussian3D.get_position = get_position_g3d
Gaussian3D.bounding_sphere = bounding_sphere
#
# Projection related functions
#
mat2x3f = ti.types.matrix(n=2, m=3, dtype=dtype)
@ti.func
def project_perspective_camera_image(
position: vec3,
T_camera_world: mat4,
projective_transform: mat3,
):
point_in_camera = (T_camera_world @ vec4(*position, 1)).xyz
uv = (projective_transform @ point_in_camera) / point_in_camera.z
return uv.xy, point_in_camera
@ti.func
def project_perspective(
position: vec3,
T_image_world: mat4,
):
point_in_camera = (T_image_world @ vec4(*position, 1))
return point_in_camera.xy / point_in_camera.z, point_in_camera.z
def camera_origin(T_camera_world: mat4):
r, t = split_rt(T_camera_world)
t = -(r.transpose() @ t)
return t
@ti.func
def gaussian_covariance_in_camera(
T_camera_world: mat4,
cov_rotation: vec4,
cov_scale: vec3,
) -> mat3:
""" Construct and rotate the covariance matrix in camera space
"""
W = T_camera_world[:3, :3]
R = quat_to_mat(cov_rotation)
S = mat3([
[cov_scale.x, 0, 0],
[0, cov_scale.y, 0],
[0, 0, cov_scale.z]
])
# covariance matrix, 3x3, equation (6) in the paper
# Sigma = R @ S @ S.transpose() @ R.transpose()
# cov_uv = J @ W @ Sigma @ W.transpose() @ J.transpose() # equation (5) in the paper
m = W @ R @ S
return m @ m.transpose()
@ti.func
def get_projective_transform_jacobian(
projection: mat3,
position: vec3,
):
# cx = projective_transform[0, 2]
# cy = projective_transform[1, 2]
# [[fx/z, 0, cx/z - (cx*z + fx*x)/z**2], [0, fy/z, cy/z - (cy*z + fy*y)/z**2]]
fx = projection[0, 0]
fy = projection[1, 1]
x, y, z = position
return mat2x3f([
[fx / z, 0, -(fx * x) / (z * z)],
[0, fy / z, -(fy * y) / (z * z)]
])
@ti.func
def project_perspective_gaussian(
projective_transform: mat3,
point_in_camera: vec3,
cov_in_camera: mat3) -> mat2:
""" Approximate the 2D gaussian covariance in image space """
J = get_projective_transform_jacobian(
projective_transform, point_in_camera)
cov_uv = J @ cov_in_camera @ J.transpose()
return cov_uv
#
# Miscellaneous math functions
#
@ti.func
def sigmoid(x:dtype):
return 1. / (1. + ti.exp(-x))
@ti.func
def inverse_sigmoid(x:dtype):
return -ti.log(1. / x - 1.)
#
# Miscellaneous conversion functions
#
@ti.func
def mat3_from_ndarray(ndarray:ti.template()):
return mat3([ndarray[i, j]
for i in ti.static(range(3)) for j in ti.static(range(3))])
@ti.func
def mat4_from_ndarray(ndarray:ti.template()):
return mat4([ndarray[i, j]
for i in ti.static(range(4)) for j in ti.static(range(4))])
@ti.func
def isfin(x):
return ~(ti.math.isinf(x) or ti.math.isnan(x))
#
# Ellipsoid related functions, covariance, conic, etc.
#
@ti.func
def radii_from_cov(uv_cov: vec3) -> dtype:
d = (uv_cov.x - uv_cov.z)
max_eig_sq = (uv_cov.x + uv_cov.z +
ti.sqrt(d * d + 4.0 * uv_cov.y * uv_cov.y)) / 2.0
return ti.sqrt(max_eig_sq)
@ti.func
def cov_axes(cov:vec3):
tr = cov.x + cov.z
det = cov.x * cov.z - cov.y * cov.y
gap = tr**2 - 4 * det
sqrt_gap = ti.sqrt(ti.max(gap, 0))
lambda1 = (tr + sqrt_gap) * 0.5
lambda2 = (tr - sqrt_gap) * 0.5
v1 = vec2(cov.x - lambda2, cov.y).normalized()
v2 = vec2(v1.y, -v1.x)
return v1 * ti.sqrt(lambda1), v2 * ti.sqrt(lambda2)
@ti.func
def inverse_cov(cov: vec3):
# inverse of upper triangular part of symmetric matrix
inv_det = 1 / (cov.x * cov.z - cov.y * cov.y)
return vec3(inv_det * cov.z, -inv_det * cov.y, inv_det * cov.x)
@ti.func
def upper(cov: mat2) -> vec3:
return vec3(cov[0, 0], cov[0, 1], cov[1, 1])
@ti.func
def radii_from_conic(conic: vec3):
return radii_from_cov(inverse_cov(conic))
@ti.func
def conic_pdf(xy: vec2, uv: vec2, uv_conic: vec3) -> dtype:
dx, dy = xy - uv
a, b, c = uv_conic
p = ti.exp(-0.5 * (dx**2 * a + dy**2 * c) - dx * dy * b)
return p
@ti.func
def conic_pdf_with_grad(xy: vec2, uv: vec2, uv_conic: vec3):
d = xy - uv
a, b, c = uv_conic
dx2 = d.x**2
dy2 = d.y**2
dxdy = d.x * d.y
p = ti.exp(-0.5 * (dx2 * a + dy2 * c) - dxdy * b)
dp_duv = vec2(
(b * d.y - 0.5 * a * (2 * uv.x - 2 * xy.x)) * p,
(b * d.x - 0.5 * c * (2 * uv.y - 2 * xy.y)) * p
)
dp_dconic = vec3(-0.5 * dx2 * p, -dxdy * p, -0.5 * dy2 * p)
return p, dp_duv, dp_dconic
@ti.func
def conic_grad(p: ti.f32, xy: vec2, uv: vec2, uv_conic: vec3):
d = xy - uv
a, b, c = uv_conic
dx2 = d.x**2
dy2 = d.y**2
dxdy = d.x * d.y
dp_duv = vec2(
(b * d.y - 0.5 * a * (2 * uv.x - 2 * xy.x)) * p,
(b * d.x - 0.5 * c * (2 * uv.y - 2 * xy.y)) * p
)
dp_dconic = vec3(-0.5 * dx2 * p, -dxdy * p, -0.5 * dy2 * p)
return dp_duv, dp_dconic
@ti.func
def cov_inv_basis(uv_cov: vec3, scale: dtype) -> mat2:
basis = ti.Matrix.cols(cov_axes(uv_cov))
return (basis * scale).inverse()
@ti.func
def quat_to_mat(q:vec4) -> mat3:
x, y, z, w = q
x2, y2, z2 = x*x, y*y, z*z
return mat3(
1 - 2*y2 - 2*z2, 2*x*y - 2*w*z, 2*x*z + 2*w*y,
2*x*y + 2*w*z, 1 - 2*x2 - 2*z2, 2*y*z - 2*w*x,
2*x*z - 2*w*y, 2*y*z + 2*w*x, 1 - 2*x2 - 2*y2
)
@ti.func
def join_rt(r:mat3, t:vec3) -> mat4:
return mat4(
r[0, 0], r[0, 1], r[0, 2], t[0],
r[1, 0], r[1, 1], r[1, 2], t[1],
r[2, 0], r[2, 1], r[2, 2], t[2],
0, 0, 0, 1
)
@ti.func
def split_rt(rt:mat4) -> ti.template():
return rt[:3, :3], rt[:3, 3]
@ti.func
def qt_to_mat(q:vec4, t:vec3) -> mat4:
r = quat_to_mat(q)
return mat4(
r[0, 0], r[0, 1], r[0, 2], t[0],
r[1, 0], r[1, 1], r[1, 2], t[1],
r[2, 0], r[2, 1], r[2, 2], t[2],
0, 0, 0, 1
)
@ti.func
def scaling_matrix(scale:vec3) -> mat3:
return mat3(
scale.x, 0, 0,
0, scale.y, 0,
0, 0, scale.z
)
@ti.func
def quat_mul(q1: vec4, q2: vec4) -> vec4:
return vec4(
q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y,
q1.w * q2.y - q1.x * q2.z + q1.y * q2.w + q1.z * q2.x,
q1.w * q2.z + q1.x * q2.y - q1.y * q2.x + q1.z * q2.w,
q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z,
)
@ti.func
def quat_conj(q: vec4) -> vec4:
return vec4(-q.x, -q.y, -q.z, q.w)
@ti.func
def quat_rotate(q: vec4, v: vec3) -> vec3:
qv = vec4(*v, 0.0)
    q_rot = quat_mul(q, quat_mul(qv, quat_conj(q)))
return q_rot.xyz
return SimpleNamespace(**locals())
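# Usage sketch (hedged, following the docstring of make_library): one namespace is
# built per precision, mirroring how projection.py below calls it, e.g.
#   lib_f32 = make_library(ti.f32)  # fast path for rendering
#   lib_f64 = make_library(ti.f64)  # higher precision, e.g. for torch.autograd.gradcheck
#   Gaussian3D, Gaussian2D = lib_f32.Gaussian3D, lib_f32.Gaussian2D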
# Path: taichi_splatting/taichi_lib/conversions.py
def struct_size(ti_struct:ti.lang.struct.StructType):
# Path: taichi_splatting/perspective/projection.py
from functools import cache
from typing import Tuple
from beartype import beartype
from taichi_splatting.misc.autograd import restore_grad
from .params import CameraParams
from taichi_splatting.taichi_lib.generic import make_library
from taichi_splatting.taichi_lib.conversions import torch_taichi
import taichi as ti
import torch
@cache
def project_to_image_function(torch_dtype=torch.float32,
blur_cov:float = 0.3):
dtype = torch_taichi[torch_dtype]
lib = make_library(dtype)
Gaussian3D, Gaussian2D = lib.Gaussian3D, lib.Gaussian2D
@ti.kernel
def project_perspective_kernel(
| gaussians: ti.types.ndarray(Gaussian3D.vec, ndim=1), # (N, 3 + feature_vec.n1) # input |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: camenduru/FreeInit-hf
# Path: animatediff/models/attention.py
class Transformer3DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
):
super().__init__()
self.use_linear_projection = use_linear_projection
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
# Define input layers
self.in_channels = in_channels
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
if use_linear_projection:
self.proj_in = nn.Linear(in_channels, inner_dim)
else:
self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
# Define transformers blocks
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
for d in range(num_layers)
]
)
# 4. Define output layers
if use_linear_projection:
self.proj_out = nn.Linear(in_channels, inner_dim)
else:
self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):
# Input
assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
batch, channel, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
hidden_states = self.proj_in(hidden_states)
# Blocks
for block in self.transformer_blocks:
hidden_states = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output,)
return Transformer3DModelOutput(sample=output)
# Path: animatediff/models/resnet.py
class Downsample3D(nn.Module):
def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
else:
raise NotImplementedError
def forward(self, hidden_states):
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: animatediff/models/resnet.py
class ResnetBlock3D(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout=0.0,
temb_channels=512,
groups=32,
groups_out=None,
pre_norm=True,
eps=1e-6,
non_linearity="swish",
time_embedding_norm="default",
output_scale_factor=1.0,
use_in_shortcut=None,
use_inflated_groupnorm=None,
):
super().__init__()
self.pre_norm = pre_norm
self.pre_norm = True
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.time_embedding_norm = time_embedding_norm
self.output_scale_factor = output_scale_factor
if groups_out is None:
groups_out = groups
assert use_inflated_groupnorm != None
if use_inflated_groupnorm:
self.norm1 = InflatedGroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
else:
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
if self.time_embedding_norm == "default":
time_emb_proj_out_channels = out_channels
elif self.time_embedding_norm == "scale_shift":
time_emb_proj_out_channels = out_channels * 2
else:
raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
else:
self.time_emb_proj = None
if use_inflated_groupnorm:
self.norm2 = InflatedGroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
else:
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if non_linearity == "swish":
self.nonlinearity = lambda x: F.silu(x)
elif non_linearity == "mish":
self.nonlinearity = Mish()
elif non_linearity == "silu":
self.nonlinearity = nn.SiLU()
self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
self.conv_shortcut = None
if self.use_in_shortcut:
self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_tensor, temb):
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
# Path: animatediff/models/resnet.py
class Upsample3D(nn.Module):
def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_conv_transpose = use_conv_transpose
self.name = name
conv = None
if use_conv_transpose:
raise NotImplementedError
elif use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
def forward(self, hidden_states, output_size=None):
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
# Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
else:
hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
# if self.use_conv:
# if self.name == "conv":
# hidden_states = self.conv(hidden_states)
# else:
# hidden_states = self.Conv2d_0(hidden_states)
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: animatediff/models/motion_module.py
def get_motion_module(
in_channels,
motion_module_type: str,
motion_module_kwargs: dict
):
if motion_module_type == "Vanilla":
return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)
else:
raise ValueError
# Path: animatediff/models/unet_blocks.py
import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module
use_inflated_groupnorm=use_inflated_groupnorm,
)
)
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
out_channels // attn_num_head_channels,
in_channels=out_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=out_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample3D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
output_states = ()
for resnet, attn, motion_module in zip(self.resnets, self.attentions, self.motion_modules):
if self.training and self.gradient_checkpointing:
def create_custom_forward(module, return_dict=None):
def custom_forward(*inputs):
if return_dict is not None:
return module(*inputs, return_dict=return_dict)
else:
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(attn, return_dict=False),
hidden_states,
encoder_hidden_states,
)[0]
if motion_module is not None:
hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(motion_module), hidden_states.requires_grad_(), temb, encoder_hidden_states)
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
# add motion module
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
output_states += (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states += (hidden_states,)
return hidden_states, output_states
class DownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
output_scale_factor=1.0,
add_downsample=True,
downsample_padding=1,
use_inflated_groupnorm=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
motion_modules = []
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
| out_channels=out_channels, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: exislow/tidal-dl-ng
# Path: tidal_dl_ng/config.py
class Settings(BaseConfig, metaclass=SingletonMeta):
cls_model = ModelSettings
data = None
def __init__(self):
self.file_path = path_file_settings()
self.read(self.file_path)
# Path: tidal_dl_ng/constants.py
REQUESTS_TIMEOUT_SEC = 45
# Path: tidal_dl_ng/constants.py
class CoverDimensions(Enum):
Px320: str = "320x320"
Px640: str = "640x640"
Px1280: str = "1280x1280"
# Path: tidal_dl_ng/constants.py
class MediaType(Enum):
TRACK: str = "track"
VIDEO: str = "video"
PLAYLIST: str = "playlist"
ALBUM: str = "album"
MIX: str = "mix"
# Path: tidal_dl_ng/constants.py
class SkipExisting(Enum):
Disabled: bool = False
Filename: str = "exact"
ExtensionIgnore: str = "extension_ignore"
# Path: tidal_dl_ng/constants.py
class StreamManifestMimeType(Enum):
MPD: str = "application/dash+xml"
BTS: str = "application/vnd.tidal.bts"
VIDEO: str = "video/mp2t"
# Path: tidal_dl_ng/helper/decryption.py
def decrypt_file(path_file_encrypted: str, path_file_destination: str, key: str, nonce: str) -> None:
"""
Decrypts an encrypted MQA file given the file, key and nonce
"""
# Initialize counter and file decryptor
counter = Counter.new(64, prefix=nonce, initial_value=0)
decryptor = AES.new(key, AES.MODE_CTR, counter=counter)
# Open and decrypt
with open(path_file_encrypted, "rb") as f_src:
audio_decrypted = decryptor.decrypt(f_src.read())
# Replace with decrypted file
with open(path_file_destination, "wb") as f_dst:
f_dst.write(audio_decrypted)
# Path: tidal_dl_ng/helper/decryption.py
def decrypt_security_token(security_token: str) -> (str, str):
"""
Decrypts security token into key and nonce pair
security_token should match the securityToken value from the web response
"""
# Do not change this
master_key = "UIlTTEMmmLfGowo/UC60x2H45W6MdGgTRfo/umg4754="
# Decode the base64 strings to ascii strings
master_key = base64.b64decode(master_key)
security_token = base64.b64decode(security_token)
# Get the IV from the first 16 bytes of the securityToken
iv = security_token[:16]
encrypted_st = security_token[16:]
# Initialize decryptor
decryptor = AES.new(master_key, AES.MODE_CBC, iv)
# Decrypt the security token
decrypted_st = decryptor.decrypt(encrypted_st)
# Get the audio stream decryption key and nonce from the decrypted security token
key = decrypted_st[:16]
nonce = decrypted_st[16:24]
return key, nonce
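# Minimal sketch (not from the repository) of how the two helpers above compose:
# derive the AES key and CTR nonce from the stream's security token, then decrypt
# the downloaded file. The function name and its arguments are illustrative
# placeholders only.
def decrypt_downloaded_track(path_encrypted: str, path_destination: str, security_token: str) -> None:
    key, nonce = decrypt_security_token(security_token)
    decrypt_file(path_encrypted, path_destination, key, nonce)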
# Path: tidal_dl_ng/helper/exceptions.py
class MediaMissing(Exception):
pass
# Path: tidal_dl_ng/helper/exceptions.py
class MediaUnknown(Exception):
pass
# Path: tidal_dl_ng/helper/exceptions.py
class UnknownManifestFormat(Exception):
pass
# Path: tidal_dl_ng/helper/path.py
def check_file_exists(path_file: str, extension_ignore: bool = False):
if extension_ignore:
path_file = Path(path_file).stem + ".*"
# TODO: Check what happens if no files match.
result = bool(glob.glob(path_file))
return result
# Path: tidal_dl_ng/helper/path.py
def format_path_media(fmt_template: str, media: Track | Album | Playlist | UserPlaylist | Video | Mix) -> str:
result = fmt_template
# Search track format template for placeholder.
regex = r"\{(.+?)\}"
matches = re.finditer(regex, fmt_template, re.MULTILINE)
fn_format = get_format_fn(media)
for _matchNum, match in enumerate(matches, start=1):
template_str = match.group()
result_fmt = fn_format(match.group(1), media)
if result_fmt:
value = sanitize_filename(result_fmt)
result = result.replace(template_str, value)
return result
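# Hypothetical, self-contained illustration of the placeholder substitution idea
# used by format_path_media above, driven by a plain dict instead of a tidalapi
# media object. The helper name and the template keys are made up for this sketch.
import re

def format_template_sketch(fmt_template: str, values: dict) -> str:
    result = fmt_template
    for match in re.finditer(r"\{(.+?)\}", fmt_template):
        replacement = values.get(match.group(1))
        if replacement:
            result = result.replace(match.group(), str(replacement))
    return result

print(format_template_sketch("{artist} - {title}", {"artist": "Artist", "title": "Song"}))  # Artist - Song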
# Path: tidal_dl_ng/helper/path.py
def path_file_sanitize(path_file: str, adapt: bool = False) -> (bool, str):
# Split into path and filename
pathname, filename = os.path.split(path_file)
# Sanitize path
try:
pathname_sanitized = sanitize_filepath(
pathname, replacement_text=" ", validate_after_sanitize=True, platform="auto"
)
except ValidationError:
# If adaptation of the path is allowed in case of an error, set the path to HOME.
if adapt:
pathname_sanitized = Path.home()
else:
raise
# Sanitize filename
try:
filename_sanitized = sanitize_filename(
filename, replacement_text=" ", validate_after_sanitize=True, platform="auto"
)
filename_sanitized_extension = Path(filename_sanitized).suffix
# Check if the file extension was removed by shortening the filename length
if filename_sanitized_extension == "":
# Add the original file extension
file_extension = "_" + Path(path_file).suffix
filename_sanitized = filename_sanitized[: -len(file_extension)] + file_extension
except ValidationError as e:
# TODO: Implement proper exception handling and logging.
print(e)
raise
# Join path and filename
result = os.path.join(pathname_sanitized, filename_sanitized)
return result
# Path: tidal_dl_ng/helper/tidal.py
def name_builder_item(media: Track) -> str:
return f"{name_builder_artist(media)} - {name_builder_title(media)}"
# Path: tidal_dl_ng/helper/wrapper.py
class WrapperLogger:
fn_print: Callable = None
def __init__(self, fn_print: Callable):
self.fn_print = fn_print
def debug(self, value):
self.fn_print(value)
def warning(self, value):
self.fn_print(value)
def info(self, value):
self.fn_print(value)
def error(self, value):
self.fn_print(value)
def critical(self, value):
self.fn_print(value)
# Path: tidal_dl_ng/metadata.py
class Metadata:
path_file: str = None
title: str = None
album: str = None
albumartist: str = None
artists: [str] = None
copy_right: str = None
tracknumber: int = None
discnumber: int = None
totaldisc: int = None
totaltrack: int = None
date: str = None
composer: [str] = None
isrc: str = None
lyrics: str = None
path_cover: str = None
url_cover: str = None
m: mutagen.mp4.MP4 | mutagen.mp3.MP3 | mutagen.flac.FLAC = None
# TODO: What about videos?
def __init__(
self,
path_file: str,
album: str = "",
title: str = "",
artists: list[str] | None = None,
copy_right: str = "",
tracknumber: int = 0,
discnumber: int = 0,
totaltrack: int = 0,
totaldisc: int = 0,
composer: list[str] | None = None,
isrc: str = "",
albumartist: str = "",
date: str = "",
lyrics: str = "",
path_cover: str = "",
url_cover: str = "",
):
self.path_file = path_file
self.title = title
self.album = album
self.albumartist = albumartist
self.artists = artists
self.copy_right = copy_right
self.tracknumber = tracknumber
self.discnumber = discnumber
self.totaldisc = totaldisc
self.totaltrack = totaltrack
self.date = date
self.composer = composer
self.isrc = isrc
self.lyrics = lyrics
self.path_cover = path_cover
self.url_cover = url_cover
self.m: mutagen.mp4.MP4 | mutagen.flac.FLAC | mutagen.mp3.MP3 = mutagen.File(self.path_file)
def _cover(self) -> bool:
result: bool = False
data_cover: str | bytes = self.cover_data(url=self.url_cover, path_file=self.path_cover)
if data_cover:
if isinstance(self.m, mutagen.flac.FLAC):
flac_cover = flac.Picture()
flac_cover.type = id3.PictureType.COVER_FRONT
flac_cover.data = data_cover
flac_cover.mime = "image/jpeg"
self.m.clear_pictures()
self.m.add_picture(flac_cover)
elif isinstance(self.m, mutagen.mp3.MP3):
self.m.tags.add(APIC(encoding=3, data=data_cover))
elif isinstance(self.m, mutagen.mp4.MP4):
cover_mp4 = mp4.MP4Cover(data_cover)
self.m.tags["covr"] = [cover_mp4]
result = True
return result
def save(self):
if not self.m.tags:
self.m.add_tags()
if isinstance(self.m, mutagen.flac.FLAC):
self.set_flac()
elif isinstance(self.m, mutagen.mp3.MP3):
self.set_mp3()
elif isinstance(self.m, mutagen.mp4.MP4):
self.set_mp4()
self._cover()
self.m.save()
return True
def set_flac(self):
self.m.tags["title"] = self.title
self.m.tags["album"] = self.album
self.m.tags["albumartist"] = self.albumartist
self.m.tags["artist"] = ", ".join(self.artists) if self.artists else ""
self.m.tags["copyright"] = self.copy_right
self.m.tags["tracknumber"] = str(self.tracknumber)
self.m.tags["tracktotal"] = str(self.totaltrack)
self.m.tags["discnumber"] = str(self.discnumber)
self.m.tags["disctotal"] = str(self.totaldisc)
self.m.tags["date"] = self.date
self.m.tags["composer"] = ", ".join(self.composer) if self.composer else ""
self.m.tags["isrc"] = self.isrc
self.m.tags["lyrics"] = self.lyrics
def set_mp3(self):
self.m.tags.add(TIT2(encoding=3, text=self.title))
self.m.tags.add(TALB(encoding=3, text=self.album))
self.m.tags.add(TOPE(encoding=3, text=self.albumartist))
self.m.tags.add(TPE1(encoding=3, text=", ".join(self.artists) if self.artists else ""))
self.m.tags.add(TCOP(encoding=3, text=self.copy_right))
self.m.tags.add(TRCK(encoding=3, text=str(self.tracknumber)))
self.m.tags.add(TPOS(encoding=3, text=str(self.discnumber)))
self.m.tags.add(TDRC(encoding=3, text=self.date))
self.m.tags.add(TCOM(encoding=3, text=", ".join(self.composer) if self.composer else ""))
self.m.tags.add(TSRC(encoding=3, text=self.isrc))
self.m.tags.add(USLT(encoding=3, lang="eng", desc="desc", text=self.lyrics))
def set_mp4(self):
self.m.tags["\xa9nam"] = self.title
self.m.tags["\xa9alb"] = self.album
self.m.tags["aART"] = self.albumartist
self.m.tags["\xa9ART"] = ", ".join(self.artists) if self.artists else ""
self.m.tags["cprt"] = self.copy_right
self.m.tags["trkn"] = [[self.tracknumber, self.totaltrack]]
self.m.tags["disk"] = [[self.discnumber, self.totaldisc]]
# self.m.tags['\xa9gen'] = self.genre
self.m.tags["\xa9day"] = self.date
self.m.tags["\xa9wrt"] = ", ".join(self.composer) if self.composer else ""
self.m.tags["\xa9lyr"] = self.lyrics
def cover_data(self, url: str = None, path_file: str = None) -> str | bytes:
result: str | bytes = ""
if url:
try:
result = requests.get(url, timeout=REQUESTS_TIMEOUT_SEC).content
except Exception as e:
# TODO: Implement proper logging.
print(e)
elif path_file:
try:
with open(path_file, "rb") as f:
result = f.read()
except OSError as e:
# TODO: Implement proper logging.
print(e)
return result
# Path: tidal_dl_ng/model/gui_data.py
class ProgressBars:
item: QtCore.Signal
item_name: QtCore.Signal
list_item: QtCore.Signal
# Path: tidal_dl_ng/model/tidal.py
class StreamManifest:
codecs: str
mime_type: str
urls: [str]
file_extension: str
encryption_type: str | None = None
encryption_key: str | None = None
# Path: tidal_dl_ng/download.py
import base64
import json
import os
import random
import shutil
import tempfile
import time
import ffmpeg
import m3u8
import requests
from collections.abc import Callable
from logging import Logger
from uuid import uuid4
from mpegdash.parser import MPEGDASHParser
from requests.exceptions import HTTPError
from rich.progress import Progress, TaskID
from tidalapi import Album, Mix, Playlist, Session, Track, UserPlaylist, Video
from tidal_dl_ng.config import Settings
from tidal_dl_ng.constants import REQUESTS_TIMEOUT_SEC, CoverDimensions, MediaType, SkipExisting, StreamManifestMimeType
from tidal_dl_ng.helper.decryption import decrypt_file, decrypt_security_token
from tidal_dl_ng.helper.exceptions import MediaMissing, MediaUnknown, UnknownManifestFormat
from tidal_dl_ng.helper.path import check_file_exists, format_path_media, path_file_sanitize
from tidal_dl_ng.helper.tidal import name_builder_item
from tidal_dl_ng.helper.wrapper import WrapperLogger
from tidal_dl_ng.metadata import Metadata
from tidal_dl_ng.model.gui_data import ProgressBars
from tidal_dl_ng.model.tidal import StreamManifest
# TODO: Set appropriate client string and use it for video download.
# https://github.com/globocom/m3u8#using-different-http-clients
class RequestsClient:
def download(
self, uri: str, timeout: int = REQUESTS_TIMEOUT_SEC, headers: dict | None = None, verify_ssl: bool = True
):
if not headers:
headers = {}
o = requests.get(uri, timeout=timeout, headers=headers)
return o.text, o.url
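# Hedged usage sketch (not from the repository): following the m3u8 README linked
# in the TODO above, a custom client such as RequestsClient can be passed to
# m3u8.load via its http_client argument. The playlist URL below is a placeholder.
playlist = m3u8.load("https://example.com/video/playlist.m3u8", http_client=RequestsClient())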
class Download:
settings: Settings = None
session: Session = None
skip_existing: SkipExisting = False
def __init__(self, session: Session, skip_existing: SkipExisting = SkipExisting.Disabled):
self.settings = Settings()
self.session = session
self.skip_existing = skip_existing
def _download(
self,
| fn_logger: Callable, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: smoores-dev/storyteller
# Path: storyteller/synchronize/files.py
CACHE_DIR = f"{DATA_DIR}/cache"
# Path: storyteller/synchronize/files.py
TEXT_DIR = f"{DATA_DIR}/assets/text"
# Path: storyteller/synchronize/audio.py
def get_audio_chapter_filenames(book_name: str):
book_dir = get_audio_directory(book_name)
dirname = get_chapters_path(book_dir)
return sorted([str(Path(dirname, filename)) for filename in os.listdir(dirname)])
# Path: storyteller/synchronize/audio.py
def get_transcriptions(book_name: str):
audio_chapter_filenames = get_audio_chapter_filenames(book_name)
transcription_filenames = [
get_transcription_filename(chapter_filename)
for chapter_filename in audio_chapter_filenames
]
transcriptions: List[whisperx.types.AlignedTranscriptionResult] = []
for transcription_filename in transcription_filenames:
with open(transcription_filename, mode="r") as transcription_file:
transcription = json.load(transcription_file)
transcriptions.append(transcription)
return transcriptions
# Path: storyteller/synchronize/epub.py
class SentenceRange:
id: int
start: float
end: float
audiofile: str
# Path: storyteller/synchronize/epub.py
def create_media_overlay(
base_filename: str,
chapter_filename: str,
sentence_ranges: List[SentenceRange],
):
soup = BeautifulSoup(
"""
<smil xmlns="http://www.w3.org/ns/SMIL" xmlns:epub="http://www.idpf.org/2007/ops" version="3.0">
<body>
</body>
</smil>
""",
"xml",
)
seq = soup.new_tag("seq", id=f"{base_filename}_overlay")
seq["epub:textref"] = f"../{chapter_filename}"
seq["epub:type"] = "chapter"
soup.body.append(seq) # type: ignore
for sentence_range in sentence_ranges:
par = soup.new_tag("par", id=f"sentence{sentence_range.id}")
text = soup.new_tag(
"text", src=f"../{chapter_filename}#sentence{sentence_range.id}"
)
audio = soup.new_tag(
"audio",
src=f"../{get_epub_audio_filename(sentence_range.audiofile)}",
clipBegin=f"{sentence_range.start}s",
clipEnd=f"{sentence_range.end}s",
)
par.append(text)
par.append("\n")
par.append(audio)
par.append("\n")
seq.append(par)
seq.append("\n")
return soup.encode(formatter="minimal")
# Path: storyteller/synchronize/epub.py
@cache
def get_chapter_sentences(chapter: epub.EpubHtml):
soup = BeautifulSoup(chapter.get_body_content(), "html.parser")
textblocks = soup.find_all(
["p", "li", "blockquote", "h1", "h2", "h3", "h4", "h5", "h6"]
)
return [
re.sub(consecutivenewlines, " ", sentence)
for textblock in textblocks
if isinstance(textblock, Tag)
for sentence in sent_tokenize(textblock.get_text())
]
# Path: storyteller/synchronize/epub.py
@cache
def get_chapter_text(chapter: epub.EpubHtml):
soup = BeautifulSoup(chapter.get_body_content(), "html.parser")
return re.sub(consecutivenewlines, " ", soup.get_text())
# Path: storyteller/synchronize/epub.py
def get_epub_audio_filename(audio_filename: str) -> str:
return f"Audio/{os.path.basename(audio_filename)}"
# Path: storyteller/synchronize/epub.py
def get_sentences_with_offsets(text: str):
sentences = sent_tokenize(text)
sentences_with_offsets: list[str] = []
last_sentence_end = 0
for sentence in sentences:
sentence_start = text.find(sentence, last_sentence_end)
if sentence_start > last_sentence_end:
sentences_with_offsets.append(text[last_sentence_end:sentence_start])
sentences_with_offsets.append(sentence)
last_sentence_end = sentence_start + len(sentence)
if len(text) > last_sentence_end:
sentences_with_offsets.append(text[last_sentence_end:])
return sentences_with_offsets
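# Tiny illustrative check (assumes nltk's punkt data is available for sent_tokenize):
# because the gaps between sentences are kept as separate pieces, the returned list
# concatenates back to the original text.
pieces = get_sentences_with_offsets("First sentence.  Second one.")
assert "".join(pieces) == "First sentence.  Second one."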
# Path: storyteller/synchronize/epub.py
def read_epub(book_name: str):
book = epub.read_epub(get_epub_filepath(book_name))
for item in book.get_items_of_type(ITEM_DOCUMENT):
if not item.is_chapter():
continue
soup = BeautifulSoup(item.content)
head: Union[Tag, None] = soup.find("head") # type: ignore
if head is not None:
links = head.find_all("link")
for link in links:
item.add_link(
href=link["href"], rel=" ".join(link["rel"]), type=link["type"]
)
return book
# Path: storyteller/synchronize/epub.py
def get_chapters(book: epub.EpubBook) -> List[epub.EpubHtml]:
spine_ids = [item[0] for item in book.spine]
chapters = [cast(epub.EpubHtml, book.get_item_with_id(id)) for id in spine_ids]
return chapters
# Path: storyteller/synchronize/epub.py
def tag_sentences(chapter: epub.EpubHtml):
content = cast(str, chapter.get_content())
soup = BeautifulSoup(content, "html.parser")
body_soup = soup.find("body")
if body_soup is None:
return
if isinstance(body_soup, NavigableString):
return
textblocks = body_soup.find_all(
["p", "li", "blockquote", "h1", "h2", "h3", "h4", "h5", "h6"]
)
start_id = 0
for textblock in textblocks:
if not isinstance(textblock, Tag):
continue
spans = get_textblock_spans(start_id, textblock)
new_content = serialize_spans(soup, spans)
textblock.clear()
textblock.extend(new_content)
try:
start_id = get_last_span_id(spans) + 1
except StopIteration:
pass
chapter.set_content(soup.encode())
# Path: storyteller/synchronize/sync.py
from dataclasses import dataclass
from itertools import groupby
from pathlib import Path
from typing import Any, Callable, Dict, List, TypedDict, Union, cast
from fuzzysearch import Match, find_near_matches
from ebooklib import epub
from mutagen.mp4 import MP4
from mutagen.mp3 import MP3
from .files import CACHE_DIR, TEXT_DIR
from .audio import (
get_audio_chapter_filenames,
get_transcriptions,
)
from .epub import (
SentenceRange,
create_media_overlay,
get_chapter_sentences,
get_chapter_text,
get_epub_audio_filename,
get_sentences_with_offsets,
read_epub,
get_chapters,
tag_sentences,
)
import json
import math
import os
import sys
import whisperx.types
start, audiofile = find_timestamps(
first_match.start + transcription_offset + chapter_offset, transcription
)
if len(sentence_ranges) > 0:
last_audiofile = sentence_ranges[-1].audiofile
if audiofile == last_audiofile:
sentence_ranges[-1].end = start
else:
last_mp4 = (
MP4(last_audiofile)
if last_audiofile.endswith(".mp4")
else MP3(last_audiofile)
)
sentence_ranges[-1].end = last_mp4.info.length
start = 0
elif last_sentence_range is not None:
if audiofile == last_sentence_range.audiofile:
last_sentence_range.end = start
else:
last_mp4 = (
MP4(last_sentence_range.audiofile)
if last_sentence_range.audiofile.endswith(".mp4")
else MP3(last_sentence_range.audiofile)
)
last_sentence_range.end = last_mp4.info.length
start = 0
else:
start = 0
sentence_ranges.append(SentenceRange(sentence_index, start, start, audiofile))
not_found = 0
transcription_window_index = (
get_window_index_from_offset(transcription_window_list, first_match.start)
+ transcription_window_index
)
last_good_transcription_window = transcription_window_index
sentence_index += 1
return sentence_ranges
def interpolate_sentence_ranges(
sentence_ranges: List[SentenceRange],
) -> List[SentenceRange]:
interpolated: List[SentenceRange] = []
for sentence_range in sentence_ranges:
if len(interpolated) == 0:
interpolated.append(sentence_range)
continue
last_sentence_range = interpolated[-1]
count = sentence_range.id - last_sentence_range.id
diff = last_sentence_range.end - last_sentence_range.start
interpolated_length = diff / count
for i in range(1, count):
interpolated_sentence_range = SentenceRange(
last_sentence_range.id + i,
last_sentence_range.start + interpolated_length * i,
last_sentence_range.start + interpolated_length * (i + 1),
last_sentence_range.audiofile,
)
interpolated.append(interpolated_sentence_range)
interpolated.append(sentence_range)
return interpolated
def get_chapter_duration(sentence_ranges: List[SentenceRange]):
duration = 0
for _, file_group in groupby(sentence_ranges, key=lambda r: r.audiofile):
file_group_list = list(file_group)
duration += file_group_list[-1].end - file_group_list[0].start
return duration
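# Illustrative only (not from the repository): how the groupby-based summation in
# get_chapter_duration behaves on made-up sentence ranges spanning two audio files.
example_ranges = [
    SentenceRange(0, 0.0, 5.0, "a.mp4"),
    SentenceRange(1, 5.0, 9.0, "a.mp4"),
    SentenceRange(2, 0.0, 3.0, "b.mp4"),
]
print(get_chapter_duration(example_ranges))  # (9.0 - 0.0) + (3.0 - 0.0) = 12.0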
@dataclass
class SyncedChapter:
chapter: epub.EpubHtml
sentence_ranges: List[SentenceRange]
audio: List[epub.EpubItem]
def sync_chapter(
start_sentence: int,
transcription: StorytellerTranscription,
chapter: epub.EpubHtml,
transcription_offset: int,
last_sentence_range: Union[SentenceRange, None],
):
chapter_sentences = get_chapter_sentences(chapter)
sentence_ranges = get_sentence_ranges(
start_sentence,
transcription,
chapter_sentences,
transcription_offset,
last_sentence_range,
)
sentence_ranges = interpolate_sentence_ranges(sentence_ranges)
tag_sentences(chapter)
chapter_filepath_length = len(chapter.file_name.split(os.path.sep)) - 1
relative_ups = "../" * chapter_filepath_length
chapter.add_link(
rel="stylesheet",
href=f"{relative_ups}Styles/storyteller-readaloud.css",
type="text/css",
)
audiofiles = set([sentence_range.audiofile for sentence_range in sentence_ranges])
audio_items = []
for audiofile in audiofiles:
epub_audio_filename = get_epub_audio_filename(audiofile)
audio_item = epub.EpubItem(
uid=epub_audio_filename,
| file_name=epub_audio_filename, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: zyrant/SPGroup3D
# Path: mmdet3d/version.py
def parse_version_info(version_str):
# Path: mmdet3d/apis/train.py
def init_random_seed(seed=None, device='cuda'):
"""Initialize random seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes to prevent some potential bugs.
Args:
seed (int, optional): The seed. Default to None.
device (str, optional): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent
# some potential bugs. Please refer to
# https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
# Path: mmdet3d/apis/train.py
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""A function wrapper for launching model training according to cfg.
Because we need different eval_hook in runner. Should be deprecated in the
future.
"""
if cfg.model.type in ['EncoderDecoder3D']:
train_segmentor(
model,
dataset,
cfg,
distributed=distributed,
validate=validate,
timestamp=timestamp,
meta=meta)
else:
train_detector(
model,
dataset,
cfg,
distributed=distributed,
validate=validate,
timestamp=timestamp,
meta=meta)
# Path: mmdet3d/datasets/builder.py
def build_dataset(cfg, default_args=None):
from mmdet3d.datasets.dataset_wrappers import CBGSDataset
from mmdet.datasets.dataset_wrappers import (ClassBalancedDataset,
ConcatDataset, RepeatDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'ConcatDataset':
dataset = ConcatDataset(
[build_dataset(c, default_args) for c in cfg['datasets']],
cfg.get('separate_eval', True))
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'ClassBalancedDataset':
dataset = ClassBalancedDataset(
build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
elif cfg['type'] == 'CBGSDataset':
dataset = CBGSDataset(build_dataset(cfg['dataset'], default_args))
elif isinstance(cfg.get('ann_file'), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
elif cfg['type'] in DATASETS._module_dict.keys():
dataset = build_from_cfg(cfg, DATASETS, default_args)
else:
dataset = build_from_cfg(cfg, MMDET_DATASETS, default_args)
return dataset
# Path: mmdet3d/models/builder.py
def build_model(cfg, train_cfg=None, test_cfg=None):
"""A function warpper for building 3D detector or segmentor according to
cfg.
Should be deprecated in the future.
"""
if cfg.type in ['EncoderDecoder3D']:
return build_segmentor(cfg, train_cfg=train_cfg, test_cfg=test_cfg)
else:
return build_detector(cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# Path: mmdet3d/utils/collect_env.py
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__
env_info['MMSegmentation'] = mmseg.__version__
env_info['MMDetection3D'] = mmdet3d.__version__ + '+' + get_git_hash()[:7]
env_info['spconv2.0'] = IS_SPCONV2_AVAILABLE
return env_info
# Path: mmdet3d/utils/logger.py
def get_root_logger(log_file=None, log_level=logging.INFO, name='mmdet3d'):
"""Get root logger and add a keyword filter to it.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmdet3d".
Args:
log_file (str, optional): File path of log. Defaults to None.
log_level (int, optional): The level of logger.
Defaults to logging.INFO.
name (str, optional): The name of the root logger, also used as a
filter keyword. Defaults to 'mmdet3d'.
Returns:
:obj:`logging.Logger`: The obtained logger
"""
logger = get_logger(name=name, log_file=log_file, log_level=log_level)
# add a logging filter
logging_filter = logging.Filter(name)
logging_filter.filter = lambda record: record.find(name) != -1
return logger
# Path: tools/train.py
import os
import argparse
import copy
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
import importlib
from os import path as osp
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmdet import __version__ as mmdet_version
from mmdet3d import __version__ as mmdet3d_version
from mmdet3d.apis import init_random_seed, train_model
from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_model
from mmdet3d.utils import collect_env, get_root_logger
from mmdet.apis import set_random_seed
from mmseg import __version__ as mmseg_version
from mmdet.utils import setup_multi_processes
from mmdet3d.utils import setup_multi_processes
cfg.resume_from = args.resume_from
if args.auto_resume:
cfg.auto_resume = args.auto_resume
warnings.warn('`--auto-resume` is only supported when mmdet '
'version >= 2.20.0 for 3D detection model or '
'mmsegmentation version >= 0.21.0 for 3D '
'segmentation model')
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
# specify logger name, if we still use 'mmdet', the output info will be
# filtered and won't be saved in the log_file
# TODO: ugly workaround to judge whether we are training det or seg model
if cfg.model.type in ['EncoderDecoder3D']:
logger_name = 'mmseg'
else:
logger_name = 'mmdet'
logger = get_root_logger(
log_file=log_file, log_level=cfg.log_level, name=logger_name)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
seed = init_random_seed(args.seed)
seed = seed + dist.get_rank() if args.diff_seed else seed
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_model(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
logger.info(f'Model:\n{model}')
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
# in case we use a dataset wrapper
if 'dataset' in cfg.data.train:
val_dataset.pipeline = cfg.data.train.dataset.pipeline
else:
val_dataset.pipeline = cfg.data.train.pipeline
# set test_mode=False here in deep copied config
# which do not affect AP/AR calculation later
# refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa
val_dataset.test_mode = False
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=mmdet_version,
mmseg_version=mmseg_version,
mmdet3d_version=mmdet3d_version,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE # for segmentors
if hasattr(datasets[0], 'PALETTE') else None)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
| meta=meta) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: jdejaegh/irm-kmi-ha
# Path: custom_components/irm_kmi/coordinator.py
class IrmKmiCoordinator(DataUpdateCoordinator):
"""Coordinator to update data from IRM KMI"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize the coordinator."""
super().__init__(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="IRM KMI weather",
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(minutes=7),
)
self._api_client = IrmKmiApiClient(session=async_get_clientsession(hass))
self._zone = get_config_value(entry, CONF_ZONE)
self._dark_mode = get_config_value(entry, CONF_DARK_MODE)
self._style = get_config_value(entry, CONF_STYLE)
self._config_entry = entry
async def _async_update_data(self) -> ProcessedCoordinatorData:
"""Fetch data from API endpoint.
This is the place to pre-process the data to lookup tables
so entities can quickly look up their data.
"""
if (zone := self.hass.states.get(self._zone)) is None:
raise UpdateFailed(f"Zone '{self._zone}' not found")
try:
# Note: asyncio.TimeoutError and aiohttp.ClientError are already
# handled by the data update coordinator.
async with async_timeout.timeout(10):
api_data = await self._api_client.get_forecasts_coord(
{'lat': zone.attributes[ATTR_LATITUDE],
'long': zone.attributes[ATTR_LONGITUDE]}
)
_LOGGER.debug(f"Observation for {api_data.get('cityName', '')}: {api_data.get('obs', '{}')}")
except IrmKmiApiError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
if api_data.get('cityName', None) in OUT_OF_BENELUX:
# TODO create a repair when this triggers
_LOGGER.info(f"Config state: {self._config_entry.state}")
_LOGGER.error(f"The zone {self._zone} is now out of Benelux and forecast is only available in Benelux."
f"Associated device is now disabled. Move the zone back in Benelux and re-enable to fix "
f"this")
disable_from_config(self.hass, self._config_entry)
issue_registry.async_create_issue(
self.hass,
DOMAIN,
"zone_moved",
is_fixable=True,
severity=issue_registry.IssueSeverity.ERROR,
translation_key='zone_moved',
data={'config_entry_id': self._config_entry.entry_id, 'zone': self._zone},
translation_placeholders={'zone': self._zone}
)
return ProcessedCoordinatorData()
return await self.process_api_data(api_data)
async def async_refresh(self) -> None:
"""Refresh data and log errors."""
await self._async_refresh(log_failures=True, raise_on_entry_error=True)
async def _async_animation_data(self, api_data: dict) -> RadarAnimationData:
"""From the API data passed in, call the API to get all the images and create the radar animation data object.
Frames from the API are merged with the background map and the location marker to create each frame."""
animation_data = api_data.get('animation', {}).get('sequence')
localisation_layer_url = api_data.get('animation', {}).get('localisationLayer')
country = api_data.get('country', '')
if animation_data is None or localisation_layer_url is None or not isinstance(animation_data, list):
return RadarAnimationData()
try:
images_from_api = await self.download_images_from_api(animation_data, country, localisation_layer_url)
except IrmKmiApiError:
_LOGGER.warning(f"Could not get images for weather radar")
return RadarAnimationData()
localisation = images_from_api[0]
images_from_api = images_from_api[1:]
lang = self.hass.config.language if self.hass.config.language in LANGS else 'en'
radar_animation = RadarAnimationData(
hint=api_data.get('animation', {}).get('sequenceHint', {}).get(lang),
unit=api_data.get('animation', {}).get('unit', {}).get(lang),
location=localisation
)
rain_graph = self.create_rain_graph(radar_animation, animation_data, country, images_from_api)
radar_animation['svg_animated'] = rain_graph.get_svg_string()
radar_animation['svg_still'] = rain_graph.get_svg_string(still_image=True)
return radar_animation
async def process_api_data(self, api_data: dict) -> ProcessedCoordinatorData:
"""From the API data, create the object that will be used in the entities"""
return ProcessedCoordinatorData(
current_weather=IrmKmiCoordinator.current_weather_from_data(api_data),
daily_forecast=IrmKmiCoordinator.daily_list_to_forecast(api_data.get('for', {}).get('daily')),
hourly_forecast=IrmKmiCoordinator.hourly_list_to_forecast(api_data.get('for', {}).get('hourly')),
animation=await self._async_animation_data(api_data=api_data),
warnings=self.warnings_from_data(api_data.get('for', {}).get('warning'))
)
async def download_images_from_api(self,
animation_data: list,
country: str,
localisation_layer_url: str) -> tuple[Any]:
"""Download a batch of images to create the radar frames."""
coroutines = list()
coroutines.append(
self._api_client.get_image(localisation_layer_url,
params={'th': 'd' if country == 'NL' or not self._dark_mode else 'n'}))
for frame in animation_data:
if frame.get('uri', None) is not None:
coroutines.append(
self._api_client.get_image(frame.get('uri'), params={'rs': STYLE_TO_PARAM_MAP[self._style]}))
async with async_timeout.timeout(20):
images_from_api = await asyncio.gather(*coroutines)
_LOGGER.debug(f"Just downloaded {len(images_from_api)} images")
return images_from_api
@staticmethod
def current_weather_from_data(api_data: dict) -> CurrentWeatherData:
"""Parse the API data to build a CurrentWeatherData."""
# Process data to get current hour forecast
now_hourly = None
hourly_forecast_data = api_data.get('for', {}).get('hourly')
if not (hourly_forecast_data is None
or not isinstance(hourly_forecast_data, list)
or len(hourly_forecast_data) == 0):
for current in hourly_forecast_data[:2]:
if datetime.now().strftime('%H') == current['hour']:
now_hourly = current
break
# Get UV index
module_data = api_data.get('module', None)
uv_index = None
if not (module_data is None or not isinstance(module_data, list)):
for module in module_data:
if module.get('type', None) == 'uv':
uv_index = module.get('data', {}).get('levelValue')
try:
pressure = float(now_hourly.get('pressure', None)) if now_hourly is not None else None
except TypeError:
pressure = None
try:
wind_speed = float(now_hourly.get('windSpeedKm', None)) if now_hourly is not None else None
except TypeError:
wind_speed = None
try:
wind_gust_speed = float(now_hourly.get('windPeakSpeedKm', None)) if now_hourly is not None else None
except TypeError:
wind_gust_speed = None
try:
temperature = float(api_data.get('obs', {}).get('temp'))
except TypeError:
temperature = None
current_weather = CurrentWeatherData(
condition=CDT_MAP.get((api_data.get('obs', {}).get('ww'), api_data.get('obs', {}).get('dayNight')), None),
temperature=temperature,
wind_speed=wind_speed,
wind_gust_speed=wind_gust_speed,
wind_bearing=now_hourly.get('windDirectionText', {}).get('en') if now_hourly is not None else None,
pressure=pressure,
uv_index=uv_index
)
if api_data.get('country', '') == 'NL':
current_weather['wind_speed'] = api_data.get('obs', {}).get('windSpeedKm')
current_weather['wind_bearing'] = api_data.get('obs', {}).get('windDirectionText', {}).get('en')
return current_weather
@staticmethod
def hourly_list_to_forecast(data: List[dict] | None) -> List[Forecast] | None:
"""Parse data from the API to create a list of hourly forecasts"""
if data is None or not isinstance(data, list) or len(data) == 0:
return None
forecasts = list()
day = datetime.now()
for f in data:
if 'dateShow' in f:
day = day + timedelta(days=1)
hour = f.get('hour', None)
if hour is None:
continue
precipitation_probability = None
if f.get('precipChance', None) is not None:
precipitation_probability = int(f.get('precipChance'))
ww = None
if f.get('ww', None) is not None:
ww = int(f.get('ww'))
forecast = Forecast(
datetime=day.strftime(f'%Y-%m-%dT{hour}:00:00'),
condition=CDT_MAP.get((ww, f.get('dayNight', None)), None),
native_precipitation=f.get('precipQuantity', None),
native_temperature=f.get('temp', None),
native_templow=None,
native_wind_gust_speed=f.get('windPeakSpeedKm', None),
native_wind_speed=f.get('windSpeedKm', None),
precipitation_probability=precipitation_probability,
wind_bearing=f.get('windDirectionText', {}).get('en'),
native_pressure=f.get('pressure', None),
is_daytime=f.get('dayNight', None) == 'd'
)
forecasts.append(forecast)
return forecasts
@staticmethod
def daily_list_to_forecast(data: List[dict] | None) -> List[Forecast] | None:
"""Parse data from the API to create a list of daily forecasts"""
if data is None or not isinstance(data, list) or len(data) == 0:
return None
forecasts = list()
n_days = 0
for (idx, f) in enumerate(data):
precipitation = None
if f.get('precipQuantity', None) is not None:
try:
precipitation = float(f.get('precipQuantity'))
except TypeError:
pass
native_wind_gust_speed = None
if f.get('wind', {}).get('peakSpeed') is not None:
try:
native_wind_gust_speed = int(f.get('wind', {}).get('peakSpeed'))
except TypeError:
pass
is_daytime = f.get('dayNight', None) == 'd'
forecast = IrmKmiForecast(
datetime=(datetime.now() + timedelta(days=n_days)).strftime('%Y-%m-%d')
if is_daytime else datetime.now().strftime('%Y-%m-%d'),
condition=CDT_MAP.get((f.get('ww1', None), f.get('dayNight', None)), None),
native_precipitation=precipitation,
native_temperature=f.get('tempMax', None),
native_templow=f.get('tempMin', None),
native_wind_gust_speed=native_wind_gust_speed,
native_wind_speed=f.get('wind', {}).get('speed'),
precipitation_probability=f.get('precipChance', None),
wind_bearing=f.get('wind', {}).get('dirText', {}).get('en'),
is_daytime=is_daytime,
text_fr=f.get('text', {}).get('fr'),
text_nl=f.get('text', {}).get('nl')
)
forecasts.append(forecast)
if is_daytime or idx == 0:
n_days += 1
return forecasts
def create_rain_graph(self,
radar_animation: RadarAnimationData,
api_animation_data: List[dict],
country: str,
images_from_api: Tuple[bytes],
) -> RainGraph:
"""Create a RainGraph object that is ready to output animated and still SVG images"""
sequence: List[AnimationFrameData] = list()
tz = pytz.timezone(self.hass.config.time_zone)
current_time = datetime.now(tz=tz)
most_recent_frame = None
for idx, item in enumerate(api_animation_data):
frame = AnimationFrameData(
image=images_from_api[idx],
time=datetime.fromisoformat(item.get('time')) if item.get('time', None) is not None else None,
value=item.get('value', 0),
position=item.get('position', 0),
position_lower=item.get('positionLower', 0),
position_higher=item.get('positionHigher', 0)
)
sequence.append(frame)
if most_recent_frame is None and current_time < frame['time']:
most_recent_frame = idx - 1 if idx > 0 else idx
radar_animation['sequence'] = sequence
radar_animation['most_recent_image_idx'] = most_recent_frame
satellite_mode = self._style == OPTION_STYLE_SATELLITE
if country == 'NL':
image_path = "custom_components/irm_kmi/resources/nl.png"
bg_size = (640, 600)
else:
image_path = (f"custom_components/irm_kmi/resources/be_"
f"{'satellite' if satellite_mode else 'black' if self._dark_mode else 'white'}.png")
bg_size = (640, 490)
return RainGraph(radar_animation, image_path, bg_size,
dark_mode=self._dark_mode,
tz=self.hass.config.time_zone)
def warnings_from_data(self, warning_data: list | None) -> List[WarningData] | None:
"""Create a list of warning data instances based on the api data"""
if warning_data is None or not isinstance(warning_data, list) or len(warning_data) == 0:
return None
result = list()
for data in warning_data:
try:
warning_id = int(data.get('warningType', {}).get('id'))
start = datetime.fromisoformat(data.get('fromTimestamp', None))
end = datetime.fromisoformat(data.get('toTimestamp', None))
except (TypeError, ValueError):
# Without this data, the warning is useless
continue
try:
level = int(data.get('warningLevel'))
except TypeError:
level = None
result.append(
WarningData(
slug=SLUG_MAP.get(warning_id, 'unknown'),
id=warning_id,
level=level,
friendly_name=data.get('warningType', {}).get('name', {}).get(self.hass.config.language),
text=data.get('text', {}).get(self.hass.config.language),
starts_at=start,
ends_at=end
)
)
return result if len(result) > 0 else None
# Path: custom_components/irm_kmi/data.py
class CurrentWeatherData(TypedDict, total=False):
"""Class to hold the currently observable weather at a given location"""
condition: str | None
temperature: float | None
wind_speed: float | None
wind_gust_speed: float | None
wind_bearing: float | str | None
uv_index: float | None
pressure: float | None
# Path: custom_components/irm_kmi/data.py
class IrmKmiForecast(Forecast):
"""Forecast class with additional attributes for IRM KMI"""
# TODO: add condition_2 as well and evolution to match data from the API?
# TODO: remove the _fr and _nl to have only one 'text' attribute
text_fr: str | None
text_nl: str | None
# Path: tests/conftest.py
def get_api_data(fixture: str) -> dict:
return json.loads(load_fixture(fixture))
# Path: tests/test_coordinator.py
from datetime import datetime, timedelta
from freezegun import freeze_time
from homeassistant.components.weather import (ATTR_CONDITION_CLOUDY,
ATTR_CONDITION_PARTLYCLOUDY,
ATTR_CONDITION_RAINY, Forecast)
from homeassistant.core import HomeAssistant
from pytest_homeassistant_custom_component.common import MockConfigEntry
from custom_components.irm_kmi.coordinator import IrmKmiCoordinator
from custom_components.irm_kmi.data import CurrentWeatherData, IrmKmiForecast
from tests.conftest import get_api_data
async def test_jules_forgot_to_revert_update_interval_before_pushing(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
) -> None:
coordinator = IrmKmiCoordinator(hass, mock_config_entry)
assert timedelta(minutes=5) <= coordinator.update_interval
@freeze_time(datetime.fromisoformat('2024-01-12T07:10:00'))
async def test_warning_data(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry
) -> None:
api_data = get_api_data("be_forecast_warning.json")
coordinator = IrmKmiCoordinator(hass, mock_config_entry)
result = coordinator.warnings_from_data(api_data.get('for', {}).get('warning'))
assert isinstance(result, list)
assert len(result) == 2
first = result[0]
assert first.get('starts_at').replace(tzinfo=None) < datetime.now()
assert first.get('ends_at').replace(tzinfo=None) > datetime.now()
assert first.get('slug') == 'fog'
assert first.get('friendly_name') == 'Fog'
assert first.get('id') == 7
assert first.get('level') == 1
@freeze_time(datetime.fromisoformat('2023-12-26T18:30:00'))
def test_current_weather_be() -> None:
api_data = get_api_data("forecast.json")
result = IrmKmiCoordinator.current_weather_from_data(api_data)
expected = CurrentWeatherData(
| condition=ATTR_CONDITION_CLOUDY, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: v3ucn/Bert-vits2-V2.2
# Path: tools/sentence.py
def split_by_language(text: str, target_languages: list = None) -> list:
pattern = (
r"[\!\"\#\$\%\&\'\(\)\*\+\,\-\.\/\:\;\<\>\=\?\@\[\]\{\}\\\\\^\_\`"
r"\!?\。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」"
r"『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘\'\‛\“\”\„\‟…‧﹏.]+"
)
sentences = re.split(pattern, text)
pre_lang = ""
start = 0
end = 0
sentences_list = []
if target_languages is not None:
sorted_target_languages = sorted(target_languages)
if sorted_target_languages in [["en", "zh"], ["en", "ja"], ["en", "ja", "zh"]]:
new_sentences = []
for sentence in sentences:
new_sentences.extend(split_alpha_nonalpha(sentence))
sentences = new_sentences
for sentence in sentences:
if check_is_none(sentence):
continue
lang = classify_language(sentence, target_languages)
end += text[end:].index(sentence)
if pre_lang != "" and pre_lang != lang:
sentences_list.append((text[start:end], pre_lang))
start = end
end += len(sentence)
pre_lang = lang
sentences_list.append((text[start:], pre_lang))
return sentences_list
# Path: infer.py
def get_net_g(model_path: str, version: str, device: str, hps):
def get_text(text, language_str, hps, device):
def infer(
text,
emotion,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
sid,
language,
hps,
net_g,
device,
reference_audio=None,
skip_start=False,
skip_end=False,
):
def infer_multilang(
text,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
sid,
language,
hps,
net_g,
device,
reference_audio=None,
emotion=None,
skip_start=False,
skip_end=False,
):
# Path: config.py
class Resample_config:
class Preprocess_text_config:
class Bert_gen_config:
class Emo_gen_config:
class Train_ms_config:
class Webui_config:
class Server_config:
class Translate_config:
class Config:
def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
transcription_path: str,
cleaned_path: str,
train_path: str,
val_path: str,
config_path: str,
val_per_lang: int = 5,
max_val_total: int = 10000,
clean: bool = True,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
config_path: str,
num_processes: int = 2,
device: str = "cuda",
use_multi_device: bool = False,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
config_path: str,
num_processes: int = 2,
device: str = "cuda",
use_multi_device: bool = False,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
config_path: str,
env: Dict[str, any],
base: Dict[str, any],
model: str,
num_workers: int,
spec_cache: bool,
keep_ckpts: int,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self,
device: str,
model: str,
config_path: str,
language_identification_library: str,
port: int = 7860,
share: bool = False,
debug: bool = False,
):
def from_dict(cls, dataset_path: str, data: Dict[str, any]):
def __init__(
self, models: List[Dict[str, any]], port: int = 5000, device: str = "cuda"
):
def from_dict(cls, data: Dict[str, any]):
def __init__(self, app_key: str, secret_key: str):
def from_dict(cls, data: Dict[str, any]):
def __init__(self, config_path: str):
# Path: tools/translate.py
def translate(Sentence: str, to_Language: str = "jp", from_Language: str = ""):
"""
:param Sentence: sentence to translate
:param from_Language: language of the sentence to translate
:param to_Language: target language
:return: the translated sentence; None is returned on error
Common language codes: Chinese zh, English en, Japanese jp
"""
appid = config.translate_config.app_key
key = config.translate_config.secret_key
if appid == "" or key == "":
return "请开发者在config.yml中配置app_key与secret_key"
url = "https://fanyi-api.baidu.com/api/trans/vip/translate"
texts = Sentence.splitlines()
outTexts = []
for t in texts:
if t != "":
# Signature computation, see documentation: https://api.fanyi.baidu.com/product/113
salt = str(random.randint(1, 100000))
signString = appid + t + salt + key
hs = hashlib.md5()
hs.update(signString.encode("utf-8"))
signString = hs.hexdigest()
if from_Language == "":
from_Language = "auto"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"q": t,
"from": from_Language,
"to": to_Language,
"appid": appid,
"salt": salt,
"sign": signString,
}
# Send the request
try:
response = requests.post(
url=url, data=payload, headers=headers, timeout=3
)
response = response.json()
if "trans_result" in response.keys():
result = response["trans_result"][0]
if "dst" in result.keys():
dst = result["dst"]
outTexts.append(dst)
except Exception:
return Sentence
else:
outTexts.append(t)
return "\n".join(outTexts)
# Path: webui.py
import os
import logging
import re_matching
import torch
import utils
import gradio as gr
import webbrowser
import numpy as np
import librosa
from tools.sentence import split_by_language
from infer import infer, latest_version, get_net_g, infer_multilang
from config import config
from tools.translate import translate
language=language,
hps=hps,
net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
audio_list.append(audio16bit)
silence = np.zeros((int)(44100 * interval_between_para), dtype=np.int16)
audio_list.append(silence)
else:
for idx, p in enumerate(para_list):
skip_start = idx != 0
skip_end = idx != len(para_list) - 1
audio_list_sent = []
sent_list = re_matching.cut_sent(p)
for idx, s in enumerate(sent_list):
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(sent_list) - 1) and skip_end
audio = infer(
s,
reference_audio=reference_audio,
emotion=emotion,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
sid=speaker,
language=language,
hps=hps,
net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio_list_sent.append(audio)
silence = np.zeros((int)(44100 * interval_between_sent))
audio_list_sent.append(silence)
if (interval_between_para - interval_between_sent) > 0:
silence = np.zeros(
(int)(44100 * (interval_between_para - interval_between_sent))
)
audio_list_sent.append(silence)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(
np.concatenate(audio_list_sent)
) # normalize the volume over the complete sentence
audio_list.append(audio16bit)
audio_concat = np.concatenate(audio_list)
return ("Success", (44100, audio_concat))
def tts_fn(
text: str,
speaker,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
language,
reference_audio,
emotion,
prompt_mode,
):
if prompt_mode == "Audio prompt":
if reference_audio == None:
return ("Invalid audio prompt", None)
else:
reference_audio = load_audio(reference_audio)[1]
else:
reference_audio = None
audio_list = []
if language == "mix":
bool_valid, str_valid = re_matching.validate_text(text)
if not bool_valid:
return str_valid, (
hps.data.sampling_rate,
np.concatenate([np.zeros(hps.data.sampling_rate // 2)]),
)
result = []
for slice in re_matching.text_matching(text):
_speaker = slice.pop()
temp_contant = []
temp_lang = []
for lang, content in slice:
if "|" in content:
temp = []
temp_ = []
for i in content.split("|"):
if i != "":
temp.append([i])
temp_.append([lang])
else:
temp.append([])
temp_.append([])
temp_contant += temp
temp_lang += temp_
else:
if len(temp_contant) == 0:
temp_contant.append([])
temp_lang.append([])
temp_contant[-1].append(content)
temp_lang[-1].append(lang)
for i, j in zip(temp_lang, temp_contant):
result.append([*zip(i, j), _speaker])
for i, one in enumerate(result):
skip_start = i != 0
skip_end = i != len(result) - 1
_speaker = one.pop()
idx = 0
while idx < len(one):
text_to_generate = []
lang_to_generate = []
while True:
lang, content = one[idx]
temp_text = [content]
if len(text_to_generate) > 0:
text_to_generate[-1] += [temp_text.pop(0)]
lang_to_generate[-1] += [lang]
if len(temp_text) > 0:
| text_to_generate += [[i] for i in temp_text] |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: d-krupke/CP-SAT-Log-Analyzer
# Path: cpsat_log_parser/blocks/search_progress.py
class SearchProgressBlock(LogBlock):
def __init__(self, lines: typing.List[str]) -> None:
lines = [line.strip() for line in lines if line.strip()]
if not lines:
raise ValueError("No lines to parse")
if not self.matches(lines):
raise ValueError("Lines do not match SearchProgressBlock")
self.lines = lines
@staticmethod
def matches(lines: typing.List[str]) -> bool:
if not lines:
return False
return lines[0].strip().lower().startswith("Starting search".lower())
def _parse_events(
self,
) -> typing.List[typing.Union[BoundEvent, ObjEvent, ModelEvent]]:
"""
Parse the log file into a list of BoundEvent and ObjEvent.
"""
events = []
for line in self.lines:
obj_event = ObjEvent.parse(line)
if obj_event:
events.append(obj_event)
continue
bound_event = BoundEvent.parse(line)
if bound_event:
events.append(bound_event)
continue
model_event = ModelEvent.parse(line)
if model_event:
events.append(model_event)
continue
return events
def get_presolve_time(self) -> float:
# first line looks like this "Starting search at 16.74s with 24 workers."
m = re.match(
r"Starting [Ss]earch at (?P<time>\d+\.\d+s) with \d+ workers.",
self.lines[0],
)
if m:
return parse_time(m.group("time"))
raise ValueError(f"Could not parse presolve time from '{self.lines[0]}'")
def get_title(self) -> str:
return "Search progress:"
def get_help(self) -> typing.Optional[str]:
return """
The search progress log is an essential element of the overall log, crucial for identifying performance bottlenecks. It clearly demonstrates the solver's progression over time and pinpoints where it faces significant challenges. It is important to discern whether the upper or lower bounds are causing issues, or if the solver initially finds a near-optimal solution but struggles to minimize a small remaining gap.
The structure of the log entries is standardized as follows:
`EVENT NAME\t|\tTIME\t|\tBEST SOLUTION\t|\tRANGE OF THE SEARCH\t|\tCOMMENT`
For instance, an event marked `#2` indicates the discovery of the second solution. Here, you will observe an improvement in the `BEST SOLUTION` metric. A notation like `best:16` confirms that the solver has found a solution with a value of 16.
An event with `#Bound` denotes an enhancement in the bound, as seen by a reduction in the `RANGE OF THE SEARCH`. A detail such as `next:[7,14]` signifies that the solver is now focused on finding a solution valued between 7 and 14.
The `COMMENT` section provides essential information about the strategies that led to these improvements.
Events labeled `#Model` signal modifications to the model, such as fixing certain variables.
To fully grasp the nuances, zooming into the plot is necessary, especially since the initial values can be quite large. A thorough examination of which sections of the process converge quickest is crucial for a comprehensive understanding.
"""
def gap_as_plotly(self) -> typing.Optional[go.Figure]:
gap_events = [
e
for e in self._parse_events()
if isinstance(e, BoundEvent) or isinstance(e, ObjEvent)
]
def is_valid_gap(gap):
if gap is None:
return False
if not math.isfinite(gap):
return False
return True
gaps = [(e.time, e.get_gap()) for e in gap_events if is_valid_gap(e.get_gap())]
fig = go.Figure()
if not gap_events:
return None
# add gaps
fig.add_trace(
go.Scatter(
x=[t for t, _ in gaps],
y=[gap for _, gap in gaps],
mode="lines+markers",
line=dict(color="purple"),
name="Gap",
hovertext=[e.msg for e in gap_events],
)
)
# make the x-axis start at 0
fig.update_xaxes(range=[0, 1.01 * gaps[-1][0]])
max_gap = max(gap for _, gap in gaps if gap is not None)
# make the y-axis start at 0
fig.update_yaxes(range=[-1, min(300, 1.01 * max_gap)])
fig.update_layout(
title="Optimality Gap",
xaxis_title="Time (s)",
yaxis_title="Gap (%)",
legend_title="Legend",
font=dict(family="Courier New, monospace", size=18, color="RebeccaPurple"),
)
return fig
def model_changes_as_plotly(self) -> typing.Optional[go.Figure]:
"""
Plot the model changes in percent over time.
"""
model_events = [e for e in self._parse_events() if isinstance(e, ModelEvent)]
fig = go.Figure()
if not model_events:
return None
# add number of vars
fig.add_trace(
go.Scatter(
x=[e.time for e in model_events],
y=[100 * (e.vars_remaining / e.vars) for e in model_events],
mode="lines+markers",
line=dict(color="green"),
name="Variables",
hovertext=[e.msg for e in model_events],
)
)
# add number of constraints
fig.add_trace(
go.Scatter(
x=[e.time for e in model_events],
y=[100 * (e.constr_remaining / e.constr) for e in model_events],
mode="lines+markers",
line=dict(color="orange"),
name="Constraints",
hovertext=[e.msg for e in model_events],
)
)
# make the x-axis start at 0
fig.update_xaxes(range=[0, 1.01 * model_events[-1].time])
# make the y-axis range from 0 to 100
fig.update_yaxes(range=[0, 101])
fig.update_layout(
title="Model changes",
xaxis_title="Time (s)",
yaxis_title="Remaining (%)",
legend_title="Legend",
font=dict(family="Courier New, monospace", size=18, color="RebeccaPurple"),
)
return fig
def as_plotly(self) -> typing.Optional[go.Figure]:
"""
Plot the progress of the solver.
"""
events = self._parse_events()
obj_events = [e for e in events if isinstance(e, ObjEvent)]
bound_events = [e for e in events if isinstance(e, BoundEvent)]
fig = go.Figure()
if not obj_events and not bound_events:
return None
max_time = max([e.time for e in bound_events + obj_events])
# make sure that both bounds and objs have a value at max_time
if obj_events and obj_events[-1].time < max_time:
if bound_events[-1].obj is None:
# Should nearly never happen
obj_events.append(
ObjEvent(
time=max_time,
obj=obj_events[-1].obj,
bound=bound_events[-1].bound,
msg="",
)
)
else:
obj_events.append(
ObjEvent(
time=max_time,
obj=bound_events[-1].obj,
bound=bound_events[-1].bound,
msg="",
)
)
if bound_events and bound_events[-1].time < max_time:
bound_events.append(
BoundEvent(
time=max_time,
obj=obj_events[-1].obj,
bound=obj_events[-1].bound,
msg="",
)
)
# plot the bounds over time. Add the comment as hover text
fig.add_trace(
go.Scatter(
x=[b.time for b in bound_events],
y=[b.bound for b in bound_events],
mode="lines+markers",
line=dict(color="cyan"),
name="Bound",
hovertext=[b.msg for b in bound_events],
)
)
# plot the objective values over time. Add the comment as hover text
fig.add_trace(
go.Scatter(
x=[o.time for o in obj_events],
y=[o.obj for o in obj_events],
mode="lines+markers",
line=dict(color="red"),
name="Objective",
hovertext=[o.msg for o in obj_events],
)
)
# make the x-axis start at 0
fig.update_xaxes(range=[0, 1.01 * max_time])
fig.update_layout(
title="Search progress",
xaxis_title="Time (s)",
yaxis_title="Objective",
legend_title="Legend",
font=dict(family="Courier New, monospace", size=18, color="RebeccaPurple"),
)
return fig
# Path: cpsat_log_parser/blocks/solver.py
class SolverBlock(LogBlock):
def __init__(self, lines: typing.List[str]) -> None:
super().__init__(lines)
def _parse_parameters(self, line: str) -> typing.Dict:
"""
The parameters line can look like this:
"Parameters: log_search_progress: true use_timetabling_in_no_overlap_2d: true use_energetic_reasoning_in_no_overlap_2d: true use_pairwise_reasoning_in_no_overlap_2d: true"
"""
parameters = {}
line = line[len("Parameters:") :]
for match in re.finditer(r"(?P<key>\w+): (?P<value>[^ ]+)", line):
parameters[match.group("key")] = match.group("value")
return parameters
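# For the example line shown in the docstring above, this method would return a dict along the lines of
# {'log_search_progress': 'true', 'use_timetabling_in_no_overlap_2d': 'true', ...};
# note that values are kept as raw strings and are not converted to Python types.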
def get_title(self) -> str:
return "Solver Information"
def get_help(self) -> str:
return """This block contains basic information about the solver.
As CP-SAT is still under active development and makes serious improvements with every release, it is important to know which version of the solver was used.
The number of workers, i.e., the level of parallelism, is also important to know.
CP-SAT is a portfolio solver and the higher the number of workers, the more strategies are used.
You can find an overview of the different tiers activated by the number of workers in the [CP-SAT documentation](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers).
While you should be careful when tinkering with the parameters (they have sensible defaults), it is still good to know which parameters were used.
All of this information is already shown in the overview.
"""
@staticmethod
def matches(lines: typing.List[str]) -> bool:
if not lines:
return False
return lines[0].strip().startswith("Starting CP-SAT solver")
def get_parameters(self) -> typing.Dict:
for line in self.lines:
if line.startswith("Parameters:"):
return self._parse_parameters(line)
raise ValueError("No parameters found")
def get_number_of_workers(self) -> int:
# the line looks like this: "Setting number of workers to 24"
for line in self.lines:
if line.startswith("Setting number of workers to"):
return int(line.strip().split(" ")[-1])
# If `num_search_workers` is set, the number of workers is not shown in the log.
if "num_search_workers" in self.get_parameters():
return int(self.get_parameters()["num_search_workers"])
raise ValueError("No number of workers found")
def get_version(self) -> str:
# the line looks like this: "Starting CP-SAT solver v9.7.2996"
for line in self.lines:
if line.startswith("Starting CP-SAT solver"):
return line.strip().split(" ")[-1]
raise ValueError("No version found")
def get_parsed_version(self) -> typing.Tuple[int, int, int]:
# the line looks like this: "Starting CP-SAT solver v9.7.2996"
version = self.get_version()[1:]
major, minor, patch = version.split(".")
return int(major), int(minor), int(patch)
# Path: cpsat_log_parser/blocks/solver_response.py
class ResponseBlock(LogBlock):
def __init__(self, lines: typing.List[str]) -> None:
super().__init__(lines)
@staticmethod
def matches(lines: typing.List[str]) -> bool:
if not lines:
return False
return lines[0].startswith("CpSolverResponse")
def get_title(self) -> str:
return "CpSolverResponse"
def to_dict(self) -> dict:
d = {}
for line in self.lines:
if line.startswith("CpSolverResponse"):
continue
key, value = line.split(":")
key = key.strip()
value = value.strip()
if key == "status":
value = value.split(" ")[0]
d[key] = value
return d
def get_gap(self):
vals = self.to_dict()
try:
obj = float(vals["objective"])
bound = float(vals["best_bound"])
except TypeError:
return None
except ValueError:
return None
return 100 * (abs(obj - bound) / max(1, abs(obj)))
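# Worked example (editor-added): objective=105 and best_bound=100 give
# 100 * |105 - 100| / max(1, |105|) = 500 / 105 ≈ 4.76, i.e. a gap of about 4.76%.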
def to_pandas(self) -> pd.DataFrame:
return pd.DataFrame([self.to_dict()])
def get_help(self) -> typing.Optional[str]:
return """
This final block of the log contains a summary by the solver.
Here you find the most important information, such as how successful the search was.
You can find the original documentation [here](https://github.com/google/or-tools/blob/8768ed7a43f8899848effb71295a790f3ecbe2f2/ortools/sat/cp_model.proto#L720).
"""
# Path: cpsat_log_parser/blocks/initial_model.py
class InitialModelBlock(LogBlock):
def __init__(self, lines: typing.List[str]) -> None:
super().__init__(lines)
if not self.lines:
raise ValueError("No lines given")
if not self.matches(self.lines):
raise ValueError("Lines do not match")
@staticmethod
def matches(lines: typing.List[str]) -> bool:
if not lines:
return False
if re.match(r"Initial (satisfaction|optimization) model", lines[0]):
return True
return False
def is_optimization(self) -> bool:
if not self.lines:
return False
return self.lines[0].startswith("Initial optimization model")
def get_title(self) -> str:
return "Initial Optimization Model"
def get_model_fingerprint(self) -> str:
return self.lines[0].split("model_fingerprint: ")[1].strip(")")
def get_num_variables(self) -> int:
return int(
self.lines[1]
.split("#Variables: ")[1]
.strip()
.split(" ")[0]
.replace("'", "")
)
def get_num_constraints(self) -> int:
n = 0
for line in self.lines:
if line.startswith("#k"):
# "#kNoOverlap2D: 1 (#rectangles: 24)"
# "#kInterval: 48"
n += int(line.split(":")[1].strip().split(" ")[0].replace("'", ""))
return n
def get_help(self) -> typing.Optional[str]:
return """
This block gives an overview of the model before presolve.
It contains the number of variables and constraints, as well as coefficients and domains.
`- 200 in [0,199]` will indicate that there are 200 variables with domain `[0,199]`, i.e., values between 0 and 199.
`#kLinearN: 3'000 (#terms: 980'948)` indicates that there are 3000 linear constraints with 980'948 coefficients.
It is useful to compare this to the model after presolve, and also to check whether it matches your expectations.
The presolve-phase may reformulate your model extensively to make it more efficient.
"""
# Path: _app/overview.py
import streamlit as st
from cpsat_log_parser.blocks import (
SearchProgressBlock,
SolverBlock,
ResponseBlock,
InitialModelBlock,
)
def show_overview(parser):
st.subheader("Overview", divider=True)
if parser.comments:
with st.chat_message("user"):
# escape markdown to prevent XSS
comment = "\n".join(parser.comments)
comment = comment.replace("\\", "")
comment = comment.replace("[", "\\[*")
comment = comment.replace("]", "*\\]")
st.write(comment)
try:
solver_block = parser.get_block_of_type(SolverBlock)
initial_model_block = parser.get_block_of_type(InitialModelBlock)
search_progress_block = parser.get_block_of_type(SearchProgressBlock)
response_block = parser.get_block_of_type(ResponseBlock)
col1, col2 = st.columns(2)
major, minor, patch = solver_block.get_parsed_version()
if major < 9 or (major == 9 and minor < 8):
col1.metric(
label="CP-SAT Version",
value=solver_block.get_version(),
help="CP-SAT has seen significant performance improvements over the last years. Make sure to use the latest version.",
delta="outdated",
delta_color="inverse",
)
else:
col1.metric(
label="CP-SAT Version",
value=solver_block.get_version(),
help="CP-SAT has seen significant performance improvements over the last years. Make sure to use the latest version.",
)
col2.metric(
label="Number of workers",
value=solver_block.get_number_of_workers(),
help="CP-SAT has different parallelization tiers, triggered by the number of workers. More workers can improve performance. Fine more information [here](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers)",
)
# https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers
# print all parameters (key: value)
if solver_block.get_parameters():
md = "*CP-SAT was setup with the following parameters:*\n"
st.markdown(md)
st.json(solver_block.get_parameters())
st.markdown(
"*You can find more information about the parameters [here](https://github.com/google/or-tools/blob/stable/ortools/sat/sat_parameters.proto).*"
)
col1, col2, col3 = st.columns(3)
response = response_block.to_dict()
col1.metric(
label="Status",
| value=response["status"], |
====REPOSITORY====
# Repo Name: MMC-K/multimodal_understanding
# Path: index_scorer.py
class FaissScorerExhaustiveGPU(object):
_NEED_TO_SET_CANDIDATES=False
def __init__(self,
fvec_root,
nprobe=1,
gpu=0,
**kwargs,
) -> None:
self.gpu = gpu
self.fpath_list = list(sorted(list(glob.glob(os.path.join(fvec_root, "*")))))
self.index = self.load_index(gpu)
self.index.nprobe = nprobe
def load_index(self, gpu=0):
# gpu resources
res = faiss.StandardGpuResources()
logger.info('loading fvecs...')
data = [fvecs_read(path) for path in self.fpath_list]
d = data[0].shape[-1]
logger.info('vector dim: {}'.format(d))
index_flat = faiss.IndexFlatIP(d)
index = faiss.index_cpu_to_gpu(res, gpu, index_flat)
logger.info('adding index...')
for ds in data:
index.add(ds)
return index
def get_topk(self, query_vec, k=4):
return self.index.search(query_vec, k)
# Path: index_scorer.py
class FaissScorerExhaustiveMultiGPU(object):
_NEED_TO_SET_CANDIDATES=False
def __init__(self,
fvec_root,
nprobe=1,
gpu_list=None,
**kwargs,
) -> None:
self.fpath_list = list(sorted(list(glob.glob(os.path.join(fvec_root, "*")))))
self.gpu_list = gpu_list
if self.gpu_list is None:
self.gpu_list = list(range(faiss.get_num_gpus()))
self.index = self.load_index(fvec_root)
self.index.nprobe = nprobe
def load_index(self, fvec_root):
logger.info('loading fvecs...')
logger.info(self.fpath_list)
data = [fvecs_read(path) for path in self.fpath_list]
data = np.concatenate(tuple(data), axis=0)
d = data.shape[-1]
logger.info('vector dim: {}'.format(d))
index_flat = faiss.IndexFlatIP(d)
gmco = faiss.GpuMultipleClonerOptions()
gmco.shard = True
index = faiss.index_cpu_to_gpus_list(index_flat, gmco, self.gpu_list)
logger.info('adding index...')
index.add(data)
return index
def get_topk(self, query_vec, k=4):
return self.index.search(query_vec, k)
# Path: index_scorer.py
class FaissScorer(FaissScorerBase):
def __init__(self,
index_path,
fvec_root="",
proportion_for_training=1.0,
index_str="IVF65536,Flat",
nprobe=4,
**kwargs,
) -> None:
super(FaissScorer, self).__init__(fvec_root)
self.index_path=index_path
self.proportion_for_training = proportion_for_training
self.index = self.load_index(index_str)
self.index.nprobe = nprobe
def load_index(self, index_str="IVF65536,Flat"):
if not os.path.isfile(self.index_path):
data = self.load_data(self.proportion_for_training)
d = data.shape[-1]
index = faiss.index_factory(d, index_str, faiss.METRIC_INNER_PRODUCT)
logger.info('training index...')
index.train(data)
logger.info('loading fvecs...')
data = self.load_data()
logger.info('adding index...')
index.add(data)
faiss.write_index(index, self.index_path)
return faiss.read_index(self.index_path)
def get_topk(self, query_vec, k=4):
return self.index.search(query_vec, k)
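# Illustrative usage sketch (editor-added, not part of the repository); paths and sizes are placeholders.
# scorer = FaissScorer(index_path="my_index.faiss", fvec_root="fvecs/", nprobe=4)
# scores, indices = scorer.get_topk(query_vec, k=10)  # query_vec: float32 array of shape (n_queries, dim)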
# Path: data_utils.py
class DatasetForImages(Dataset):
def __init__(
self,
file_path: str,
image_tokenizer: ViTFeatureExtractor,
shard_idx: int=0,
num_shards: int=1,
image_root_dir=None,
):
super().__init__()
self.file_path = file_path
self.image_tokenizer = image_tokenizer
self.image_root_dir=image_root_dir
logger.info("loading dataset...")
self.data = [
item for item in csv.DictReader(
open(file_path, "r"),
delimiter="\t",
quoting=csv.QUOTE_NONE,
fieldnames=['path', 'image_url']
)
]
self.shard_idx = shard_idx
if num_shards > 1:
self.data = get_list(self.data, num_shards, shard_idx)
logger.info("{} examples was loaded.".format(len(self.data)))
def __getitem__(self, index):
sample = self.data[index]
path = sample["path"]
if self.image_root_dir is not None:
path = os.path.join(self.image_root_dir, path)
image = Image.open(path).convert("RGB")
image_feature = self.image_tokenizer(images=image, return_tensors="pt")
return {
"pixel_values": image_feature["pixel_values"],
}
def __len__(self):
return len(self.data)
def get_collate_fn(self):
def collate_fn(samples):
if len(samples) == 0:
return {}
return {
"pixel_values": default_collate([s["pixel_values"][0] for s in samples])
}
return collate_fn
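# Illustrative usage sketch (editor-added); the TSV path is a placeholder.
# image_tokenizer = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-384")
# dataset = DatasetForImages("images.tsv", image_tokenizer=image_tokenizer)
# loader = DataLoader(dataset, batch_size=16, collate_fn=dataset.get_collate_fn())
# batch = next(iter(loader))  # {"pixel_values": tensor of shape (batch_size, 3, H, W)}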
# Path: modeling_encoder.py
class VisionT5SimpleBiEncoder(BiEncoderBase):
_ENCODER_TYPE='biencoder'
def __init__(self,
args=None,
vision_encoder=None,
language_encoder=None):
super(VisionT5SimpleBiEncoder, self).__init__(
args=args,
vision_encoder=vision_encoder,
language_encoder=language_encoder
)
def load_weight_from_args(self, args):
self.vision_encoder = ViTModel.from_pretrained(args.vision_model)
self.language_encoder = T5EncoderSimple.from_pretrained(args.language_model)
@classmethod
def from_pretrained(cls, *args, **kwargs):
root_path = args[0]
enc_path_q = os.path.join(root_path, "vision")
args_q = copy.deepcopy(list(args))
args_q[0] = enc_path_q
vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)
enc_path_k = os.path.join(root_path, "language")
args_k = copy.deepcopy(list(args))
args_k[0] = enc_path_k
language_encoder = T5EncoderSimple.from_pretrained(*tuple(args_k), **kwargs)
return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)
# Path: modeling_encoder.py
class VisionT5MeanBiEncoder(BiEncoderBase):
_ENCODER_TYPE='biencoder'
def __init__(self,
args=None,
vision_encoder=None,
language_encoder=None):
super(VisionT5MeanBiEncoder, self).__init__(
args=args,
vision_encoder=vision_encoder,
language_encoder=language_encoder
)
def load_weight_from_args(self, args):
self.vision_encoder = ViTModel.from_pretrained(args.vision_model)
self.language_encoder = T5EncoderMean.from_pretrained(args.language_model)
@classmethod
def from_pretrained(cls, *args, **kwargs):
root_path = args[0]
enc_path_q = os.path.join(root_path, "vision")
args_q = copy.deepcopy(list(args))
args_q[0] = enc_path_q
vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)
enc_path_k = os.path.join(root_path, "language")
args_k = copy.deepcopy(list(args))
args_k[0] = enc_path_k
language_encoder = T5EncoderMean.from_pretrained(*tuple(args_k), **kwargs)
return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)
# Path: modeling_encoder.py
class VisionT5SimpleBiEncoderHN(BiEncoderBaseHN):
_ENCODER_TYPE='biencoder'
def __init__(self,
args=None,
vision_encoder=None,
language_encoder=None):
super(VisionT5SimpleBiEncoderHN, self).__init__(
args=args,
vision_encoder=vision_encoder,
language_encoder=language_encoder
)
def load_weight_from_args(self, args):
self.vision_encoder = ViTModel.from_pretrained(args.vision_model)
self.language_encoder = T5EncoderSimple.from_pretrained(args.language_model)
@classmethod
def from_pretrained(cls, *args, **kwargs):
root_path = args[0]
enc_path_q = os.path.join(root_path, "vision")
args_q = copy.deepcopy(list(args))
args_q[0] = enc_path_q
vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)
enc_path_k = os.path.join(root_path, "language")
args_k = copy.deepcopy(list(args))
args_k[0] = enc_path_k
language_encoder = T5EncoderSimple.from_pretrained(*tuple(args_k), **kwargs)
return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)
# Path: modeling_encoder.py
class VisionT5MeanBiEncoderHN(BiEncoderBaseHN):
_ENCODER_TYPE='biencoder'
def __init__(self,
args=None,
vision_encoder=None,
language_encoder=None):
super(VisionT5MeanBiEncoderHN, self).__init__(
args=args,
vision_encoder=vision_encoder,
language_encoder=language_encoder
)
def load_weight_from_args(self, args):
self.vision_encoder = ViTModel.from_pretrained(args.vision_model)
self.language_encoder = T5EncoderMean.from_pretrained(args.language_model)
@classmethod
def from_pretrained(cls, *args, **kwargs):
root_path = args[0]
enc_path_q = os.path.join(root_path, "vision")
args_q = copy.deepcopy(list(args))
args_q[0] = enc_path_q
vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)
enc_path_k = os.path.join(root_path, "language")
args_k = copy.deepcopy(list(args))
args_k[0] = enc_path_k
language_encoder = T5EncoderMean.from_pretrained(*tuple(args_k), **kwargs)
return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)
# Path: training_retriever.py
def create_directory_info(args, create_dir=True):
model_dir = os.path.join(args.output_dir, "{}-{}-{}".format(
args.model_cls.replace('/', '_'),
args.vision_model.replace('/', '_'),
args.language_model.replace('/', '_')))
if args.dir_suffix is not None:
model_dir = '_'.join([model_dir, args.dir_suffix])
weights_dir = os.path.join(model_dir, "weights")
logs_dir = os.path.join(model_dir, "logs")
path_info = {
'model_dir': model_dir,
'weights_dir': weights_dir,
'logs_dir': logs_dir,
}
if create_dir:
for k, v in path_info.items():
create_dir_if_not_exist(v)
path_info['best_model_path'] = os.path.join(weights_dir, "best_model.pth")
path_info['ckpt_path'] = os.path.join(weights_dir, "checkpoint.pth")
return path_info
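# Worked example (editor-added): with output_dir="output", model_cls="VisionT5MeanBiEncoder",
# vision_model="google/vit-base-patch16-384", language_model="KETI-AIR/ke-t5-base" and no dir_suffix,
# model_dir becomes "output/VisionT5MeanBiEncoder-google_vit-base-patch16-384-KETI-AIR_ke-t5-base",
# with "weights" and "logs" subdirectories created underneath when create_dir is True.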
# Path: training_retriever.py
MODEL_CLS = {
"VisionT5SimpleBiEncoder": {
"model_cls": VisionT5SimpleBiEncoder,
},
"VisionT5MeanBiEncoder": {
"model_cls": VisionT5MeanBiEncoder,
},
"VisionT5SimpleBiEncoderHN": {
"model_cls": VisionT5SimpleBiEncoderHN,
},
"VisionT5MeanBiEncoderHN": {
"model_cls": VisionT5MeanBiEncoderHN,
},
}
# Path: text_generated_image_to_image_retriever/text_to_image_retrieval.py
import argparse
import sys
import os
import csv
import time
import json
import shutil
import logging
import hashlib
import functools
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
import torch.distributed as dist
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from numpy.core.numeric import indices
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss
from torch import optim
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, ViTFeatureExtractor
from torch.utils.tensorboard import SummaryWriter
from index_scorer import FaissScorerExhaustiveGPU, FaissScorerExhaustiveMultiGPU, FaissScorer
from data_utils import DatasetForImages
from modeling_encoder import (
VisionT5SimpleBiEncoder,
VisionT5MeanBiEncoder,
VisionT5SimpleBiEncoderHN,
VisionT5MeanBiEncoderHN,
)
from training_retriever import (
create_directory_info,
MODEL_CLS)
# model
parser.add_argument("--vision_model",
default="google/vit-base-patch16-384", type=str)
parser.add_argument("--language_model",
default="KETI-AIR/ke-t5-base", type=str)
parser.add_argument("--model_cls", default="VisionT5MeanBiEncoder",
choices=["VisionT5SimpleBiEncoder",
"VisionT5MeanBiEncoder"],
type=str, help="model class")
parser.add_argument("--dir_suffix",
default=None, type=str)
parser.add_argument("--output_dir",
default="output", type=str)
parser.add_argument("--markdown_out",
default="md", type=str)
# resume
parser.add_argument("--hf_path", default=None, type=str,
help="path to score huggingface model")
parser.add_argument("--topk", default=10,
type=int, help="top k")
parser.add_argument("--image_size", default=180,
type=int, help="image size for html formatting")
# default settings for training, evaluation
parser.add_argument("--batch_size", default=16,
type=int, help="mini batch size")
parser.add_argument("--num_workers", default=0, type=int,
help="number of workers")
# distributed setting
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--local_world_size", type=int, default=1,
help="The size of the local worker group.")
parser.add_argument("--rank", type=int, default=0,
help="The rank of the worker within a worker group.")
parser.add_argument("--world_size", type=int, default=1,
help="world size. (num_nodes*num_dev_per_node)")
parser.add_argument("--distributed", action='store_true',
help="is distributed training")
parser.add_argument('--model_gpu',
default=0, type=int)
parser.add_argument('--scorer_gpus', nargs="+",
default=[0, 1, 2, 3], type=int)
# --data_path ../kfashion_images_group.tsv --fvecs_dir result/simple_query_ko/fvecs --hf_path result/simple_query_ko/hf_model --query_path query.json --markdown_out result/simple_query_ko/md --model_cls VisionT5MeanBiEncoder
args = parser.parse_args(["--data_path", "../kfashion_images_group.tsv",\
"--fvecs_dir", FVECS_DIR, \
"--hf_path", HF_PATH,\
"--markdown_out", MARKDOWN_OUT,\
"--model_cls", "VisionT5MeanBiEncoder",\
"--scorer_gpus", "0"])
print(args.scorer_gpus)
print(args.fvecs_dir)
path_info = create_directory_info(args, create_dir=False)
if args.fvecs_dir is None:
args.fvecs_dir = os.path.join(path_info["model_dir"], "fvecs")
if args.hf_path.lower()=='default':
args.hf_path = os.path.join(path_info["model_dir"], "hf")
model_device = torch.device('cuda:{}'.format(args.model_gpu))
if faiss_scorer is None:
faiss_scorer = FaissScorerExhaustiveMultiGPU(
fvec_root=args.fvecs_dir,
gpu_list=args.scorer_gpus
)
# get model class
model_cls_cfg = MODEL_CLS[args.model_cls]
model_cls = model_cls_cfg["model_cls"]
# load model
model = model_cls.from_pretrained(args.hf_path)
model = model.to(model_device)
# get tokenizer
text_tokenizer = AutoTokenizer.from_pretrained(args.language_model)
# image_tokenizer = ViTFeatureExtractor.from_pretrained(args.vision_model)
ref_data = [
item for item in tqdm.tqdm(csv.DictReader(
open(args.data_path, "r"),
delimiter="\t",
quoting=csv.QUOTE_MINIMAL,
fieldnames=['path', 'image_url']
), desc="loading item...")
]
model.eval()
markdown_out_dir = args.markdown_out
if not os.path.isdir(markdown_out_dir):
os.makedirs(markdown_out_dir, exist_ok=True)
with torch.no_grad():
text_feature = text_tokenizer(text_query_list, return_tensors="pt", truncation='longest_first', padding=True)
q_vecs = model.encode_text({
"input_ids":text_feature["input_ids"].to(model_device),
"attention_mask":text_feature["attention_mask"].to(model_device),})
q_vecs = q_vecs.cpu().numpy()
scores, indice = faiss_scorer.get_topk(q_vecs, args.topk)
result_list = []
for t, score, index in zip(range(len(text_query_list)), scores, indice):
result = [ {
| "k": k+1, |
====REPOSITORY====
# Repo Name: CASIA-IVA-Lab/FLAP
# Path: models/hf_llama/modeling_llama.py
class LlamaForCausalLM(LlamaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.model = LlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
#self.post_init()
def get_input_embeddings(self):
return self.model.embed_tokens
def set_input_embeddings(self, value):
self.model.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, LlamaForCausalLM
>>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you consciours? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
):
if past_key_values:
input_ids = input_ids[:, -1:]
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past_key_values:
position_ids = position_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and past_key_values is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": attention_mask,
}
)
return model_inputs
@staticmethod
def _reorder_cache(past_key_values, beam_idx):
reordered_past = ()
for layer_past in past_key_values:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
# Path: lib/prune.py
def prune_wanda_sp(args, model, tokenizer, device=torch.device("cuda:0")):
"""
Wanda on structured pruning.
Args:
args (object): Command line arguments parsed via argparse.
model (nn.Module): PyTorch model to prune.
tokenizer (Tokenizer): Tokenizer associated with the model.
device (torch.device, optional): Device to move tensors to. Defaults to CUDA device 0.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=128,seed=args.seed,seqlen=model.seqlen,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = {}
subset.update({'self_attn.o_proj': find_layers(layer)['self_attn.o_proj']})
subset.update({'mlp.down_proj': find_layers(layer)['mlp.down_proj']})
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}): ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
wrapped_layers[name] = WrappedGPT(subset[name])
def add_batch(name):
def tmp(_, inp, out):
wrapped_layers[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in wrapped_layers:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for name in subset:
print(f"pruning layer {i} name {name}")
W_metric = torch.abs(subset[name].weight.data) * torch.sqrt(wrapped_layers[name].scaler_row.reshape((1,-1)))
if name == 'self_attn.o_proj':
W_metric = W_metric.mean(axis=0).reshape(-1, 128).sum(dim=1) # importance score of each head
thresh = torch.sort(W_metric.cuda())[0][int(args.pruning_ratio*layer.self_attn.num_heads)].cpu()
W_mask = (W_metric>=thresh)
compress(layer, W_mask, None, None, None, device, bias=False, unstr=args.unstr)
else:
W_metric = W_metric.mean(axis=0)
thresh = torch.sort(W_metric.cuda())[0][int(W_metric.numel()*args.pruning_ratio)].cpu()
W_mask = (W_metric>=thresh)
compress(layer, None, W_mask, None, None, device, bias=False, unstr=args.unstr)
wrapped_layers[name].free()
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
inps, outs = outs, inps # the pruned output as input to the next layer
torch.cuda.empty_cache()
model.config.use_cache = use_cache
torch.cuda.empty_cache()
# Path: lib/prune.py
def prune_flap(args, model, tokenizer, device=torch.device("cuda:0")):
"""
Our FLAP Pruning.
Args:
args (object): Command line arguments parsed via argparse.
model (nn.Module): PyTorch model to prune.
tokenizer (Tokenizer): Tokenizer associated with the model.
device (torch.device, optional): Device to move tensors to. Defaults to CUDA device 0.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
print("loading calibdation data")
dataloader, _ = get_loaders("wikitext2", nsamples=args.nsamples,seed=args.seed,seqlen=model.seqlen,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, device)
layers = model.model.layers
attn_metric_list, mlp_metric_list = [], []
attn_baseline_inp_list, mlp_baseline_inp_list = [], []
attn_mask, mlp_mask = [], []
# Split into sub-problems, separate statistics for each module
for i in tqdm(range(len(layers)), desc="Processing layers"):
layer = layers[i]
subset = {}
subset.update({'self_attn.o_proj': find_layers(layer)['self_attn.o_proj']})
subset.update({'mlp.down_proj': find_layers(layer)['mlp.down_proj']})
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}): ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
wrapped_layers[name] = BiasGPT(subset[name], args.metrics)
def add_batch(name):
def tmp(_, inp, out):
wrapped_layers[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in wrapped_layers:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for name in subset:
if name == 'self_attn.o_proj':
W_metric = metrics[args.metrics](wrapped_layers, subset, name) ** 2
if args.structure == "UL-UM":
W_metric = W_metric.reshape(-1, 128).sum(dim=1)
thresh = torch.sort(W_metric.cuda())[0][int(args.pruning_ratio*layer.self_attn.num_heads)].cpu()
W_mask = (W_metric>=thresh)
attn_mask.append(W_mask)
elif args.structure == "UL-MM":
W_metric = W_metric.reshape(-1, 128).sum(dim=1)
thresh = torch.sort(W_metric.cuda())[0][args.remove_heads // len(layers)].cpu()
W_mask = (W_metric>=thresh)
attn_mask.append(W_mask)
else:
attn_metric_list.append(W_metric.cpu())
attn_baseline_inp_list.append(wrapped_layers[name].baseline_inp.type(torch.half))
else:
W_metric = metrics[args.metrics](wrapped_layers, subset, name)
if args.structure == "UL-UM":
thresh = torch.sort(W_metric.cuda())[0][int(W_metric.numel()*args.pruning_ratio)].cpu()
W_mask = (W_metric>=thresh)
mlp_mask.append(W_mask)
elif args.structure == "UL-MM":
thresh = torch.sort(W_metric.cuda())[0][cal_remove_neuron(args, model)].cpu()
W_mask = (W_metric>=thresh)
mlp_mask.append(W_mask)
else:
mlp_metric_list.append(W_metric.cpu())
mlp_baseline_inp_list.append(wrapped_layers[name].baseline_inp.type(torch.half))
wrapped_layers[name].free()
inps, outs = outs, inps # Use the original output as input to the next layer
torch.cuda.empty_cache()
standardization = lambda x: (x - torch.mean(x, axis=1, keepdim=True)) / torch.std(x, axis=1, keepdim=True)
if args.structure in ["AL-MM", "AL-AM"]:
attn_metric = torch.stack(attn_metric_list)
attn_metric = standardization(attn_metric)
attn_metric = attn_metric.reshape(len(layers), -1, 128).mean(dim=2)
mlp_metric = torch.stack(mlp_metric_list)
mlp_metric = standardization(mlp_metric)
if args.structure == "AL-MM":
sorted_attn = torch.sort(attn_metric.view(-1), descending=True)[0]
attn_thres = sorted_attn[-int(args.remove_heads)]
attn_mask = (attn_metric > attn_thres) # 1 means retain
sorted_mlp = torch.sort(mlp_metric.view(-1), descending=True)[0]
mlp_thres = sorted_mlp[-cal_remove_neuron(args, model)]
mlp_mask = (mlp_metric > mlp_thres)
else:
prune_metric = torch.cat([attn_metric.view(-1), mlp_metric.view(-1)])
sorted_prune, indices = torch.sort(prune_metric, descending=True)
compression_weight = torch.ones_like(indices)
compression_weight[indices < attn_metric.numel()] = 512.0 / 3
threshold = sorted_prune[torch.argmin(torch.abs(torch.cumsum(compression_weight, 0) - torch.sum(compression_weight)*(1 - args.pruning_ratio)))]
attn_mask = (attn_metric > threshold)
mlp_mask = (mlp_metric > threshold)
else:
attn_mask = torch.stack(attn_mask)
mlp_mask = torch.stack(mlp_mask)
for idx in range(len(layers)):
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}):
compress(model.model.layers[idx], attn_mask[idx], None, attn_baseline_inp_list[idx], None, model.hf_device_map[f"model.layers.{idx}"], unstr=args.unstr)
else:
compress(model.model.layers[idx], attn_mask[idx], None, attn_baseline_inp_list[idx], None, device, unstr=args.unstr)
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}):
compress(model.model.layers[idx], None, mlp_mask[idx], None, mlp_baseline_inp_list[idx], model.hf_device_map[f"model.layers.{idx}"], unstr=args.unstr)
else:
compress(model.model.layers[idx], None, mlp_mask[idx], None, mlp_baseline_inp_list[idx], device, unstr=args.unstr)
model.config.use_cache = use_cache
torch.cuda.empty_cache()
# Path: lib/prune.py
def prune_magnitude_sp(args, model, tokenizer, device=torch.device("cuda:0")):
"""
Magnitude Pruning on structured pruning.
Args:
args (object): Command line arguments parsed via argparse.
model (nn.Module): PyTorch model to prune.
tokenizer (Tokenizer): Tokenizer associated with the model.
device (torch.device, optional): Device to move tensors to. Defaults to CUDA device 0.
"""
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = {}
subset.update({'self_attn.o_proj': find_layers(layer)['self_attn.o_proj']})
subset.update({'mlp.down_proj': find_layers(layer)['mlp.down_proj']})
for name in subset:
print(f"pruning layer {i} name {name}")
W_metric = torch.norm(subset[name].weight.data, dim=0)
if name == 'self_attn.o_proj':
W_metric = W_metric.reshape(-1, 128).sum(dim=1) # importance score of each head
thresh = torch.sort(W_metric.cuda())[0][int(args.pruning_ratio*layer.self_attn.num_heads)].cpu()
W_mask = (W_metric>=thresh)
compress(layer, W_mask, None, None, None, device, bias=False, unstr=args.unstr)
else:
thresh = torch.sort(W_metric.cuda())[0][int(W_metric.numel()*args.pruning_ratio)].cpu()
W_mask = (W_metric>=thresh)
compress(layer, None, W_mask, None, None, device, bias=False, unstr=args.unstr)
# Path: lib/prune.py
def check_sparsity(model):
"""
Check the sparsity of the weights in different layers of the model.
Args:
model (nn.Module): The model to check.
Returns:
float: Ratio of the count of non-zero weights to total parameters in the model.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
intermediate_size = model.config.intermediate_size
hidden_size = model.config.hidden_size
count = 0
total_params = 0
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
sub_count = 0
sub_params = 0
for name in subset:
W = subset[name].weight.data
sub_count += W.numel()
count += W.numel()
if 'self_attn' in name:
total_params += hidden_size * hidden_size
sub_params += hidden_size * hidden_size
else:
total_params += hidden_size * intermediate_size
sub_params += hidden_size * intermediate_size
if subset[name].bias is not None:
count += subset[name].bias.data.numel()
sub_count += subset[name].bias.data.numel()
print(f"layer {i} sparsity {float(sub_count)/sub_params:.6f}")
model.config.use_cache = use_cache
return float(count)/total_params
# Path: lib/eval.py
def eval_ppl(model, tokenizer, device=torch.device("cuda:0")):
"""
Evaluate perplexity (ppl) on a specified model and tokenizer.
Args:
model (torch.nn.Module): The language model to be evaluated.
tokenizer (Tokenizer): Tokenizer instance for encoding texts.
device (torch.device): Device to move data onto (e.g., 'cuda:0' or 'cpu').
Returns:
float: The perplexity of the language model on the test dataset.
"""
# Set dataset
dataset = "wikitext2" # Dataset consisting of extracted sentences from Wikipedia articles
# Print status
print(f"evaluating on {dataset}")
# Get the test loader
_, testloader = get_loaders(
dataset, seed=0, seqlen=model.seqlen, tokenizer=tokenizer
)
# Evaluate perplexity in no grad context to avoid updating the model
with torch.no_grad():
# Perplexity measures how well the probability distribution predicted by the model aligns with the actual distribution of the words. Lower perplexity is better.
ppl = eval_ppl_wikitext(model, testloader, 1, device)
return ppl
# Path: main.py
import argparse
import os
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from models.hf_llama.modeling_llama import LlamaForCausalLM
from importlib.metadata import version
from lib.prune import prune_wanda_sp, prune_flap, prune_magnitude_sp, check_sparsity
from lib.eval import eval_ppl
print('torch', version('torch'))
print('transformers', version('transformers'))
print('accelerate', version('accelerate'))
print('# of gpus: ', torch.cuda.device_count())
def get_llm(model, cache_dir="llm_weights"):
# model = AutoModelForCausalLM.from_pretrained(
# model,
# torch_dtype=torch.float16,
# cache_dir=cache_dir,
# low_cpu_mem_usage=True,
# device_map="auto"
# )
model = LlamaForCausalLM.from_pretrained(
model,
torch_dtype=torch.float16,
cache_dir=cache_dir,
low_cpu_mem_usage=True,
# device_map="auto"
)
for i in range(len(model.model.layers)):
model.model.layers[i].self_attn.o_proj.bias = torch.nn.Parameter(torch.zeros_like(model.model.layers[i].self_attn.o_proj.bias, device='cpu')) # or 'cuda'
model.model.layers[i].mlp.down_proj.bias = torch.nn.Parameter(torch.zeros_like(model.model.layers[i].mlp.down_proj.bias, device='cpu')) # or 'cuda'
torch.nn.init.zeros_(model.model.layers[i].self_attn.o_proj.bias)
torch.nn.init.zeros_(model.model.layers[i].mlp.down_proj.bias)
model.seqlen = 128
return model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='LLaMA model') # Huggingface model name
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--nsamples', type=int, default=2048, help='Number of calibration samples.')
parser.add_argument('--pruning_ratio', type=float, default=0, help='Pruning ratio.')
parser.add_argument('--remove_heads', type=int, default=8, help='Remove num_heads')
parser.add_argument("--metrics", type=str, default="WIFV", choices=["IFV", "WIFV", "WIFN", 'N/A'])
parser.add_argument("--structure", type=str, default="AL-AM", choices=["UL-UM", "UL-MM", "AL-MM", "AL-AM", 'N/A'])
parser.add_argument("--prune_method", type=str, default="flap", choices=["flap", "wanda_sp", "mag_sp"])
parser.add_argument("--cache_dir", default="llm_weights", type=str)
parser.add_argument('--unstr', action="store_true")
parser.add_argument('--eval', action="store_true")
parser.add_argument('--save_model', type=str, default=None, help='Path to save the pruned model.')
args = parser.parse_args()
# Setting seeds for reproducibility
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
# Build the model and tokenizer
print(f"loading llm model {args.model}")
model = get_llm(args.model, args.cache_dir)
device = torch.device("cuda:0")
model.to(device)
model.eval()
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=False)
if "30b" in args.model or "65b" in args.model: # for 30b and 65b we use device_map to load onto multiple A6000 GPUs, thus the processing here.
device = model.hf_device_map["lm_head"]
print("use device ", device)
# Prune the model
print("pruning starts")
if args.prune_method == "flap":
if args.metrics == 'N/A':
raise ValueError("For FLAP pruning, the metrics parameter must be chosen from ['IFV', 'WIFV', 'WIFN']. 'N/A' is not a valid choice.")
if args.structure == 'N/A':
====REPOSITORY====
# Repo Name: alibaba/u2mot
# Path: yolox/tracker/basetrack.py
class BaseTrack(object):
_count = 0
track_id = 0
is_activated = False
state = TrackState.New
history = OrderedDict()
features = []
curr_feature = None
score = 0
start_frame = 0
frame_id = 0
time_since_update = 0
# multi-camera
location = (np.inf, np.inf)
@property
def end_frame(self):
return self.frame_id
@staticmethod
def next_id():
BaseTrack._count += 1
return BaseTrack._count
def activate(self, *args):
raise NotImplementedError
def predict(self):
raise NotImplementedError
def update(self, *args, **kwargs):
raise NotImplementedError
def mark_lost(self):
self.state = TrackState.Lost
def mark_long_lost(self):
self.state = TrackState.LongLost
def mark_removed(self):
self.state = TrackState.Removed
@staticmethod
def clear_count():
BaseTrack._count = 0
# Path: yolox/tracker/basetrack.py
class TrackState(object):
New = 0
Tracked = 1
Lost = 2
LongLost = 3
Removed = 4
# Path: yolox/tracker/kalman_filter.py
class KalmanFilter(object):
"""
A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space
x, y, w, h, vx, vy, vw, vh
contains the bounding box center position (x, y), width w, height h,
and their respective velocities.
Object motion follows a constant velocity model. The bounding box location
(x, y, w, h) is taken as direct observation of the state space (linear
observation model).
"""
def __init__(self):
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current
# state estimate. These weights control the amount of uncertainty in
# the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, w, h) with center position (x, y),
width w, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[2],
2 * self._std_weight_position * measurement[3],
2 * self._std_weight_position * measurement[2],
2 * self._std_weight_position * measurement[3],
10 * self._std_weight_velocity * measurement[2],
10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[2],
10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous
time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
self._std_weight_position * mean[2],
self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[2],
self._std_weight_velocity * mean[3],
self._std_weight_velocity * mean[2],
self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot((
self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
std = [
self._std_weight_position * mean[2],
self._std_weight_position * mean[3],
self._std_weight_position * mean[2],
self._std_weight_position * mean[3]]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((
self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def multi_predict(self, mean, covariance):
"""Run Kalman filter prediction step (Vectorized version).
Parameters
----------
mean : ndarray
The Nx8 dimensional mean matrix of the object states at the previous
time step.
covariance : ndarray
The Nx8x8 dimensional covariance matrics of the object states at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[:, 2],
self._std_weight_position * mean[:, 3],
self._std_weight_position * mean[:, 2],
self._std_weight_position * mean[:, 3]]
std_vel = [
self._std_weight_velocity * mean[:, 2],
self._std_weight_velocity * mean[:, 3],
self._std_weight_velocity * mean[:, 2],
self._std_weight_velocity * mean[:, 3]]
sqr = np.square(np.r_[std_pos, std_vel]).T
motion_cov = []
for i in range(len(mean)):
motion_cov.append(np.diag(sqr[i]))
motion_cov = np.asarray(motion_cov)
mean = np.dot(mean, self._motion_mat.T)
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
covariance = np.dot(left, self._motion_mat.T) + motion_cov
return mean, covariance
def update(self, mean, covariance, measurement):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, w, h), where (x, y)
is the center position, w the width, and h the height of the
bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance)
chol_factor, lower = scipy.linalg.cho_factor(
projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._update_mat.T).T,
check_finite=False).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
def gating_distance(self, mean, covariance, measurements,
only_position=False, metric='maha'):
"""Compute gating distance between state distribution and measurements.
A suitable distance threshold can be obtained from `chi2inv95`. If
`only_position` is False, the chi-square distribution has 4 degrees of
freedom, otherwise 2.
Parameters
----------
mean : ndarray
Mean vector over the state distribution (8 dimensional).
covariance : ndarray
Covariance of the state distribution (8x8 dimensional).
measurements : ndarray
An Nx4 dimensional matrix of N measurements, each in
format (x, y, a, h) where (x, y) is the bounding box center
position, a the aspect ratio, and h the height.
only_position : Optional[bool]
If True, distance computation is done with respect to the bounding
box center position only.
Returns
-------
ndarray
Returns an array of length N, where the i-th element contains the
squared Mahalanobis distance between (mean, covariance) and
`measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
d = measurements - mean
if metric == 'gaussian':
return np.sum(d * d, axis=1)
elif metric == 'maha':
cholesky_factor = np.linalg.cholesky(covariance)
z = scipy.linalg.solve_triangular(
cholesky_factor, d.T, lower=True, check_finite=False,
overwrite_b=True)
squared_maha = np.sum(z * z, axis=0)
return squared_maha
else:
raise ValueError('invalid distance metric')
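# --- Editor's illustrative sketch (not part of the original repository) ---
# A minimal example of how the gating distance above is typically used to
# reject implausible track/detection pairs. The 0.95-quantile chi-square
# thresholds below are the standard DeepSORT-style values; `kf`, `mean`,
# `covariance` and `measurements` are assumed to come from the KalmanFilter
# shown above (measurements as an Nx4 array of (x, y, w, h) boxes).
chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877}
def gate_measurements(kf, mean, covariance, measurements, only_position=False):
    # squared Mahalanobis distance from the predicted state to each measurement
    distances = kf.gating_distance(mean, covariance, measurements,
                                   only_position=only_position, metric='maha')
    threshold = chi2inv95[2 if only_position else 4]
    return distances <= threshold  # boolean mask of measurements inside the gate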
# Path: yolox/tracker/gmc.py
class GMC:
def __init__(self, method='orb', downscale=2, verbose=None):
super(GMC, self).__init__()
self.method = method
self.downscale = max(1, int(downscale))
if self.method == 'orb':
self.detector = cv2.FastFeatureDetector_create(20)
self.extractor = cv2.ORB_create()
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
seqName = verbose[0]
fileDir = verbose[1]
if '-FRCNN' in seqName:
seqName = seqName[:-6]
elif '-DPM' in seqName:
seqName = seqName[:-4]
elif '-SDP' in seqName:
seqName = seqName[:-4]
self.gmcFile = open(f"yolox/tracker/GMC_files/{fileDir}/GMC-{seqName}.txt", 'w+')
elif self.method == 'sift':
self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
self.matcher = cv2.BFMatcher(cv2.NORM_L2)
elif self.method == 'ecc':
number_of_iterations = 5000
termination_eps = 1e-6
self.warp_mode = cv2.MOTION_EUCLIDEAN
self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
elif self.method == 'file' or self.method == 'files':
seqName = verbose[0]
# MOT17_ablation, MOTChallenge, VisDrone/test-dev, BDD100K/val, BDD100K/test
fileDir = verbose[1]
filePath = f'yolox/tracker/GMC_files/{fileDir}'
if '-FRCNN' in seqName:
seqName = seqName[:-6]
elif '-DPM' in seqName:
seqName = seqName[:-4]
elif '-SDP' in seqName:
seqName = seqName[:-4]
self.gmcFile = open(filePath + "/GMC-" + seqName + ".txt", 'r')
if self.gmcFile is None:
raise ValueError("Error: Unable to open GMC file in directory:" + filePath)
elif self.method == 'none' or self.method == 'None':
self.method = 'none'
else:
raise ValueError("Error: Unknown CMC method:" + method)
self.prevFrame = None
self.prevKeyPoints = None
self.prevDescriptors = None
self.initializedFirstFrame = False
self.frameCnt = 0
def apply(self, raw_frame, detections=None):
if self.method == 'orb' or self.method == 'sift':
try:
H = self.applyFeaures(raw_frame, detections)
except:
H = np.array([[1., 0., 0.], [0., 1., 0.]])
self.gmcFile.write('%d\t%.6f\t%.6f\t%.6f\t%.6f\t%.6f\t%.6f\t\n' % \
(self.frameCnt, H[0, 0], H[0, 1], H[0, 2], H[1, 0], H[1, 1], H[1, 2]))
self.frameCnt += 1
return H
elif self.method == 'ecc':
return self.applyEcc(raw_frame, detections)
elif self.method == 'file':
return self.applyFile(raw_frame, detections)
elif self.method == 'none':
return np.eye(2, 3)
else:
return np.eye(2, 3)
def applyEcc(self, raw_frame, detections=None):
# Initialize
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3, dtype=np.float32)
# Downscale image (TODO: consider using pyramids)
if self.downscale > 1.0:
frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
width = width // self.downscale
height = height // self.downscale
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
# Initialization done
self.initializedFirstFrame = True
return H
# Run the ECC algorithm. The results are stored in warp_matrix.
# (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)
try:
(cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
except:
print('Warning: find transform failed. Set warp as identity')
return H
def applyFeaures(self, raw_frame, detections=None):
# Initialize
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3)
# Downscale image (TODO: consider using pyramids)
if self.downscale > 1.0:
# frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
width = width // self.downscale
height = height // self.downscale
# find the keypoints
mask = np.zeros_like(frame)
# mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255
mask[int(0.02 * height): int(0.98 * height), int(0.02 * width): int(0.98 * width)] = 255
if detections is not None:
for det in detections:
tlbr = (det[:4] / self.downscale).astype(np.int_)
mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0
keypoints = self.detector.detect(frame, mask)
# compute the descriptors
keypoints, descriptors = self.extractor.compute(frame, keypoints)
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
# Initialization done
self.initializedFirstFrame = True
return H
# Match descriptors.
knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)
# Filtered matches based on smallest spatial distance
matches = []
spatialDistances = []
maxSpatialDistance = 0.25 * np.array([width, height])
# Handle empty matches case
if len(knnMatches) == 0:
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
return H
for m, n in knnMatches:
if m.distance < 0.9 * n.distance:
prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
currKeyPointLocation = keypoints[m.trainIdx].pt
spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],
prevKeyPointLocation[1] - currKeyPointLocation[1])
if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \
(np.abs(spatialDistance[1]) < maxSpatialDistance[1]):
spatialDistances.append(spatialDistance)
matches.append(m)
meanSpatialDistances = np.mean(spatialDistances, 0)
stdSpatialDistances = np.std(spatialDistances, 0)
inliesrs = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances
goodMatches = []
prevPoints = []
currPoints = []
for i in range(len(matches)):
if inliesrs[i, 0] and inliesrs[i, 1]:
goodMatches.append(matches[i])
prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
currPoints.append(keypoints[matches[i].trainIdx].pt)
prevPoints = np.array(prevPoints)
currPoints = np.array(currPoints)
# Draw the keypoint matches on the output image
if 0:
matches_img = np.hstack((self.prevFrame, frame))
matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
W = np.size(self.prevFrame, 1)
for m in goodMatches:
prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
curr_pt[0] += W
color = np.random.randint(0, 255, (3,))
color = (int(color[0]), int(color[1]), int(color[2]))
matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)
matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)
matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)
plt.figure()
plt.imshow(matches_img)
plt.show()
# Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
# Handle downscale
if self.downscale > 1.0:
H[0, 2] *= self.downscale
H[1, 2] *= self.downscale
else:
print('Warning: not enough matching points')
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
return H
def applyFile(self, raw_frame=None, detections=None):
line = self.gmcFile.readline()
tokens = line.split("\t")
H = np.eye(2, 3, dtype=np.float_)
if len(tokens) > 6:
H[0, 0] = float(tokens[1])
H[0, 1] = float(tokens[2])
H[0, 2] = float(tokens[3])
H[1, 0] = float(tokens[4])
H[1, 1] = float(tokens[5])
H[1, 2] = float(tokens[6])
return H
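# --- Editor's illustrative sketch (not part of the original repository) ---
# One possible way to use the 2x3 affine matrix H returned by GMC.apply():
# warping the two corners of a tracked (x1, y1, x2, y2) box into the current
# frame. The box format and helper name are assumptions for illustration, and
# warping only two corners is a simplification that ignores box rotation.
import numpy as np
def warp_tlbr(tlbr, H):
    x1, y1, x2, y2 = tlbr
    corners = np.array([[x1, y1, 1.0],
                        [x2, y2, 1.0]])  # homogeneous 2D points
    warped = corners @ H.T               # (2, 3) @ (3, 2) -> (2, 2)
    return warped.reshape(-1)            # approximate warped (x1, y1, x2, y2)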
# Path: yolox/tracker/u2mot_tracker.py
import numpy as np
from collections import deque
from .basetrack import BaseTrack, TrackState
from .kalman_filter import KalmanFilter
from .gmc import GMC
from . import matching
#!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
def __init__(self, tlwh, score, cls=0, feat=None, feat_history=50):
# wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float64)  # np.float was removed in NumPy 1.24+
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.cls = -1
self.cls_hist = [] # (cls id, freq)
self.update_cls(cls, score)
self.score = score
self.tracklet_len = 0
self.smooth_feat = None
self.curr_feat = None
self.features = deque([], maxlen=feat_history)
if feat is not None:
self.update_features(feat)
self.alpha = 0.9
def update_features(self, feat):
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def update_cls(self, cls, score):
if len(self.cls_hist) > 0:
max_freq = 0
found = False
for c in self.cls_hist:
if cls == c[0]:
c[1] += score
found = True
if c[1] > max_freq:
max_freq = c[1]
self.cls = c[0]
if not found:
self.cls_hist.append([cls, score])
self.cls = cls
else:
self.cls_hist.append([cls, score])
self.cls = cls
def predict(self):
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[6] = 0
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][6] = 0
| multi_mean[i][7] = 0 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: liuhuang31/HiFTNet-sr
# Path: env.py
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
# Path: env.py
def build_env(config, config_name, path):
t_path = os.path.join(path, config_name)
if config != t_path:
os.makedirs(path, exist_ok=True)
shutil.copyfile(config, os.path.join(path, config_name))
# Path: meldataset.py
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,
device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):
self.audio_files = training_files
random.seed(1234)
if shuffle:
random.shuffle(self.audio_files)
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.cached_wav = None
self.n_cache_reuse = n_cache_reuse
self._cache_ref_count = 0
self.device = device
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
def __getitem__(self, index):
filename = self.audio_files[index]
if self._cache_ref_count == 0:
audio, sampling_rate = load_wav(filename, self.sampling_rate)
# audio = audio / MAX_WAV_VALUE
if not self.fine_tuning:
audio = normalize(audio) * 0.95
self.cached_wav = audio
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
self._cache_ref_count = self.n_cache_reuse
else:
audio = self.cached_wav
self._cache_ref_count -= 1
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = random.randint(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,
center=False, training=True)
else:
mel = np.load(
os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel)
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]
else:
mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')
audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files)
# Path: meldataset.py
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False):
# if torch.min(y) < -1.:
# print('min value is ', torch.min(y))
# if torch.max(y) > 1.:
# print('max value is ', torch.max(y))
if training:
with torch.no_grad():
# 16k to 24k/48k
if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000):
y = y.squeeze().cpu().numpy()
y = librosa.resample(y, sampling_rate, 16000)
y = librosa.resample(y, 16000, 24000)
y = torch.FloatTensor(y)
y = y.unsqueeze(0)
sampling_rate = 24000
n_fft = int(n_fft/2)
hop_size=int(hop_size/2)
win_size=int(win_size/2)
# 24k to 48k
elif fmax <= 12000 and sampling_rate == 48000:
y = y.squeeze().cpu().numpy()
y = librosa.resample(y, sampling_rate, 24000)
y = torch.FloatTensor(y)
y = y.unsqueeze(0)
sampling_rate = 24000
n_fft = int(n_fft/2)
hop_size=int(hop_size/2)
win_size=int(win_size/2)
else:
pass
global mel_basis, hann_window
if fmax not in mel_basis:
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
y = y.squeeze(1)
# complex tensor as default, then use view_as_real for future pytorch compatibility
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec
# Path: meldataset.py
def get_dataset_filelist(a):
training_files =[]
validation_files =[]
total_files = 0
input_wave_dirs = a.input_wavs_dir.split(",")
for wave_dir in input_wave_dirs:
num_validation_files = 3
files_under_path = 0
allfiles = find_all_wav_path(wave_dir)
for input_file_name in allfiles:
if not os.path.splitext(input_file_name)[-1] == '.wav':
continue
files_under_path +=1
full_file_path = input_file_name
if num_validation_files <=0:
training_files.append(full_file_path)
else:
validation_files.append(full_file_path)
num_validation_files -=1
if files_under_path == 0:
raise Exception("no wave file found!")
total_files +=files_under_path
print(f'total files:{total_files}')
return training_files, validation_files
# Path: models.py
class Generator(torch.nn.Module):
def __init__(self, h, F0_model):
super(Generator, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.m_source = SourceModuleHnNSF(
sampling_rate=h.sampling_rate,
upsample_scale=np.prod(h.upsample_rates) * h.gen_istft_hop_size,
harmonic_num=8, voiced_threshod=10)
self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h.upsample_rates) * h.gen_istft_hop_size)
self.noise_convs = nn.ModuleList()
self.noise_res = nn.ModuleList()
self.F0_model = F0_model
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
c_cur = h.upsample_initial_channel // (2 ** (i + 1))
if i + 1 < len(h.upsample_rates): #
stride_f0 = np.prod(h.upsample_rates[i + 1:])
self.noise_convs.append(Conv1d(
h.gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2))
self.noise_res.append(resblock(h, c_cur, 7, [1,3,5]))
else:
self.noise_convs.append(Conv1d(h.gen_istft_n_fft + 2, c_cur, kernel_size=1))
self.noise_res.append(resblock(h, c_cur, 11, [1,3,5]))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.post_n_fft = h.gen_istft_n_fft
self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
self.stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft)
def forward(self, x):
f0, _, _ = self.F0_model(x.unsqueeze(1))
if len(f0.shape) == 1:
f0 = f0.unsqueeze(0)
f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
har_source, _, _ = self.m_source(f0)
har_source = har_source.transpose(1, 2).squeeze(1)
har_spec, har_phase = self.stft.transform(har_source)
har = torch.cat([har_spec, har_phase], dim=1)
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x_source = self.noise_convs[i](har)
x_source = self.noise_res[i](x_source)
x = self.ups[i](x)
if i == self.num_upsamples - 1:
x = self.reflection_pad(x)
x = x + x_source
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :])
phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])
return spec, phase
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
# Path: models.py
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
# Path: models.py
class MultiResSpecDiscriminator(torch.nn.Module):
def __init__(self,
fft_sizes=[1024, 2048, 512],
hop_sizes=[120, 240, 50],
win_lengths=[600, 1200, 240],
window="hann_window"):
super(MultiResSpecDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),
SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),
SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
# Path: models.py
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
# Path: models.py
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
# Path: models.py
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
# Path: models.py
def discriminator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
tau = 0.04
m_DG = torch.median((dr-dg))
L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])
loss += tau - F.relu(tau - L_rel)
return loss
# Path: models.py
def generator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
for dg, dr in zip(disc_real_outputs, disc_generated_outputs):
tau = 0.04
m_DG = torch.median((dr-dg))
L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])
loss += tau - F.relu(tau - L_rel)
return loss
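# --- Editor's illustrative sketch (not part of the original repository) ---
# Quick sanity check of the LSGAN-style losses above on dummy discriminator
# outputs; shapes and values are arbitrary and chosen only for illustration.
import torch
real_outs = [torch.ones(4, 10)]    # ideal scores for real audio
fake_outs = [torch.zeros(4, 10)]   # ideal scores for generated audio
d_loss, _, _ = discriminator_loss(real_outs, fake_outs)  # tensor(0.)
g_loss, _ = generator_loss(fake_outs)                    # tensor(1.)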
# Path: utils.py
def plot_spectrogram(spectrogram):
fig, ax = plt.subplots(figsize=(10, 2))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
fig.canvas.draw()
plt.close()
return fig
# Path: utils.py
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '????????')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return None
return sorted(cp_list)[-1]
# Path: utils.py
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
# Path: utils.py
def save_checkpoint(filepath, obj):
print("Saving checkpoint to {}".format(filepath))
torch.save(obj, filepath)
print("Complete.")
# Path: stft.py
class TorchSTFT(torch.nn.Module):
def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):
super().__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))
def transform(self, input_data):
forward_transform = torch.stft(
input_data,
self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),
return_complex=True)
return torch.abs(forward_transform), torch.angle(forward_transform)
def inverse(self, magnitude, phase):
inverse_transform = torch.istft(
magnitude * torch.exp(phase * 1j),
self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))
return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
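# --- Editor's illustrative sketch (not part of the original repository) ---
# Round-trip usage of the TorchSTFT module above on a dummy batch of audio;
# the input shape and the default STFT parameters are illustrative only.
import torch
stft = TorchSTFT()                  # filter_length=800, hop_length=200, win_length=800
dummy = torch.randn(1, 16000)       # (batch, samples)
mag, phase = stft.transform(dummy)  # magnitude and phase spectrograms
recon = stft.inverse(mag, phase)    # approximate reconstruction, shape (1, 1, n_samples)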
# Path: Utils/JDC/model.py
class JDCNet(nn.Module):
"""
Joint Detection and Classification Network model for singing voice melody.
"""
def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):
super().__init__()
self.num_class = num_class
# input = (b, 1, 31, 513), b = batch size
self.conv_block = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)
nn.BatchNorm2d(num_features=64),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)
)
# res blocks
self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)
self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)
self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)
# pool block
self.pool_block = nn.Sequential(
nn.BatchNorm2d(num_features=256),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)
nn.Dropout(p=0.2),
)
# maxpool layers (for auxiliary network inputs)
# in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)
self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))
# in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)
self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))
# in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)
self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))
# in = (b, 640, 31, 2), out = (b, 256, 31, 2)
self.detector_conv = nn.Sequential(
nn.Conv2d(640, 256, 1, bias=False),
nn.BatchNorm2d(256),
nn.LeakyReLU(leaky_relu_slope, inplace=True),
nn.Dropout(p=0.2),
)
# input: (b, 31, 512) - resized from (b, 256, 31, 2)
self.bilstm_classifier = nn.LSTM(
input_size=512, hidden_size=256,
batch_first=True, bidirectional=True) # (b, 31, 512)
# input: (b, 31, 512) - resized from (b, 256, 31, 2)
self.bilstm_detector = nn.LSTM(
input_size=512, hidden_size=256,
batch_first=True, bidirectional=True) # (b, 31, 512)
# input: (b * 31, 512)
self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)
# input: (b * 31, 512)
self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier
# initialize weights
self.apply(self.init_weights)
def get_feature_GAN(self, x):
seq_len = x.shape[-2]
x = x.float().transpose(-1, -2)
convblock_out = self.conv_block(x)
resblock1_out = self.res_block1(convblock_out)
resblock2_out = self.res_block2(resblock1_out)
resblock3_out = self.res_block3(resblock2_out)
poolblock_out = self.pool_block[0](resblock3_out)
poolblock_out = self.pool_block[1](poolblock_out)
return poolblock_out.transpose(-1, -2)
def get_feature(self, x):
seq_len = x.shape[-2]
x = x.float().transpose(-1, -2)
convblock_out = self.conv_block(x)
resblock1_out = self.res_block1(convblock_out)
resblock2_out = self.res_block2(resblock1_out)
resblock3_out = self.res_block3(resblock2_out)
poolblock_out = self.pool_block[0](resblock3_out)
poolblock_out = self.pool_block[1](poolblock_out)
return self.pool_block[2](poolblock_out)
def forward(self, x):
"""
Returns:
classification_prediction, detection_prediction
sizes: (b, 31, 722), (b, 31, 2)
"""
###############################
# forward pass for classifier #
###############################
seq_len = x.shape[-1]
x = x.float().transpose(-1, -2)
convblock_out = self.conv_block(x)
resblock1_out = self.res_block1(convblock_out)
resblock2_out = self.res_block2(resblock1_out)
resblock3_out = self.res_block3(resblock2_out)
poolblock_out = self.pool_block[0](resblock3_out)
poolblock_out = self.pool_block[1](poolblock_out)
GAN_feature = poolblock_out.transpose(-1, -2)
poolblock_out = self.pool_block[2](poolblock_out)
# (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)
classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))
classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states
classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)
classifier_out = self.classifier(classifier_out)
classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class)
# sizes: (b, 31, 722), (b, 31, 2)
# classifier output consists of predicted pitch classes per frame
# detector output consists of: (isvoice, notvoice) estimates per frame
return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out
@staticmethod
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):
for p in m.parameters():
if p.data is None:
continue
if len(p.shape) >= 2:
nn.init.orthogonal_(p.data)
else:
nn.init.normal_(p.data)
# Path: train.py
import warnings
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
from models import Generator, MultiPeriodDiscriminator, MultiResSpecDiscriminator, feature_loss, generator_loss,\
discriminator_loss, discriminator_TPRLS_loss, generator_TPRLS_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
from stft import TorchSTFT
from Utils.JDC.model import JDCNet
warnings.simplefilter(action='ignore', category=FutureWarning)
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
| if h.num_gpus > 1: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: m-abr/FCPCodebase
# Path: agent/Base_Agent.py
class Base_Agent():
all_agents = []
def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,
enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None:
self.radio = None # hear_message may be called during Server_Comm instantiation
self.logger = Logger(enable_log, f"{team_name}_{unum}")
self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host)
self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback)
self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server)
self.inv_kinematics = Inverse_Kinematics(self.world.robot)
self.behavior = Behavior(self)
self.path_manager = Path_Manager(self.world)
self.radio = Radio(self.world, self.scom.commit_announcement)
self.behavior.create_behaviors()
Base_Agent.all_agents.append(self)
@abstractmethod
def think_and_send(self):
pass
def hear_message(self, msg:bytearray, direction, timestamp:float) -> None:
if direction != "self" and self.radio is not None:
self.radio.receive(msg)
def terminate(self):
# close shared monitor socket if this is the last agent on this thread
self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1))
Base_Agent.all_agents.remove(self)
@staticmethod
def terminate_all():
for o in Base_Agent.all_agents:
o.scom.close(True) # close shared monitor socket, if it exists
Base_Agent.all_agents = []
# Path: behaviors/custom/Step/Step_Generator.py
class Step_Generator():
GRAVITY = 9.81
Z0 = 0.2
def __init__(self, feet_y_dev, sample_time, max_ankle_z) -> None:
self.feet_y_dev = feet_y_dev
self.sample_time = sample_time
self.state_is_left_active = False
self.state_current_ts = 0
self.switch = False # switch legs
        self.external_progress = 0 # non-overlapped progress
self.max_ankle_z = max_ankle_z
def get_target_positions(self, reset, ts_per_step, z_span, z_extension):
'''
Get target positions for each foot
Returns
-------
target : `tuple`
(Left leg y, Left leg z, Right leg y, Right leg z)
'''
assert type(ts_per_step)==int and ts_per_step > 0, "ts_per_step must be a positive integer!"
#-------------------------- Advance 1ts
if reset:
self.ts_per_step = ts_per_step # step duration in time steps
self.swing_height = z_span
self.max_leg_extension = z_extension # maximum distance between ankle to center of both hip joints
self.state_current_ts = 0
self.state_is_left_active = False
self.switch = False
elif self.switch:
self.state_current_ts = 0
self.state_is_left_active = not self.state_is_left_active # switch leg
self.switch = False
else:
self.state_current_ts += 1
#-------------------------- Compute COM.y
W = math.sqrt(self.Z0/self.GRAVITY)
step_time = self.ts_per_step * self.sample_time
time_delta = self.state_current_ts * self.sample_time
y0 = self.feet_y_dev # absolute initial y value
y_swing = y0 + y0 * ( math.sinh((step_time - time_delta)/W) + math.sinh(time_delta/W) ) / math.sinh(-step_time/W)
#-------------------------- Cap maximum extension and swing height
z0 = min(-self.max_leg_extension, self.max_ankle_z) # capped initial z value
zh = min(self.swing_height, self.max_ankle_z - z0) # capped swing height
#-------------------------- Compute Z Swing
progress = self.state_current_ts / self.ts_per_step
self.external_progress = self.state_current_ts / (self.ts_per_step-1)
active_z_swing = zh * math.sin(math.pi * progress)
#-------------------------- Accept new parameters after final step
if self.state_current_ts + 1 >= self.ts_per_step:
self.ts_per_step = ts_per_step # step duration in time steps
self.swing_height = z_span
self.max_leg_extension = z_extension # maximum distance between ankle to center of both hip joints
self.switch = True
#-------------------------- Distinguish active leg
if self.state_is_left_active:
return y0+y_swing, active_z_swing+z0, -y0+y_swing, z0
else:
return y0-y_swing, z0, -y0-y_swing, active_z_swing+z0
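# --- Editor's illustrative sketch (not part of the original repository) ---
# Driving the step generator above for a few control cycles; the numeric
# arguments are arbitrary placeholders chosen only to show the call pattern.
sg = Step_Generator(feet_y_dev=0.045, sample_time=0.02, max_ankle_z=-0.25)
for t in range(16):
    ly, lz, ry, rz = sg.get_target_positions(reset=(t == 0), ts_per_step=8,
                                             z_span=0.02, z_extension=0.3)
    # ly/lz and ry/rz are target y/z ankle positions for the left/right leg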
# Path: math_ops/Math_Ops.py
class Math_Ops():
'''
This class provides general mathematical operations that are not directly available through numpy
'''
@staticmethod
def deg_sph2cart(spherical_vec):
''' Converts SimSpark's spherical coordinates in degrees to cartesian coordinates '''
r = spherical_vec[0]
h = spherical_vec[1] * pi / 180
v = spherical_vec[2] * pi / 180
return np.array([r * cos(v) * cos(h), r * cos(v) * sin(h), r * sin(v)])
@staticmethod
def deg_sin(deg_angle):
''' Returns sin of degrees '''
return sin(deg_angle * pi / 180)
@staticmethod
def deg_cos(deg_angle):
''' Returns cos of degrees '''
return cos(deg_angle * pi / 180)
@staticmethod
def to_3d(vec_2d, value=0) -> np.ndarray:
''' Returns new 3d vector from 2d vector '''
return np.append(vec_2d,value)
@staticmethod
def to_2d_as_3d(vec_3d) -> np.ndarray:
''' Returns new 3d vector where the 3rd dimension is zero '''
vec_2d_as_3d = np.copy(vec_3d)
vec_2d_as_3d[2] = 0
return vec_2d_as_3d
@staticmethod
def normalize_vec(vec) -> np.ndarray:
''' Divides vector by its length '''
size = np.linalg.norm(vec)
if size == 0: return vec
return vec / size
@staticmethod
def get_active_directory(dir:str) -> str:
global GLOBAL_DIR
return GLOBAL_DIR + dir
@staticmethod
def acos(val):
''' arccosine function that limits input '''
return acos( np.clip(val,-1,1) )
@staticmethod
def asin(val):
''' arcsine function that limits input '''
return asin( np.clip(val,-1,1) )
@staticmethod
def normalize_deg(val):
''' normalize val in range [-180,180[ '''
return (val + 180.0) % 360 - 180
@staticmethod
def normalize_rad(val):
''' normalize val in range [-pi,pi[ '''
return (val + pi) % (2*pi) - pi
@staticmethod
def deg_to_rad(val):
''' convert degrees to radians '''
return val * 0.01745329251994330
@staticmethod
def rad_to_deg(val):
''' convert radians to degrees '''
return val * 57.29577951308232
@staticmethod
def vector_angle(vector, is_rad=False):
''' angle (degrees or radians) of 2D vector '''
if is_rad:
return atan2(vector[1], vector[0])
else:
return atan2(vector[1], vector[0]) * 180 / pi
@staticmethod
def vectors_angle(vec1, vec2, is_rad=False):
''' get angle between vectors (degrees or radians) '''
ang_rad = acos(np.dot(Math_Ops.normalize_vec(vec1),Math_Ops.normalize_vec(vec2)))
return ang_rad if is_rad else ang_rad * 180 / pi
@staticmethod
def vector_from_angle(angle, is_rad=False):
''' unit vector with direction given by `angle` '''
if is_rad:
return np.array([cos(angle), sin(angle)], float)
else:
return np.array([Math_Ops.deg_cos(angle), Math_Ops.deg_sin(angle)], float)
@staticmethod
def target_abs_angle(pos2d, target, is_rad=False):
''' angle (degrees or radians) of vector (target-pos2d) '''
if is_rad:
return atan2(target[1]-pos2d[1], target[0]-pos2d[0])
else:
return atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi
@staticmethod
def target_rel_angle(pos2d, ori, target, is_rad=False):
''' relative angle (degrees or radians) of target if we're located at 'pos2d' with orientation 'ori' (degrees or radians) '''
if is_rad:
return Math_Ops.normalize_rad( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) - ori )
else:
return Math_Ops.normalize_deg( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi - ori )
@staticmethod
def rotate_2d_vec(vec, angle, is_rad=False):
''' rotate 2D vector anticlockwise around the origin by `angle` '''
cos_ang = cos(angle) if is_rad else cos(angle * pi / 180)
sin_ang = sin(angle) if is_rad else sin(angle * pi / 180)
return np.array([cos_ang*vec[0]-sin_ang*vec[1], sin_ang*vec[0]+cos_ang*vec[1]])
@staticmethod
def distance_point_to_line(p:np.ndarray, a:np.ndarray, b:np.ndarray):
'''
Distance between point p and 2d line 'ab' (and side where p is)
Parameters
----------
a : ndarray
2D point that defines line
b : ndarray
2D point that defines line
p : ndarray
2D point
Returns
-------
distance : float
distance between line and point
side : str
if we are at a, looking at b, p may be at our "left" or "right"
'''
line_len = np.linalg.norm(b-a)
if line_len == 0: # assumes vertical line
dist = sdist = np.linalg.norm(p-a)
else:
sdist = np.cross(b-a,p-a)/line_len
dist = abs(sdist)
return dist, "left" if sdist>0 else "right"
@staticmethod
def distance_point_to_segment(p:np.ndarray, a:np.ndarray, b:np.ndarray):
''' Distance from point p to 2d line segment 'ab' '''
ap = p-a
ab = b-a
ad = Math_Ops.vector_projection(ap,ab)
# Is d in ab? We can find k in (ad = k * ab) without computing any norm
# we use the largest dimension of ab to avoid division by 0
k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]
if k <= 0: return np.linalg.norm(ap)
elif k >= 1: return np.linalg.norm(p-b)
else: return np.linalg.norm(p-(ad + a)) # p-d
@staticmethod
def distance_point_to_ray(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):
''' Distance from point p to 2d ray '''
rp = p-ray_start
rd = Math_Ops.vector_projection(rp,ray_direction)
# Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm
# we use the largest dimension of ray_direction to avoid division by 0
k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]
if k <= 0: return np.linalg.norm(rp)
else: return np.linalg.norm(p-(rd + ray_start)) # p-d
@staticmethod
def closest_point_on_ray_to_point(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):
''' Point on ray closest to point p '''
rp = p-ray_start
rd = Math_Ops.vector_projection(rp,ray_direction)
# Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm
# we use the largest dimension of ray_direction to avoid division by 0
k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]
if k <= 0: return ray_start
else: return rd + ray_start
@staticmethod
def does_circle_intersect_segment(p:np.ndarray, r, a:np.ndarray, b:np.ndarray):
''' Returns true if circle (center p, radius r) intersect 2d line segment '''
ap = p-a
ab = b-a
ad = Math_Ops.vector_projection(ap,ab)
# Is d in ab? We can find k in (ad = k * ab) without computing any norm
# we use the largest dimension of ab to avoid division by 0
k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]
if k <= 0: return np.dot(ap,ap) <= r*r
elif k >= 1: return np.dot(p-b,p-b) <= r*r
dp = p-(ad + a)
return np.dot(dp,dp) <= r*r
@staticmethod
def vector_projection(a:np.ndarray, b:np.ndarray):
''' Vector projection of a onto b '''
b_dot = np.dot(b,b)
return b * np.dot(a,b) / b_dot if b_dot != 0 else b
@staticmethod
def do_noncollinear_segments_intersect(a,b,c,d):
'''
Check if 2d line segment 'ab' intersects with noncollinear 2d line segment 'cd'
Explanation: https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
'''
ccw = lambda a,b,c: (c[1]-a[1]) * (b[0]-a[0]) > (b[1]-a[1]) * (c[0]-a[0])
return ccw(a,c,d) != ccw(b,c,d) and ccw(a,b,c) != ccw(a,b,d)
@staticmethod
def intersection_segment_opp_goal(a:np.ndarray, b:np.ndarray):
''' Computes the intersection point of 2d segment 'ab' and the opponents' goal (front line) '''
vec_x = b[0]-a[0]
# Collinear intersections are not accepted
if vec_x == 0: return None
k = (15.01-a[0])/vec_x
# No collision
if k < 0 or k > 1: return None
intersection_pt = a + (b-a) * k
if -1.01 <= intersection_pt[1] <= 1.01:
return intersection_pt
else:
return None
@staticmethod
def intersection_circle_opp_goal(p:np.ndarray, r):
'''
Computes the intersection segment of circle (center p, radius r) and the opponents' goal (front line)
Only the y coordinates are returned since the x coordinates are always equal to 15
'''
x_dev = abs(15-p[0])
if x_dev > r:
return None # no intersection with x=15
y_dev = sqrt(r*r - x_dev*x_dev)
p1 = max(p[1] - y_dev, -1.01)
p2 = min(p[1] + y_dev, 1.01)
if p1 == p2:
return p1 # return the y coordinate of a single intersection point
elif p2 < p1:
return None # no intersection
else:
return p1, p2 # return the y coordinates of the intersection segment
@staticmethod
def distance_point_to_opp_goal(p:np.ndarray):
''' Distance between point 'p' and the opponents' goal (front line) '''
if p[1] < -1.01:
return np.linalg.norm( p-(15,-1.01) )
elif p[1] > 1.01:
return np.linalg.norm( p-(15, 1.01) )
else:
return abs(15-p[0])
@staticmethod
def circle_line_segment_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):
""" Find the points at which a circle intersects a line-segment. This can happen at 0, 1, or 2 points.
:param circle_center: The (x, y) location of the circle center
:param circle_radius: The radius of the circle
:param pt1: The (x, y) location of the first point of the segment
:param pt2: The (x, y) location of the second point of the segment
:param full_line: True to find intersections along full line - not just in the segment. False will just return intersections within the segment.
:param tangent_tol: Numerical tolerance at which we decide the intersections are close enough to consider it a tangent
:return Sequence[Tuple[float, float]]: A list of length 0, 1, or 2, where each element is a point at which the circle intercepts a line segment.
Note: We follow: http://mathworld.wolfram.com/Circle-LineIntersection.html
"""
(p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center
(x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)
dx, dy = (x2 - x1), (y2 - y1)
dr = (dx ** 2 + dy ** 2)**.5
big_d = x1 * y2 - x2 * y1
discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2
if discriminant < 0: # No intersection between circle and line
return []
else: # There may be 0, 1, or 2 intersections with the segment
intersections = [
(cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant**.5) / dr ** 2,
cy + (-big_d * dx + sign * abs(dy) * discriminant**.5) / dr ** 2)
for sign in ((1, -1) if dy < 0 else (-1, 1))] # This makes sure the order along the segment is correct
if not full_line: # If only considering the segment, filter out intersections that do not fall within the segment
fraction_along_segment = [
(xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy for xi, yi in intersections]
intersections = [pt for pt, frac in zip(
intersections, fraction_along_segment) if 0 <= frac <= 1]
# If line is tangent to circle, return just one point (as both intersections have same location)
if len(intersections) == 2 and abs(discriminant) <= tangent_tol:
return [intersections[0]]
else:
return intersections
# adapted from https://stackoverflow.com/questions/3252194/numpy-and-line-intersections
@staticmethod
def get_line_intersection(a1, a2, b1, b2):
"""
Returns the point of intersection of the lines passing through a2,a1 and b2,b1.
a1: [x, y] a point on the first line
a2: [x, y] another point on the first line
b1: [x, y] a point on the second line
b2: [x, y] another point on the second line
"""
s = np.vstack([a1,a2,b1,b2]) # s for stacked
h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous
l1 = np.cross(h[0], h[1]) # get first line
l2 = np.cross(h[2], h[3]) # get second line
x, y, z = np.cross(l1, l2) # point of intersection
if z == 0: # lines are parallel
return np.array([float('inf'), float('inf')])
return np.array([x/z, y/z],float)
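# --- Editor's illustrative sketch (not part of the original repository) ---
# Small usage example for a few of the helpers above; the sample positions
# and orientation are arbitrary values chosen only for illustration.
import numpy as np
robot_pos = np.array([0.0, 0.0])
ball_pos = np.array([1.0, 1.0])
print(Math_Ops.target_abs_angle(robot_pos, ball_pos))      # 45.0 (degrees)
print(Math_Ops.target_rel_angle(robot_pos, 90, ball_pos))  # -45.0 (degrees)
print(Math_Ops.normalize_deg(190))                         # -170.0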
# Path: behaviors/custom/Walk/Env.py
from agent.Base_Agent import Base_Agent
from behaviors.custom.Step.Step_Generator import Step_Generator
from math_ops.Math_Ops import Math_Ops as M
import math
import numpy as np
class Env():
def __init__(self, base_agent : Base_Agent) -> None:
self.world = base_agent.world
self.ik = base_agent.inv_kinematics
# State space
self.obs = np.zeros(63, np.float32)
# Step behavior defaults
self.STEP_DUR = 8
self.STEP_Z_SPAN = 0.02
self.STEP_Z_MAX = 0.70
# IK
nao_specs = self.ik.NAO_SPECS
self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height
feet_y_dev = nao_specs[0] * 1.12 # wider step
sample_time = self.world.robot.STEPTIME
max_ankle_z = nao_specs[5]
self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z)
self.DEFAULT_ARMS = np.array([-90,-90,8,8,90,90,70,70],np.float32)
self.walk_rel_orientation = None
self.walk_rel_target = None
self.walk_distance = None
def observe(self, init=False):
r = self.world.robot
if init: # reset variables
self.act = np.zeros(16, np.float32) # memory variable
self.step_counter = 0
# index observation naive normalization
self.obs[0] = min(self.step_counter,15*8) /100 # simple counter: 0,1,2,3...
self.obs[1] = r.loc_head_z *3 # z coordinate (torso)
self.obs[2] = r.loc_head_z_vel /2 # z velocity (torso)
self.obs[3] = r.imu_torso_roll /15 # absolute torso roll in deg
self.obs[4] = r.imu_torso_pitch /15 # absolute torso pitch in deg
self.obs[5:8] = r.gyro /100 # gyroscope
self.obs[8:11] = r.acc /10 # accelerometer
self.obs[11:17] = r.frp.get('lf', np.zeros(6)) * (10,10,10,0.01,0.01,0.01) # left foot: relative point of origin (p) and force vector (f) -> (px,py,pz,fx,fy,fz)*
self.obs[17:23] = r.frp.get('rf', np.zeros(6)) * (10,10,10,0.01,0.01,0.01) # right foot: relative point of origin (p) and force vector (f) -> (px,py,pz,fx,fy,fz)*
# *if foot is not touching the ground, then (px=0,py=0,pz=0,fx=0,fy=0,fz=0)
# Joints: Forward kinematics for ankles + feet rotation + arms (pitch + roll)
| rel_lankle = self.ik.get_body_part_pos_relative_to_hip("lankle") # ankle position relative to center of both hip joints |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: legalontech-oss/simple-search-query-parser-sample
# Path: src/parser/SimpleSearchQueryLexer.py
class SimpleSearchQueryLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
ALPHABETS = 3
OR_OPERATOR = 4
AND_OPERATOR = 5
NOT_OPERATOR = 6
WHITE_SPACES = 7
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'('", "')'", "'OR'", "'AND'", "'NOT'" ]
symbolicNames = [ "<INVALID>",
"ALPHABETS", "OR_OPERATOR", "AND_OPERATOR", "NOT_OPERATOR",
"WHITE_SPACES" ]
ruleNames = [ "T__0", "T__1", "ALPHABETS", "OR_OPERATOR", "AND_OPERATOR",
"NOT_OPERATOR", "WHITE_SPACES" ]
grammarFileName = "SimpleSearchQuery.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.13.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
# Path: src/parser/SimpleSearchQueryParser.py
class SimpleSearchQueryParser ( Parser ):
grammarFileName = "SimpleSearchQuery.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'('", "')'", "<INVALID>", "'OR'", "'AND'",
"'NOT'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "ALPHABETS",
"OR_OPERATOR", "AND_OPERATOR", "NOT_OPERATOR", "WHITE_SPACES" ]
RULE_expr = 0
RULE_term = 1
RULE_factor = 2
RULE_keywords = 3
RULE_alphabets = 4
ruleNames = [ "expr", "term", "factor", "keywords", "alphabets" ]
EOF = Token.EOF
T__0=1
T__1=2
ALPHABETS=3
OR_OPERATOR=4
AND_OPERATOR=5
NOT_OPERATOR=6
WHITE_SPACES=7
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.13.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ExprContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def term(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.TermContext,0)
def expr(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.ExprContext,0)
def OR_OPERATOR(self):
return self.getToken(SimpleSearchQueryParser.OR_OPERATOR, 0)
def getRuleIndex(self):
return SimpleSearchQueryParser.RULE_expr
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitExpr" ):
return visitor.visitExpr(self)
else:
return visitor.visitChildren(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = SimpleSearchQueryParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 0
self.enterRecursionRule(localctx, 0, self.RULE_expr, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 11
self.term(0)
self._ctx.stop = self._input.LT(-1)
self.state = 18
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = SimpleSearchQueryParser.ExprContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 13
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 14
self.match(SimpleSearchQueryParser.OR_OPERATOR)
self.state = 15
self.term(0)
self.state = 20
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,0,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class TermContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def factor(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.FactorContext,0)
def term(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.TermContext,0)
def AND_OPERATOR(self):
return self.getToken(SimpleSearchQueryParser.AND_OPERATOR, 0)
def getRuleIndex(self):
return SimpleSearchQueryParser.RULE_term
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitTerm" ):
return visitor.visitTerm(self)
else:
return visitor.visitChildren(self)
def term(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = SimpleSearchQueryParser.TermContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 2
self.enterRecursionRule(localctx, 2, self.RULE_term, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 22
self.factor()
self._ctx.stop = self._input.LT(-1)
self.state = 29
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,1,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
localctx = SimpleSearchQueryParser.TermContext(self, _parentctx, _parentState)
self.pushNewRecursionContext(localctx, _startState, self.RULE_term)
self.state = 24
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 25
self.match(SimpleSearchQueryParser.AND_OPERATOR)
self.state = 26
self.factor()
self.state = 31
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,1,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class FactorContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def keywords(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.KeywordsContext,0)
def NOT_OPERATOR(self):
return self.getToken(SimpleSearchQueryParser.NOT_OPERATOR, 0)
def getRuleIndex(self):
return SimpleSearchQueryParser.RULE_factor
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitFactor" ):
return visitor.visitFactor(self)
else:
return visitor.visitChildren(self)
def factor(self):
localctx = SimpleSearchQueryParser.FactorContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_factor)
try:
self.state = 35
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [1, 3]:
self.enterOuterAlt(localctx, 1)
self.state = 32
self.keywords()
pass
elif token in [6]:
self.enterOuterAlt(localctx, 2)
self.state = 33
self.match(SimpleSearchQueryParser.NOT_OPERATOR)
self.state = 34
self.keywords()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class KeywordsContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.ExprContext,0)
def alphabets(self):
return self.getTypedRuleContext(SimpleSearchQueryParser.AlphabetsContext,0)
def getRuleIndex(self):
return SimpleSearchQueryParser.RULE_keywords
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitKeywords" ):
return visitor.visitKeywords(self)
else:
return visitor.visitChildren(self)
def keywords(self):
localctx = SimpleSearchQueryParser.KeywordsContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_keywords)
try:
self.state = 42
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [1]:
self.enterOuterAlt(localctx, 1)
self.state = 37
self.match(SimpleSearchQueryParser.T__0)
self.state = 38
self.expr(0)
self.state = 39
self.match(SimpleSearchQueryParser.T__1)
pass
elif token in [3]:
self.enterOuterAlt(localctx, 2)
self.state = 41
self.alphabets()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AlphabetsContext(ParserRuleContext):
__slots__ = 'parser'
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ALPHABETS(self):
return self.getToken(SimpleSearchQueryParser.ALPHABETS, 0)
def getRuleIndex(self):
return SimpleSearchQueryParser.RULE_alphabets
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAlphabets" ):
return visitor.visitAlphabets(self)
else:
return visitor.visitChildren(self)
def alphabets(self):
localctx = SimpleSearchQueryParser.AlphabetsContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_alphabets)
try:
self.enterOuterAlt(localctx, 1)
self.state = 44
self.match(SimpleSearchQueryParser.ALPHABETS)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[0] = self.expr_sempred
self._predicates[1] = self.term_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 1)
def term_sempred(self, localctx:TermContext, predIndex:int):
if predIndex == 1:
return self.precpred(self._ctx, 1)
# Path: src/QueryBuilder.py
class QueryBuilder:
def build(self, ast: dict) -> dict:
return {"query": self.build_bool_query(ast)}
def build_bool_query(self, ast: dict) -> dict:
operator = ast.get("operator")
if operator:
match operator:
case "AND":
return {
"bool": {
"must": [
self.build_bool_query(ast["children"][0]),
self.build_bool_query(ast["children"][1]),
]
}
}
case "OR":
return {
"bool": {
"should": [
self.build_bool_query(ast["children"][0]),
self.build_bool_query(ast["children"][1]),
],
"minimum_should_match": 1,
},
}
case "NOT":
return {
"bool": {
"must_not": [
self.build_bool_query(ast["children"][0]),
],
}
}
else:
return {
"match": {
"fruit": {
"query": ast["value"],
}
}
}
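For readability, here is a minimal usage sketch of the QueryBuilder class above (not part of the repository). The AST layout mirrors what VisitorInterp emits further down; the field name "fruit" is hard-coded in build_bool_query.

sample_ast = {
    "operator": "AND",
    "children": [
        {"value": "apple"},
        {"operator": "NOT", "children": [{"value": "banana"}]},
    ],
}
query = QueryBuilder().build(sample_ast)
# query == {"query": {"bool": {"must": [
#     {"match": {"fruit": {"query": "apple"}}},
#     {"bool": {"must_not": [{"match": {"fruit": {"query": "banana"}}}]}},
# ]}}}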
# Path: src/VisitorInterp.py
class VisitorInterp(SimpleSearchQueryVisitor):
def visitExpr(self, ctx: SimpleSearchQueryParser.ExprContext):
if ctx.getChildCount() == 3:
opc = ctx.getChild(1).getText()
v1 = self.visit(ctx.getChild(0))
v2 = self.visit(ctx.getChild(2))
return {"operator": opc, "children": [v1, v2]}
else:
return self.visit(ctx.getChild(0))
def visitTerm(self, ctx: SimpleSearchQueryParser.TermContext):
if ctx.getChildCount() == 3:
opc = ctx.getChild(1).getText()
v1 = self.visit(ctx.getChild(0))
v2 = self.visit(ctx.getChild(2))
return {"operator": opc, "children": [v1, v2]}
else:
return self.visit(ctx.getChild(0))
def visitFactor(self, ctx: SimpleSearchQueryParser.FactorContext):
if ctx.getChildCount() == 2:
opc = ctx.getChild(0).getText()
v1 = self.visit(ctx.getChild(1))
return {"operator": opc, "children": [v1]}
else:
return self.visit(ctx.getChild(0))
def visitKeywords(self, ctx: SimpleSearchQueryParser.KeywordsContext):
if ctx.getChildCount() == 3:
return self.visit(ctx.getChild(1))
else:
return self.visit(ctx.getChild(0))
def visitAlphabets(self, ctx: SimpleSearchQueryParser.AlphabetsContext):
return {"value": ctx.getText()}
# Path: src/Driver2.py
import json
import sys
from antlr4 import CommonTokenStream, FileStream
from src.parser.SimpleSearchQueryLexer import SimpleSearchQueryLexer
from src.parser.SimpleSearchQueryParser import SimpleSearchQueryParser
from src.QueryBuilder import QueryBuilder
from src.VisitorInterp import VisitorInterp
def main(argv):
input_stream = FileStream(argv[1])
lexer = SimpleSearchQueryLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = SimpleSearchQueryParser(stream)
tree = parser.expr()
if parser.getNumberOfSyntaxErrors() > 0:
print("syntax errors")
else:
vinterp = VisitorInterp()
query_builder = QueryBuilder()
ref (expected next line): ast = vinterp.visit(tree)
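A plausible way Driver2.main could continue after that reference line (an assumption for illustration, not taken from the repository): hand the AST to QueryBuilder and print the JSON, then guard the entry point. The name es_query is illustrative.

        # hypothetical continuation inside the else-branch of main
        es_query = query_builder.build(ast)      # QueryBuilder.build is defined in src/QueryBuilder.py above
        print(json.dumps(es_query, indent=2))    # json and sys are already imported in Driver2.py

if __name__ == "__main__":
    main(sys.argv)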
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: thuiar/TCL-MAP
# Path: utils/functions.py
def restore_model(model, model_dir, device):
output_model_file = os.path.join(model_dir, 'pytorch_model.bin')
m = torch.load(output_model_file, map_location=device)
model.load_state_dict(m)
return model
# Path: utils/functions.py
def save_model(model, model_dir):
save_model = model.module if hasattr(model, 'module') else model
model_file = os.path.join(model_dir, 'pytorch_model.bin')
torch.save(save_model.state_dict(), model_file)
# Path: utils/functions.py
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, args, delta=1e-6):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
"""
self.patience = args.wait_patience
self.logger = logging.getLogger(args.logger_name)
self.monitor = args.eval_monitor
self.counter = 0
self.best_score = 1e8 if self.monitor == 'loss' else 1e-6
self.early_stop = False
self.delta = delta
self.best_model = None
def __call__(self, score, model):
better_flag = score <= (self.best_score - self.delta) if self.monitor == 'loss' else score >= (self.best_score + self.delta)
if better_flag:
self.counter = 0
self.best_model = copy.deepcopy(model)
self.best_score = score
else:
self.counter += 1
self.logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
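A minimal sketch of how EarlyStopping is driven (illustrative only). SimpleNamespace stands in for the project's parsed arguments, the attribute names follow what __init__ reads, and model=None is acceptable here only because copy.deepcopy(None) is a no-op; the real trainer passes a torch.nn.Module.

import logging
from types import SimpleNamespace

logging.basicConfig(level=logging.INFO)
args = SimpleNamespace(wait_patience=3, logger_name="tcl_map", eval_monitor="acc")
stopper = EarlyStopping(args)
for dev_score in [0.70, 0.72, 0.71, 0.71, 0.71]:
    stopper(dev_score, model=None)   # the third non-improving score trips early_stop
    if stopper.early_stop:
        break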
# Path: data/utils.py
def get_dataloader(args, data):
train_dataloader = DataLoader(data['train'], shuffle=True, batch_size = args.train_batch_size, num_workers = args.num_workers, pin_memory = True)
dev_dataloader = DataLoader(data['dev'], batch_size = args.eval_batch_size, num_workers = args.num_workers, pin_memory = True)
test_dataloader = DataLoader(data['test'], batch_size = args.eval_batch_size, num_workers = args.num_workers, pin_memory = True)
return {
'train': train_dataloader,
'dev': dev_dataloader,
'test': test_dataloader
}
# Path: utils/metrics.py
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = float(self.sum) / self.count
# Path: utils/metrics.py
class Metrics(object):
"""
column of confusion matrix: predicted index
row of confusion matrix: target index
"""
def __init__(self, args):
self.logger = logging.getLogger(args.logger_name)
self.eval_metrics = ['acc', 'f1', 'prec', 'rec', 'weighted_f1', 'weighted_prec', 'weighted_rec']
def __call__(self, y_true, y_pred, show_results = False):
acc_score = self._acc_score(y_true, y_pred)
macro_f1, weighted_f1 = self._f1_score(y_true, y_pred)
macro_prec, weighted_prec = self._precision_score(y_true, y_pred)
macro_rec, weighted_rec = self._recall_score(y_true, y_pred)
eval_results = {
'acc': acc_score,
'f1': macro_f1,
'weighted_f1': weighted_f1,
'prec': macro_prec,
'weighted_prec': weighted_prec,
'rec': macro_rec,
'weighted_rec': weighted_rec
}
if show_results:
self._show_confusion_matrix(y_true, y_pred)
self.logger.info("***** In-domain Evaluation results *****")
for key in sorted(eval_results.keys()):
self.logger.info(" %s = %s", key, str(round(eval_results[key], 4)))
return eval_results
def _acc_score(self, y_true, y_pred):
return accuracy_score(y_true, y_pred)
def _f1_score(self, y_true, y_pred):
return f1_score(y_true, y_pred, average='macro'), f1_score(y_true, y_pred, average='weighted')
def _precision_score(self, y_true, y_pred):
return precision_score(y_true, y_pred, average='macro'), precision_score(y_true, y_pred, average='weighted')
def _recall_score(self, y_true, y_pred):
return recall_score(y_true, y_pred, average='macro'), recall_score(y_true, y_pred, average='weighted')
def _show_confusion_matrix(self, y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
self.logger.info("***** Test: Confusion Matrix *****")
self.logger.info("%s", str(cm))
# Path: methods/TCL_MAP/model.py
class TCL_MAP(nn.Module):
def __init__(self, args):
super(TCL_MAP, self).__init__()
self.model = MAP_Model.from_pretrained(args.text_backbone, cache_dir = args.cache_path, args = args)
self.cons_model = Cons_Model.from_pretrained(args.text_backbone, cache_dir = args.cache_path, args = args)
self.ctx_vectors = self._init_ctx(args)
self.ctx = nn.Parameter(self.ctx_vectors)
self.label_len = args.label_len
args.feat_size = args.text_feat_dim
args.video_feat_size = args.video_feat_dim
args.audio_feat_size = args.audio_feat_dim
def _init_ctx(self, args):
ctx = torch.empty(args.prompt_len, args.text_feat_dim, dtype=torch.float)
nn.init.trunc_normal_(ctx)
return ctx
def forward(self, text_feats, video_feats, audio_feats, cons_text_feats, condition_idx):
video_feats = video_feats.float()
audio_feats = audio_feats.float()
# process normal sample
outputs, pooled_output, condition, generated_ctx = self.model(
text = text_feats,
visual = video_feats,
acoustic = audio_feats,
condition_idx=condition_idx,
ctx=self.ctx
)
# process augmented sample
cons_input_ids, cons_input_mask, cons_segment_ids = cons_text_feats[:, 0], cons_text_feats[:, 1], cons_text_feats[:, 2]
cons_outputs = self.cons_model(
input_ids = cons_input_ids,
condition_idx=condition_idx,
ctx=generated_ctx,
token_type_ids = cons_segment_ids,
attention_mask = cons_input_mask
)
last_hidden_state = cons_outputs.last_hidden_state
cons_condition_tuple = tuple(last_hidden_state[torch.arange(last_hidden_state.shape[0]), condition_idx.view(-1) + i, :].unsqueeze(1) for i in range(self.label_len))
cons_condition = torch.cat(cons_condition_tuple, dim=1)
# return classification feature and Label/[MASK] token representation
return outputs[0], pooled_output, condition.mean(dim=1), cons_condition.mean(dim=1)
# Path: methods/TCL_MAP/loss.py
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all'):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
features = F.normalize(features, dim=2)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
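A self-contained sketch of calling SupConLoss (an illustration, not repository code). Shapes follow the docstring: features is [bsz, n_views, dim] and labels is [bsz]; in manager.py below the two views are the Label/[MASK] representations of the original and augmented samples.

import torch

criterion = SupConLoss(temperature=0.07)
features = torch.randn(8, 2, 128)            # 8 samples, 2 views each, 128-d features
labels = torch.randint(0, 4, (8,))           # class ids; same-class pairs become positives
loss = criterion(features, labels=labels)    # scalar tensor, ready for loss.backward()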
# Path: methods/TCL_MAP/manager.py
import torch
import torch.nn.functional as F
import logging
import numpy as np
from torch import nn
from utils.functions import restore_model, save_model, EarlyStopping
from tqdm import trange, tqdm
from data.utils import get_dataloader
from utils.metrics import AverageMeter, Metrics
from transformers import AdamW, get_linear_schedule_with_warmup
from .model import TCL_MAP
from .loss import SupConLoss
early_stopping = EarlyStopping(args)
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
self.model.train()
loss_record = AverageMeter()
cons_loss_record = AverageMeter()
cls_loss_record = AverageMeter()
for step, batch in enumerate(tqdm(self.train_dataloader, desc="Iteration")):
text_feats = batch['text_feats'].to(self.device)
cons_text_feats = batch['cons_text_feats'].to(self.device)
condition_idx = batch['condition_idx'].to(self.device)
video_feats = batch['video_feats'].to(self.device)
audio_feats = batch['audio_feats'].to(self.device)
label_ids = batch['label_ids'].to(self.device)
with torch.set_grad_enabled(True):
logits, _, condition, cons_condition = self.model(text_feats, video_feats, audio_feats, cons_text_feats, condition_idx)
cons_feature = torch.cat((condition.unsqueeze(1), cons_condition.unsqueeze(1)), dim=1)
cons_loss = self.cons_criterion(cons_feature)
cls_loss = self.criterion(logits, label_ids)
loss = cls_loss + cons_loss
self.optimizer.zero_grad()
loss.backward()
loss_record.update(loss.item(), label_ids.size(0))
cons_loss_record.update(cons_loss.item(), label_ids.size(0))
cls_loss_record.update(cls_loss.item(), label_ids.size(0))
if args.grad_clip != -1.0:
nn.utils.clip_grad_value_([param for param in self.model.parameters() if param.requires_grad], args.grad_clip)
self.optimizer.step()
self.scheduler.step()
outputs = self._get_outputs(args, self.eval_dataloader)
eval_score = outputs[args.eval_monitor]
eval_results = {
'train_loss': round(loss_record.avg, 4),
'cons_loss': round(cons_loss_record.avg, 4),
'cls_loss': round(cls_loss_record.avg, 4),
'eval_score': round(eval_score, 4),
'best_eval_score': round(early_stopping.best_score, 4),
}
self.logger.info("***** Epoch: %s: Eval results *****", str(epoch + 1))
for key in eval_results.keys():
self.logger.info(" %s = %s", key, str(eval_results[key]))
early_stopping(eval_score, self.model)
if early_stopping.early_stop:
self.logger.info(f'EarlyStopping at epoch {epoch + 1}')
break
self.best_eval_score = early_stopping.best_score
self.model = early_stopping.best_model
if args.save_model:
self.logger.info('Trained models are saved in %s', args.model_output_path)
save_model(self.model, args.model_output_path)
def _get_outputs(self, args, dataloader, show_results = False):
self.model.eval()
total_labels = torch.empty(0,dtype=torch.long).to(self.device)
total_preds = torch.empty(0,dtype=torch.long).to(self.device)
total_logits = torch.empty((0, args.num_labels)).to(self.device)
total_features = torch.empty((0, args.feat_size)).to(self.device)
for batch in tqdm(dataloader, desc="Iteration"):
text_feats = batch['text_feats'].to(self.device)
cons_text_feats = batch['cons_text_feats'].to(self.device)
condition_idx = batch['condition_idx'].to(self.device)
video_feats = batch['video_feats'].to(self.device)
audio_feats = batch['audio_feats'].to(self.device)
label_ids = batch['label_ids'].to(self.device)
with torch.set_grad_enabled(False):
logits, features, condition, cons_condition = self.model(text_feats, video_feats, audio_feats, cons_text_feats, condition_idx)
total_logits = torch.cat((total_logits, logits))
total_labels = torch.cat((total_labels, label_ids))
total_features = torch.cat((total_features, features))
total_probs = F.softmax(total_logits.detach(), dim=1)
total_maxprobs, total_preds = total_probs.max(dim = 1)
y_logit = total_logits.cpu().numpy()
y_pred = total_preds.cpu().numpy()
y_true = total_labels.cpu().numpy()
y_prob = total_maxprobs.cpu().numpy()
y_feat = total_features.cpu().numpy()
outputs = self.metrics(y_true, y_pred, show_results = show_results)
if args.save_pred and show_results:
np.save('y_true_' + str(args.seed) + '.npy', y_true)
np.save('y_pred_' + str(args.seed) + '.npy', y_pred)
outputs.update(
{
'y_prob': y_prob,
'y_logit': y_logit,
'y_true': y_true,
'y_pred': y_pred,
'y_feat': y_feat
}
)
ref (expected next line): return outputs
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CoolPointerException/Amigo
# Path: gui/tab_projects.py
class ProjectsTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# Select Directory
ttk.Label(frame, text="Project Directory:", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))
self.select_directory_button = ttk.Button(frame, text="Select Directory",
command=self.select_directory)
self.select_directory_button.pack(padx=10, pady=10)
# Project Name
ttk.Label(frame, text="Project Name:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
self.project_name_entry = ttk.Entry(frame)
self.project_name_entry.pack(fill=tk.X, padx=10, pady=10)
# Ignore List
ttk.Label(frame, text="Files to ignore:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
# Frame for Listbox and scrollbars
self.ignore_listbox_frame = tk.Frame(frame)
self.ignore_listbox_frame.pack(fill=tk.X, padx=10, pady=10)
# Vertical scrollbar
self.ignore_vscrollbar = tk.Scrollbar(self.ignore_listbox_frame, orient='vertical')
self.ignore_vscrollbar.pack(side='right', fill='y')
# Ignore Listbox
self.ignore_listbox = tk.Listbox(self.ignore_listbox_frame, height=5,
yscrollcommand=self.ignore_vscrollbar.set)
self.ignore_listbox.pack(side='left', fill='both', expand=True)
# Configure scrollbars
self.ignore_vscrollbar.config(command=self.ignore_listbox.yview)
# Frame for buttons
self.buttons_frame = tk.Frame(frame)
self.buttons_frame.pack(fill=tk.X, padx=10, pady=10)
# Select Files Button
self.select_files_button = ttk.Button(self.buttons_frame, text="Select", command=self.select_files)
self.select_files_button.pack(side='left', padx=5, pady=5)
ttk.Label(self.buttons_frame, text="Select which files should be ignored while indexing")\
.pack(side='left', padx=10, pady=2)
# Spacer frame to create gap between buttons
self.spacer_frame = tk.Frame(self.buttons_frame, width=40)
self.spacer_frame.pack(side='left', padx=5, pady=5)
# Delete Selected File Button
self.delete_file_button = ttk.Button(self.buttons_frame, text="Delete", command=self.delete_selected_file)
self.delete_file_button.pack(side='left', padx=5, pady=5)
ttk.Label(self.buttons_frame, text="Remove selected file from the list").pack(side='left', padx=10, pady=2)
# Ignore Directory List
ttk.Label(frame, text="Directories to ignore:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
# Frame for Listbox and scrollbars
self.ignore_directory_listbox_frame = tk.Frame(frame)
self.ignore_directory_listbox_frame.pack(fill=tk.X, padx=10, pady=10)
# Vertical scrollbar
self.ignore_directory_vscrollbar = tk.Scrollbar(self.ignore_directory_listbox_frame, orient='vertical')
self.ignore_directory_vscrollbar.pack(side='right', fill='y')
# Ignore Listbox
self.ignore_directory_listbox = tk.Listbox(self.ignore_directory_listbox_frame, height=5,
yscrollcommand=self.ignore_directory_vscrollbar.set)
self.ignore_directory_listbox.pack(side='left', fill='both', expand=True)
# Configure scrollbars
self.ignore_directory_vscrollbar.config(command=self.ignore_directory_listbox.yview)
# Frame for buttons
self.buttons_directory_frame = tk.Frame(frame)
self.buttons_directory_frame.pack(fill=tk.X, padx=10, pady=10)
# Select Files Button
self.select_directories_button = ttk.Button(self.buttons_directory_frame, text="Select",
command=self.select_ignored_directory)
self.select_directories_button.pack(side='left', padx=5, pady=5)
ttk.Label(self.buttons_directory_frame, text="Select which directories should be ignored while indexing") \
.pack(side='left', padx=10, pady=2)
# Spacer frame to create gap between buttons
self.spacer_directories_frame = tk.Frame(self.buttons_directory_frame, width=40)
self.spacer_directories_frame.pack(side='left', padx=5, pady=5)
# Delete Selected File Button
self.delete_directory_button = ttk.Button(self.buttons_directory_frame, text="Delete",
command=self.delete_selected_directory)
self.delete_directory_button.pack(side='left', padx=5, pady=5)
ttk.Label(self.buttons_directory_frame, text="Remove selected directory from the list").pack(side='left', padx=10, pady=2)
# Add new Project Button
self.execute_action_button = ttk.Button(frame, text="Create Index",
command=self.add_new_project, style='W.TButton')
self.execute_action_button.pack(padx=10, pady=10)
self.generating_label = ttk.Label(frame, text="")
self.generating_label.pack(padx=10, pady=10)
# Project List Area
ttk.Label(frame, text="Reindex Project:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
ttk.Label(frame, text="In case there is a .git folder in your root directory, you have an option to reindex "
"only the files that were added or modified since the last indexing.", wraplength=880)\
.pack(fill=tk.X, padx=10, pady=2)
self.reindex_frame = tk.Frame(frame)
self.reindex_frame.pack(fill=tk.X)
self.reindex_project = ttk.Combobox(self.reindex_frame)
self.reindex_project.pack(side='left', padx=10, pady=10, fill=tk.X, expand=True)
# Add new Project Button
self.reindex_project_button = ttk.Button(self.reindex_frame, text="Reindex", command=self.reindex_project_action)
self.reindex_project_button.pack(side='left', padx=10, pady=10)
# Logs
ttk.Label(frame, text="Logs:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
# Frame for Logs
self.logs_frame = tk.Frame(frame)
self.logs_frame.pack(fill=tk.X, padx=10, pady=10)
# Vertical scrollbar
self.logs_vscrollbar = tk.Scrollbar(self.logs_frame, orient='vertical')
self.logs_vscrollbar.pack(side='right', fill='y')
# Logs Listbox
self.logs_listbox = tk.Listbox(self.logs_frame, height=8,
yscrollcommand=self.logs_vscrollbar.set)
self.logs_listbox.pack(side='left', fill='both', expand=True)
# Configure scrollbars
self.logs_vscrollbar.config(command=self.logs_listbox.yview)
def select_directory(self):
directory = filedialog.askdirectory()
if directory:
self.selected_directory = directory
self.select_directory_button.config(text=f"Directory: {directory}")
def select_files(self):
if hasattr(self, 'selected_directory'):
files = filedialog.askopenfilenames(initialdir=self.selected_directory, title="Select Files",
filetypes=(("All files", "*.*"),))
for file in files:
if file not in self.ignore_listbox.get(0, tk.END): # Avoid duplicates
self.ignore_listbox.insert(tk.END, file)
else:
messagebox.showerror("Error", "Please select a directory first.")
def delete_selected_file(self):
selected_items = self.ignore_listbox.curselection()
# Must delete from the end to avoid shifting of the indices
for i in reversed(selected_items):
self.ignore_listbox.delete(i)
def select_ignored_directory(self):
if hasattr(self, 'selected_directory'):
directory = filedialog.askdirectory(initialdir=self.selected_directory, title="Select Directory")
if directory:
if directory not in self.ignore_directory_listbox.get(0, tk.END):
self.ignore_directory_listbox.insert(tk.END, directory)
def delete_selected_directory(self):
selected_items = self.ignore_directory_listbox.curselection()
# Must delete from the end to avoid shifting of the indices
for i in reversed(selected_items):
self.ignore_directory_listbox.delete(i)
def reindex_project_action(self):
is_valid = validate(self.root, [
Properties.REINDEX_PROJECT,
Properties.THREADS,
])
if not is_valid:
return
reindex_project = self.reindex_project.get()
# parse project name and project directory
reindex_project = reindex_project.split(" | ")
project_name = reindex_project[0]
project_dir = reindex_project[1]
f = open(project_name + "/ignored_files.txt", "r")
ignored_files = f.read().split("\n")
f = open(project_name + "/ignored_directories.txt", "r")
ignored_directories = f.read().split("\n")
self.create_index_project(
project_name,
project_dir,
ignored_files,
ignored_directories,
True
)
def add_new_project(self):
is_valid = validate(self.root, [
Properties.PROJECT_NAME,
Properties.SELECTED_DIRECTORY,
Properties.THREADS,
])
if not is_valid:
return
project_name = self.project_name_entry.get()
directory = self.selected_directory
files = self.ignore_listbox.get(0, tk.END)
directories = self.ignore_directory_listbox.get(0, tk.END)
self.create_index_project(
project_name,
directory,
files,
directories,
)
def create_index_project(
self,
project_name,
directory,
files,
directories,
is_reindex=False
):
if not is_api_type_set(self.root):
return
api_type = self.root.settings_tab.api_type.get()
threads = int(self.root.settings_tab.threads.get())
init_llama_index(self.root, api_type)
diff = None
if is_reindex:
diff = git_diff(project_name, self.root)
self.generating_label.config(text="Generating index, please wait...")
try:
thread = threading.Thread(target=add_project, args=(
self.root,
directory,
project_name,
files,
directories,
is_reindex,
diff,
threads
))
thread.start()
except Exception as e:
messagebox.showerror("Error", f"Error while generating index!")
self.generating_label.config(text="Finished!")
self.logs_listbox.insert(tk.END, f"Error: {e}")
# Path: gui/settings.py
def load_settings(self):
try:
with open(self.settings_file, 'r') as f:
settings = json.load(f)
if settings.get('api_version'):
self.settings_tab.api_version_entry.delete(0, tk.END)
self.settings_tab.api_version_entry.insert(0, settings.get('api_version'))
if settings.get('api_type'):
self.settings_tab.api_type.set(settings.get('api_type'))
self.settings_tab.show_only_relevant_settings()
if settings.get('api_key'):
self.settings_tab.api_key_entry.delete(0, tk.END)
self.settings_tab.api_key_entry.insert(0, settings.get('api_key'))
if settings.get('api_host'):
self.settings_tab.api_host_entry.delete(0, tk.END)
self.settings_tab.api_host_entry.insert(0, settings.get('api_host'))
if settings.get('gpt_model'):
self.settings_tab.gpt_model.delete(0, tk.END)
self.settings_tab.gpt_model.insert(0, settings.get('gpt_model'))
if settings.get('gpt_deployment'):
self.settings_tab.gpt_deployment.delete(0, tk.END)
self.settings_tab.gpt_deployment.insert(0, settings.get('gpt_deployment'))
if settings.get('embeddings_model'):
self.settings_tab.embeddings_model_entry.delete(0, tk.END)
self.settings_tab.embeddings_model_entry.insert(0, settings.get('embeddings_model'))
if settings.get('embeddings_deployment'):
self.settings_tab.embeddings_deployment_entry.delete(0, tk.END)
self.settings_tab.embeddings_deployment_entry.insert(0, settings.get('embeddings_deployment'))
if settings.get('prompt'):
self.settings_tab.prompt_entry.delete('1.0', tk.END)
self.settings_tab.prompt_entry.insert(INSERT, settings.get('prompt', 'This is a prompt'))
if settings.get('projects'):
projects = settings.get('projects')
for project in projects:
values = list(self.task_tab.selected_project["values"])
self.task_tab.selected_project["values"] = values + [project]
self.projects_tab.reindex_project["values"] = values + [project]
if settings.get('selected_project'):
self.task_tab.selected_project.set(settings.get('selected_project'))
self.projects_tab.reindex_project.set(settings.get('selected_project'))
if settings.get('max_tokens'):
self.settings_tab.max_tokens.delete(0, tk.END)
self.settings_tab.max_tokens.insert(INSERT, settings.get('max_tokens'))
if settings.get('threads'):
self.settings_tab.threads.delete(0, tk.END)
self.settings_tab.threads.insert(INSERT, settings.get('threads'))
except FileNotFoundError:
print("Settings file not found. Using default values.")
# Path: gui/settings.py
def save_settings(self):
settings = {
'api_version': self.settings_tab.api_version_entry.get(),
'api_type': self.settings_tab.api_type.get(),
'api_key': self.settings_tab.api_key_entry.get(),
'api_host': self.settings_tab.api_host_entry.get(),
'gpt_model': self.settings_tab.gpt_model.get(),
'gpt_deployment': self.settings_tab.gpt_deployment.get(),
'embeddings_model': self.settings_tab.embeddings_model_entry.get(),
'embeddings_deployment': self.settings_tab.embeddings_deployment_entry.get(),
'prompt': self.settings_tab.prompt_entry.get("1.0", tk.END).strip(),
'projects': list(self.task_tab.selected_project["values"]),
'selected_project': self.task_tab.selected_project.get(),
'max_tokens': self.settings_tab.max_tokens.get(),
'threads': self.settings_tab.threads.get()
}
with open(self.settings_file, 'w') as f:
json.dump(settings, f, indent=4)
# Path: gui/tab_settings.py
class SettingsTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# API Type
ttk.Label(frame, text="API Type:", style='W.Label').grid(column=0, row=0, sticky='E', padx=8)
self.api_type = ttk.Combobox(frame, state="readonly", values=["azure", "openai", "gemini"])
self.api_type.grid(column=1, row=0, padx=24, pady=5, sticky='EW')
self.api_type.bind('<<ComboboxSelected>>', self.api_type_changed)
# API Key
ttk.Label(frame, text="API Key:", style='W.Label').grid(column=0, row=1, sticky='E', padx=8)
self.api_key_entry = ttk.Entry(frame)
self.api_key_entry.grid(column=1, row=1, padx=24, pady=5, sticky='EW')
# API Host URL
self.api_host_label = ttk.Label(frame, text="API Host URL:", style='W.Label')
self.api_host_label.grid(column=0, row=2, sticky='E', padx=8)
self.api_host_entry = ttk.Entry(frame)
self.api_host_entry.grid(column=1, row=2, padx=24, pady=5, sticky='EW')
# API Version
self.api_version_label = ttk.Label(frame, text="API Version:", style='W.Label')
self.api_version_label.grid(column=0, row=3, sticky='E', padx=8)
self.api_version_entry = ttk.Entry(frame)
self.api_version_entry.grid(column=1, row=3, padx=24, pady=5, sticky='EW')
# GPT Model Name
ttk.Label(frame, text="GPT Model Name:", style='W.Label').grid(column=0, row=4, sticky='E', padx=8)
self.gpt_model = ttk.Entry(frame)
self.gpt_model.grid(column=1, row=4, padx=24, pady=5, sticky='EW')
# GPT Deployment Name
self.gpt_deployment_label = ttk.Label(frame, text="GPT Deployment Name:", style='W.Label')
self.gpt_deployment_label.grid(column=0, row=5, sticky='E', padx=8)
self.gpt_deployment = ttk.Entry(frame)
self.gpt_deployment.grid(column=1, row=5, padx=24, pady=5, sticky='EW')
# Embeddings Model Name
ttk.Label(frame, text="Embed Model Name:", style='W.Label').grid(column=0, row=6, sticky='E', padx=8)
self.embeddings_model_entry = ttk.Entry(frame)
self.embeddings_model_entry.grid(column=1, row=6, padx=24, pady=5, sticky='EW')
# Embeddings Deployment Name
self.embeddings_deployment_label = ttk.Label(frame, text="Embed Depl Name:", style='W.Label')
self.embeddings_deployment_label.grid(column=0, row=7, sticky='E', padx=8)
self.embeddings_deployment_entry = ttk.Entry(frame)
self.embeddings_deployment_entry.grid(column=1, row=7, padx=24, pady=5, sticky='EW')
# Prompt
ttk.Label(frame, text="Prompt:", style='W.Label').grid(column=0, row=8, sticky='E', padx=8)
self.prompt_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)
self.prompt_entry.configure(state='normal')
self.prompt_entry.insert(INSERT, """You are a programmer who tries to complete tasks that your analytic team provides. They give you a task description and code they think might be relevant. Prepare all the necessary changes that are required for the task to be finished. If the team did not provide enough information, point out what information you still need. If you have enough information then your solutions for tasks should follow these guidelines without any additional explanation:
- include only the code that needs to be modified/added,
- print line numbers of the code snippet if a file was modified (use standard diff notation, with + prefix for added and - for deleted lines)
- include in the changes also all the required imports
- follow the coding style and conventions from the existing files
- do not explain what the code does
- explain only if there are some things that need to be checked before applying changes (vulnerabilities, ambiguity ...)
- if you think there are other files that need to be modified, but were not included in the question, point that out (use the project structure as reference)
- do not create any comments in code snippets """)
self.prompt_entry.grid(column=1, row=8, padx=24, pady=5)
# Optional Settings
ttk.Label(frame, text="Max input token length:", style='W.Label').grid(column=0, row=9, sticky='E', padx=8)
self.max_tokens = ttk.Entry(frame)
self.max_tokens.grid(column=1, row=9, padx=24, pady=5, sticky='EW')
self.max_tokens.insert(INSERT, "64000")
ttk.Label(frame, text="Number of threads:", style='W.Label').grid(column=0, row=10, sticky='E', padx=8)
self.threads = ttk.Entry(frame)
self.threads.insert(INSERT, "1")
self.threads.grid(column=1, row=10, padx=24, pady=5, sticky='EW')
def api_type_changed(self, event):
self.root.isLlamaInitialized = False
self.show_only_relevant_settings()
def show_only_relevant_settings(self):
if self.api_type.get() == "openai" or self.api_type.get() == "gemini":
self.api_host_label.grid_remove()
self.api_host_entry.grid_remove()
self.api_version_label.grid_remove()
self.api_version_entry.grid_remove()
self.gpt_deployment_label.grid_remove()
self.gpt_deployment.grid_remove()
self.embeddings_deployment_label.grid_remove()
self.embeddings_deployment_entry.grid_remove()
if self.api_type.get() == "azure":
self.api_host_label.grid(column=0, row=2, sticky='E', padx=8)
self.api_host_entry.grid(column=1, row=2, padx=24, pady=5, sticky='EW')
self.api_version_label.grid(column=0, row=3, sticky='E', padx=8)
self.api_version_entry.grid(column=1, row=3, padx=24, pady=5, sticky='EW')
self.gpt_deployment_label.grid(column=0, row=5, sticky='E', padx=8)
self.gpt_deployment.grid(column=1, row=5, padx=24, pady=5, sticky='EW')
self.embeddings_deployment_label.grid(column=0, row=7, sticky='E', padx=8)
self.embeddings_deployment_entry.grid(column=1, row=7, padx=24, pady=5, sticky='EW')
# Path: gui/tab_task.py
class TaskTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# Task Requirements
ttk.Label(frame, text="Task Requirements:", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))
self.task_requirements_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)
self.task_requirements_entry.configure(state='normal')
self.task_requirements_entry.pack(fill=tk.X, padx=10, pady=10)
# Select project
ttk.Label(frame, text="Selected Project:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
self.selected_project = ttk.Combobox(frame)
self.selected_project.pack(fill=tk.X, padx=10, pady=10)
# Run Generation Button
self.run_generation_button = ttk.Button(frame, text="Generate", command=self.generate_answer)
self.run_generation_button.pack(padx=10, pady=10)
# Clear chat Button
self.run_generation_button = ttk.Button(frame, text="Clear chat", command=self.clear_chat)
self.run_generation_button.pack(padx=10, pady=10)
# Generation Response Field
self.generation_response_frame = ttk.Frame(self.frame)
self.generation_response = HtmlFrame(self.generation_response_frame)
# Loading screen
self.loading_frame = ttk.Frame(self.frame)
self.loader = HtmlFrame(self.loading_frame)
self.load_loading_page()
def clear_chat(self):
self.root.messages = []
self.load_web_page()
def generate_answer(self):
is_valid = validate(self.root, [
Properties.TASK_REQUIREMENTS,
Properties.SELECTED_PROJECT,
Properties.PROMPT,
Properties.MAX_TOKENS,
Properties.API_TYPE,
])
if not is_valid:
return
api_type = self.root.settings_tab.api_type.get()
task_requirements = self.task_requirements_entry.get("1.0", tk.END)
project_name = self.selected_project.get()
prompt = self.root.settings_tab.prompt_entry.get("1.0", tk.END)
max_tokens = self.root.settings_tab.max_tokens.get()
init_llama_index(self.root, api_type)
self.generation_response_frame.place_forget()
self.loading_frame.place(y=380, x=0, relwidth=1, height=900)
if not self.root.messages:
self.root.messages.append(
ChatMessage(role="system", content=prompt)
)
try:
thread = threading.Thread(target=question, args=(
project_name,
task_requirements,
prompt,
max_tokens,
self.root
))
thread.start()
except Exception as e:
messagebox.showerror("Error", "Error occurred while generating response: \n" + str(e))
self.root.messages = []
def load_web_page(self):
tempfile.NamedTemporaryFile(mode='w')
f = open("temp.html", 'w')
f.write("<html><body style='background: rgb(28, 28, 28)'>")
for message in self.root.messages:
content = message.content.replace("\n", "<br>")
if message.role == "system":
continue
if message.role == "user":
f.write(f"<p style='color: rgb(255, 255, 255); font-size: 12px; font-family: Arial; margin-right: "
f"40px; border-radius: 0 12px 12px 0; background: "
f"rgb(117, 92, 129); padding: 10px;'>{content}</p>")
if message.role == "assistant":
f.write(f"<p style='color: rgb(255, 255, 255); font-size: 12px; font-family: Arial; margin-left: "
f"40px; border-radius: 12px 0 0 12px; background: rgb(117, 92, 129); padding: 10px'>{content}</p>")
f.write("</body></html>")
f.flush()
f.close()
self.generation_response.load_file(os.path.abspath(f.name), force=True)
self.generation_response.pack()
return True
def load_loading_page(self):
temp_folder = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
script_dir = os.path.dirname(os.path.abspath(__file__))
if temp_folder == script_dir:
# Running as a Python script.
loading_animation = os.path.abspath("assets/loading.png").replace('\\', '/')
else:
# Running as a PyInstaller bundle.
loading_animation = os.path.join(temp_folder, 'loading.png').replace('\\', '/')
tempfile.NamedTemporaryFile(mode='r')
tempfile.NamedTemporaryFile(mode='w')
f = open("loading.html", 'w')
f.write(f"<html><body style='background-color: rgb(28, 28, 28)'><img src='file:///{loading_animation}' style"
f"='width: 300px; height: 300px; margin: auto; display: block; padding-top: 100px'></body></html>")
f.flush()
f.close()
self.loader.load_file(os.path.abspath("loading.html"), force=True)
self.loader.pack()
return True
# Path: main.py
import tkinter as tk
import sv_ttk
from tkinter import ttk
from tkinter.ttk import Style
from gui.tab_projects import ProjectsTab
from gui.settings import load_settings, save_settings
from gui.tab_settings import SettingsTab
from gui.tab_task import TaskTab
class Application(tk.Tk):
def __init__(self):
super().__init__()
self.title("Amigo")
self.geometry("900x1100")
self.style = Style()
self.isLlamaInitialized = False
sv_ttk.set_theme("dark")
self.messages = []
self.style.configure('W.TButton', font=('calibri', 18, 'bold', 'underline'), borderwidth='4')
self.style.configure('W.Label', font=('calibri', 13, 'bold'))
# Create the tab control
self.tab_control = ttk.Notebook(self)
# Create tabs
self.settings_frame = ttk.Frame(self.tab_control)
self.task_frame = ttk.Frame(self.tab_control)
self.projects_frame = ttk.Frame(self.tab_control)
# Add tabs to notebook
self.tab_control.add(self.task_frame, text='Task')
self.tab_control.add(self.projects_frame, text='Projects')
self.tab_control.add(self.settings_frame, text='Settings')
# Init UI
self.settings_tab = SettingsTab(self, self.settings_frame)
self.task_tab = TaskTab(self, self.task_frame)
self.projects_tab = ProjectsTab(self, self.projects_frame)
# Init settings
self.settings_file = 'app_settings.json'
load_settings(self)
self.tab_control.pack(expand=1, fill="both")
def on_close(self):
save_settings(self)
self.destroy()
def main(self):
self.protocol("WM_DELETE_WINDOW", self.on_close)
self.mainloop()
ref (expected next line): if __name__ == "__main__":
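The guarded body is not shown in this row; a likely continuation, consistent with the Application.main method defined above (an assumption, not verbatim repository code):

if __name__ == "__main__":
    app = Application()
    app.main()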
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: quocanh34/magic-animate-modified
# Path: magicanimate/models/attention.py
class Transformer3DModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
dropout: float = 0.0,
norm_num_groups: int = 32,
cross_attention_dim: Optional[int] = None,
attention_bias: bool = False,
activation_fn: str = "geglu",
num_embeds_ada_norm: Optional[int] = None,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
):
super().__init__()
self.use_linear_projection = use_linear_projection
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
inner_dim = num_attention_heads * attention_head_dim
# Define input layers
self.in_channels = in_channels
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
if use_linear_projection:
self.proj_in = nn.Linear(in_channels, inner_dim)
else:
self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
# Define transformers blocks
self.transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
inner_dim,
num_attention_heads,
attention_head_dim,
dropout=dropout,
cross_attention_dim=cross_attention_dim,
activation_fn=activation_fn,
num_embeds_ada_norm=num_embeds_ada_norm,
attention_bias=attention_bias,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
for d in range(num_layers)
]
)
# 4. Define output layers
if use_linear_projection:
self.proj_out = nn.Linear(in_channels, inner_dim)
else:
self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):
# Input
assert hidden_states.dim() == 5, f"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}."
video_length = hidden_states.shape[2]
hidden_states = rearrange(hidden_states, "b c f h w -> (b f) c h w")
# JH: no need to repeat when a list of prompts is given
if encoder_hidden_states.shape[0] != hidden_states.shape[0]:
encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)
batch, channel, height, weight = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
if not self.use_linear_projection:
hidden_states = self.proj_in(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
else:
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)
hidden_states = self.proj_in(hidden_states)
# Blocks
for block in self.transformer_blocks:
hidden_states = block(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
timestep=timestep,
video_length=video_length
)
# Output
if not self.use_linear_projection:
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
hidden_states = self.proj_out(hidden_states)
else:
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()
)
output = hidden_states + residual
output = rearrange(output, "(b f) c h w -> b c f h w", f=video_length)
if not return_dict:
return (output,)
return Transformer3DModelOutput(sample=output)
# Path: magicanimate/models/resnet.py
class Downsample3D(nn.Module):
def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.padding = padding
stride = 2
self.name = name
if use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
else:
raise NotImplementedError
def forward(self, hidden_states):
assert hidden_states.shape[1] == self.channels
if self.use_conv and self.padding == 0:
raise NotImplementedError
assert hidden_states.shape[1] == self.channels
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: magicanimate/models/resnet.py
class ResnetBlock3D(nn.Module):
def __init__(
self,
*,
in_channels,
out_channels=None,
conv_shortcut=False,
dropout=0.0,
temb_channels=512,
groups=32,
groups_out=None,
pre_norm=True,
eps=1e-6,
non_linearity="swish",
time_embedding_norm="default",
output_scale_factor=1.0,
use_in_shortcut=None,
):
super().__init__()
self.pre_norm = pre_norm
self.pre_norm = True
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.time_embedding_norm = time_embedding_norm
self.output_scale_factor = output_scale_factor
if groups_out is None:
groups_out = groups
self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
if temb_channels is not None:
if self.time_embedding_norm == "default":
time_emb_proj_out_channels = out_channels
elif self.time_embedding_norm == "scale_shift":
time_emb_proj_out_channels = out_channels * 2
else:
raise ValueError(f"unknown time_embedding_norm : {self.time_embedding_norm} ")
self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)
else:
self.time_emb_proj = None
self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
if non_linearity == "swish":
self.nonlinearity = lambda x: F.silu(x)
elif non_linearity == "mish":
self.nonlinearity = Mish()
elif non_linearity == "silu":
self.nonlinearity = nn.SiLU()
self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut
self.conv_shortcut = None
if self.use_in_shortcut:
self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, input_tensor, temb):
hidden_states = input_tensor
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.conv1(hidden_states)
if temb is not None:
temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]
if temb is not None and self.time_embedding_norm == "default":
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
if temb is not None and self.time_embedding_norm == "scale_shift":
scale, shift = torch.chunk(temb, 2, dim=1)
hidden_states = hidden_states * (1 + scale) + shift
hidden_states = self.nonlinearity(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
input_tensor = self.conv_shortcut(input_tensor)
output_tensor = (input_tensor + hidden_states) / self.output_scale_factor
return output_tensor
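A shape-oriented sketch of ResnetBlock3D (illustrative; it assumes InflatedConv3d from this module behaves like a 2D convolution applied per frame, which is how the class uses it):

import torch

block = ResnetBlock3D(in_channels=64, out_channels=128, temb_channels=512)
x = torch.randn(2, 64, 16, 32, 32)    # (batch, channels, frames, height, width)
temb = torch.randn(2, 512)            # per-sample time embedding
y = block(x, temb)                    # expected shape: (2, 128, 16, 32, 32)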
# Path: magicanimate/models/resnet.py
class Upsample3D(nn.Module):
def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_conv_transpose = use_conv_transpose
self.name = name
conv = None
if use_conv_transpose:
raise NotImplementedError
elif use_conv:
self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)
def forward(self, hidden_states, output_size=None):
assert hidden_states.shape[1] == self.channels
if self.use_conv_transpose:
raise NotImplementedError
# Cast to float32, as the 'upsample_nearest2d_out_frame' op does not support bfloat16
dtype = hidden_states.dtype
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(torch.float32)
# upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984
if hidden_states.shape[0] >= 64:
hidden_states = hidden_states.contiguous()
# if `output_size` is passed we force the interpolation output
# size and do not make use of `scale_factor=2`
if output_size is None:
hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode="nearest")
else:
hidden_states = F.interpolate(hidden_states, size=output_size, mode="nearest")
# If the input is bfloat16, we cast back to bfloat16
if dtype == torch.bfloat16:
hidden_states = hidden_states.to(dtype)
hidden_states = self.conv(hidden_states)
return hidden_states
# Path: magicanimate/models/motion_module.py
def get_motion_module(
in_channels,
motion_module_type: str,
motion_module_kwargs: dict
):
if motion_module_type == "Vanilla":
return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)
else:
raise ValueError
# Path: magicanimate/models/unet_3d_blocks.py
import torch
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
get_motion_module(
in_channels=in_channels,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
) if use_motion_module else None
)
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.motion_modules = nn.ModuleList(motion_modules)
def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None):
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet, motion_module in zip(self.attentions, self.resnets[1:], self.motion_modules):
hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
hidden_states = motion_module(hidden_states, temb, encoder_hidden_states=encoder_hidden_states) if motion_module is not None else hidden_states
hidden_states = resnet(hidden_states, temb)
return hidden_states
class CrossAttnDownBlock3D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
cross_attention_dim=1280,
output_scale_factor=1.0,
downsample_padding=1,
add_downsample=True,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
resnets = []
attentions = []
motion_modules = []
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock3D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
ref (expected next line): pre_norm=resnet_pre_norm,
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: JonatanNevo/better-iptables
# Path: iptables/enums.py
class ConnbytesDirection(str, Enum):
ORIGINAL = "original"
REPLY = "reply"
BOTH = "both"
# Path: iptables/enums.py
class ConnbytesMode(str, Enum):
BYTES = "bytes"
PACKETS = "packets"
AVGERAGE = "avgpkt"
# Path: iptables/enums.py
class ConntrackStates(str, Enum):
INVALID = "INVALID"
ESTABLISHED = "ESTABLISHED"
RELATED = "RELATED"
UNTRACKED = "UNTRACKED"
SNAT = "SNAT"
DNAT = "DNAT"
NEW = "NEW"
# Path: iptables/enums.py
class ConntrackStatus(str, Enum):
NONE = "NONE"
EXPECTED = "EXPECTED"
SEEN_REPLY = "SEEN_REPLY"
ASSURED = "ASSURED"
CONFIRMED = "CONFIRMED"
# Path: iptables/enums.py
class ConntrackDirection(str, Enum):
ORIGINAL = "original"
REPLY = "reply"
# Path: iptables/enums.py
class LimitUnits(str, Enum):
SECOND = "second"
MINUTE = "minute"
HOUR = "hour"
DAY = "day"
# Path: iptables/enums.py
class State(str, Enum):
INVALID = "INVALID"
ESTABLISHED = "ESTABLISHED"
NEW = "NEW"
RELATED = "RELATED"
UNTRACKED = "UNTRACKED"
# Path: iptables/enums.py
class TcpFlags(str, Enum):
SYN = "SYN"
ACK = "ACK"
FIN = "FIN"
RST = "RST"
URG = "URG"
PSH = "PSH"
ALL = "ALL"
NONE = "NONE"
# Path: iptables/enums.py
class Targets(str, Enum):
ACCEPT = "ACCEPT"
DROP = "DROP"
RETURN = "RETURN"
AUDIT = "AUDIT"
CHECKSUM = "CHECKSUM"
CLASSIFY = "CLASSIFY"
CLUSTERIP = "CLUSTERIP"
CONNMARK = "CONNMARK"
CONNSECMARK = "CONNSECMARK"
CT = "CT"
DNAT = "DNAT"
DNPT = "DNPT"
DSCP = "DSCP"
ECN = "ECN"
HL = "HL"
HMARK = "HMARK"
IDLETIMER = "IDLETIMER"
LED = "LED"
LOG = "LOG"
MARK = "MARK"
MASQUERADE = "MASQUERADE"
NETMAP = "NETMAP"
NFLOG = "NFLOG"
NFQUEUE = "NFQUEUE"
NOTRACK = "NOTRACK"
RATEEST = "RATEEST"
REDIRECT = "REDIRECT"
REJECT = "REJECT"
SECMARK = "SECMARK"
SET = "SET"
SNAT = "SNAT"
SNPT = "SNPT"
SYNPROXY = "SYNPROXY"
TCPMSS = "TCPMSS"
TCPOPTSTRIP = "TCPOPTSTRIP"
TEE = "TEE"
TOS = "TOS"
TPROXY = "TPROXY"
TRACE = "TRACE"
TTL = "TTL"
ULOG = "ULOG"
# Path: iptables/enums.py
class Protocols(str, Enum):
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
ALL = "all"
# Path: iptables/enums.py
class Tables(str, Enum):
FILTER = "filter"
NAT = "nat"
MANGLE = "mangle"
RAW = "raw"
SECURITY = "security"
# Path: iptables/enums.py
class Chains(str, Enum):
INPUT = "INPUT"
FORWARD = "FORWARD"
OUTPUT = "OUTPUT"
PREROUTING = "PREROUTING"
POSTROUTING = "POSTROUTING"
# Path: iptables/enums.py
class Actions(str, Enum):
APPEND = "-A"
DELETE = "-D"
INSERT = "-I"
REPLACE = "-R"
CHECK = "-C"
LIST = "-L"
FLUSH = "-F"
ZERO = "-Z"
NEW_CHAIN = "-N"
DELETE_CHAIN = "-X"
RENAME_CHAIN = "-E"
POLICY = "-P"
LIST_RULES = "-S"
# Path: iptables/enums.py
class RejectType(str, Enum):
ICMP_NET_UNREACHABLE = "icmp-net-unreachable"
ICMP_HOST_UNREACHABLE = "icmp-host-unreachable"
ICMP_PORT_UNREACHABLE = "icmp-port-unreachable"
ICMP_PROT_UNREACHABLE = "icmp-proto-unreachable"
ICMP_NET_PROHIBITED = "icmp-net-prohibited"
ICMP_HOST_PROHIBITED = "icmp-host-prohibited"
ICMP_ADMIN_PROHIBITED = "icmp-admin-prohibited"
TCP_RESET = "tcp-reset"
ICMP6_NO_ROUTE = "icmp6-no-route"
NO_ROUTE = "no-route"
ICMP6_ADM_PROHIBITED = "icmp6-adm-prohibited"
ADM_PROHIBITED = "adm-prohibited"
ICMP6_ADDR_UNREACHABLE = "icmp6-addr-unreachable"
ADDR_UNREACHABLE = "addr-unreach"
ICMP6_PORT_UNREACHABLE = "icmp6-port-unreachable"
# Path: iptables/exceptions.py
class IPTablesError(Exception):
pass
# Path: iptables/exceptions.py
class IPVersionError(IPTablesError):
def __init__(self):
super().__init__("ipv4 and ipv6 cannot be both True")
# Path: iptables/exceptions.py
class ConnbytesError(IPTablesError):
def __init__(self):
super().__init__("connbytes must be in the format of 'bytes:bytes'")
# Path: iptables/exceptions.py
class ConnlimitAddrError(IPTablesError):
def __init__(self):
super().__init__("saddr and daddr cannot be both True")
# Path: iptables/exceptions.py
class MultiportSourceAndDestinationError(IPTablesError):
def __init__(self):
super().__init__("source_ports and destination_ports cannot be both True")
# Path: iptables/exceptions.py
class MultiportPortsAndOtherError(IPTablesError):
def __init__(self):
super().__init__("ports cannot be used with source_ports or destination_ports")
# Path: iptables/exceptions.py
class MultiportFormatError(IPTablesError):
def __init__(self):
super().__init__("ports must be an int or a string in format of 'port:port'")
# Path: iptables/iptables.py
import dataclasses
import re
from enum import Enum
from typing import Optional, Union, List, Tuple
from typing_extensions import Self
from iptables.enums import ConnbytesDirection, ConnbytesMode, ConntrackStates, ConntrackStatus, ConntrackDirection, \
LimitUnits, State, TcpFlags, Targets, Protocols, Tables, Chains, Actions, RejectType
from iptables.exceptions import IPTablesError, IPVersionError, ConnbytesError, ConnlimitAddrError, \
MultiportSourceAndDestinationError, MultiportPortsAndOtherError, MultiportFormatError
return self
def connmark(self, mark: int, mask: Optional[int] = None) -> Self:
if mask:
parameters = [("mark", f"{mark}/{mask}")]
else:
parameters = [("mark", mark)]
self._modules.append(Module(module="connmark", parameters=parameters))
return self
def conntrack(
self,
*,
state: Optional[List[ConntrackStates]] = None,
status: Optional[List[ConntrackStatus]] = None,
protocol: Optional[Protocols] = None,
original_source: Optional[str] = None,
original_source_port: Optional[int] = None,
original_destination: Optional[str] = None,
original_destination_port: Optional[int] = None,
reply_source: Optional[str] = None,
reply_source_port: Optional[int] = None,
reply_destination: Optional[str] = None,
reply_destination_port: Optional[int] = None,
expire: Optional[int] = None,
direction: Optional[ConntrackDirection] = None,
) -> Self:
parameters = []
if state:
parameters.append(("ctstate", ",".join(state)))
if status:
parameters.append(("ctstatus", ",".join(status)))
if protocol:
parameters.append(("ctproto", protocol))
if original_source:
parameters.append(("ctorigsrc", original_source))
if original_source_port:
parameters.append(("ctorigsrcport", original_source_port))
if original_destination:
parameters.append(("ctorigdst", original_destination))
if original_destination_port:
parameters.append(("ctorigdstport", original_destination_port))
if reply_source:
parameters.append(("ctreplsrc", reply_source))
if reply_source_port:
parameters.append(("ctreplsrcport", reply_source_port))
if reply_destination:
parameters.append(("ctrepldst", reply_destination))
if reply_destination_port:
parameters.append(("ctrepldstport", reply_destination_port))
if expire:
parameters.append(("ctexpire", expire))
if direction:
parameters.append(("ctdir", direction))
self._modules.append(Module(module="conntrack", parameters=parameters))
return self
def cpu(self, cpu: int) -> Self:
self._modules.append(Module(module="cpu", parameters=[("cpu", str(cpu))]))
return self
def limit(self, rate: int = 3, units: LimitUnits = LimitUnits.HOUR, burst: int = 5) -> Self:
self._modules.append(Module(module="limit", parameters=[("limit", f"{rate}/{units}"), ("limit-burst", burst)]))
return self
def mac(self, mac: str) -> Self:
self._modules.append(Module(module="mac", parameters=[("mac-source", mac)]))
return self
def mark(self, mark: int, mask: Optional[int] = None) -> Self:
if mask:
parameters = [("mark", f"{mark}/{mask}")]
else:
parameters = [("mark", mark)]
self._modules.append(Module(module="mark", parameters=parameters))
return self
def multiport(
self,
source_ports: Optional[List[Union[int, str]]] = None,
destination_ports: Optional[List[Union[int, str]]] = None,
ports: Optional[List[Union[int, str]]] = None
) -> Self:
if source_ports and destination_ports:
raise MultiportSourceAndDestinationError
if ports and (source_ports or destination_ports):
raise MultiportPortsAndOtherError
for port_type, port_list in [("ports", ports), ("source-ports", source_ports),
("destination-ports", destination_ports)]:
for port in port_list:
if isinstance(port, str) and not re.match("\d*:\d*", port):
raise MultiportFormatError
parameters = [(port_type, ",".join(port_list))]
self._modules.append(Module(module="multiport", parameters=parameters))
return self
def state(self, state: State) -> Self:
self._modules.append(Module(module="state", parameters=[("state", state)]))
return self
def tcp(
self,
syn: bool = False,
option: Optional[int] = None,
flags: Optional[Tuple[List[TcpFlags], List[TcpFlags]]] = None,
source_port: Optional[Union[int, str]] = None,
destination_port: Optional[Union[int, str]] = None
) -> Self:
parameters = []
if syn:
parameters.append(("syn", None))
if option:
parameters.append(("tcp-option", option))
if flags:
mask, comp = flags
mask_list = ",".join(mask)
comp_list = ",".join(comp)
| parameters.append(("tcp-flags", f"{mask_list} {comp_list}")) |
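# Usage sketch (not from the repository above): the methods shown return `Self`
# and accumulate `Module` entries, so rules are presumably built by chaining.
# `IPTablesRule` is a placeholder name; the enclosing builder class and its
# constructor are not visible in this excerpt.
rule = (
    IPTablesRule()
    .conntrack(state=[ConntrackStates.ESTABLISHED, ConntrackStates.RELATED])
    .limit(rate=10, units=LimitUnits.MINUTE, burst=20)
    .tcp(destination_port=22, syn=True)
)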
====REPOSITORY====
# Repo Name: daihaojun554/biliscrapy
# Path: biliscrapy/models.py
class BiliDanmu(models.Model):
_id = models.CharField(max_length=255)
cid = models.CharField(max_length=255)
content = models.TextField()
color = models.CharField(max_length=255)
fontsize = models.IntegerField()
midHash = models.CharField(max_length=255)
mode = models.CharField(max_length=255)
progress = models.FloatField()
ctime = models.DateTimeField()
def __str__(self):
return self.content
# Path: biliscrapy/models.py
class BiliComment(models.Model):
avid = models.CharField(max_length=255)
uname = models.CharField(max_length=255)
    # User level (the highest level is 6)
    current_level = models.IntegerField()
    # Number of likes on the comment
    like = models.IntegerField()
    # User gender: male / female / private
    sex = models.CharField(max_length=10)
ctime = models.DateTimeField()
message = models.TextField()
def __str__(self):
return self.message
# Path: biliscrapy/models.py
class BiliVideo(models.Model):
bvid = models.CharField(max_length=30, unique=True)
avid = models.IntegerField(unique=True)
oid = models.IntegerField(unique=True)
title = models.CharField(max_length=100)
author = models.CharField(max_length=100)
tag = models.CharField(max_length=100)
pubdate = models.DateField()
pic = models.URLField()
desc = models.TextField()
danmu_fetched = models.BooleanField(default=False)
comment_fetched = models.BooleanField(default=False)
danmaku_count = models.IntegerField(default=0)
comment_count = models.IntegerField(default=0)
def __str__(self):
return self.title
# Path: biliscrapy/models.py
class Card(models.Model):
card_code = models.CharField(max_length=100, unique=True)
expiration_date = models.DateTimeField()
last_used_address = models.GenericIPAddressField(null=True, blank=True)
is_used = models.BooleanField(default=False)
# action = models.CharField(max_length=100)
# is_active = models.BooleanField(default=True)
# is_expired = models.BooleanField(default=False)
# count = models.IntegerField(default=0)
def __str__(self):
return self.card_code
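# Usage sketch (not from the repository above): once the scrapers have filled
# these Django models, typical ORM lookups read like this; the BV id is a
# placeholder value.
video = BiliVideo.objects.filter(bvid="BV1uG41197Tf").first()
if video is not None:
    latest_comments = BiliComment.objects.filter(avid=video.avid).order_by("-ctime")[:10]
    danmaku_total = BiliDanmu.objects.filter(cid=video.oid).count()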
# Path: biliscrapy/network/bilibili_comment.py
class Comments:
def __init__(self):
script_dir = os.path.dirname(os.path.abspath(__file__))
        # Build the path of the cookie file
        file_path = os.path.join(script_dir, 'bilibili_cookies.json')
        if not os.path.exists(file_path):
            # No cookie file yet: fall back to an empty cookie jar
            self.cookies_data = []
            self.cookies = {}
        else:
            with open(file_path, 'r', encoding='utf-8') as file:
                self.cookies_data = json.load(file)
            self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}
self.utils = bili_utils()
self.logger = logging.getLogger('log')
def extract_comments(self, replies):
extracted_comments = []
if not replies:
return extracted_comments
for reply in replies:
extracted_comment = {
'uname': reply['member']['uname'],
'current_level': reply['member']['level_info']['current_level'],
'like': reply['like'],
'sex': reply['member']['sex'],
'ctime': reply['ctime'],
'message': reply['content']['message']
}
extracted_comments.append(extracted_comment)
if 'replies' in reply and reply['replies']:
nested_replies = self.extract_comments(reply['replies'])
extracted_comments.extend(nested_replies)
return extracted_comments
def get_comments(self, bvorurl):
self.logger.info("Getting comments for bvorurl:{}".format(bvorurl))
bv = self.utils.bv_get(bvorurl)
avid = self.utils.bv2av(bv)
count = 1
while avid is None:
avid = self.utils.bv2av(bv)
count += 1
self.logger.info(f"avid is None, retrying...count is {count}")
time.sleep(3)
self.logger.info(f"avid===>{avid}")
        comments = []  # store the extracted comments in a list
        # Fetch the total comment count and the page size,
        # then work out the total number of pages
page_num = 1
page_size = 20
while True:
url = f'https://api.bilibili.com/x/v2/reply?type=1&oid={avid}&sort=2&pn={page_num}&ps={page_size}'
response = requests.get(url, headers=headers, cookies=self.cookies)
data = response.json()
if data['code'] != 0:
break
# 提取回复信息
extracted_data = self.extract_comments(data['data']['replies'])
# 过滤重复的评论
new_comments = [comment for comment in extracted_data if comment not in comments]
comments.extend(new_comments) # 将新的评论添加到列表中
self.logger.info(f"提取到了{len(new_comments)}条评论,从第 {page_num} 页")
if len(new_comments) == 0:
self.logger.info("提取完毕所有评论,共提取到{}条评论!=====>avid{}".format(len(comments), avid))
break
# 判断是否有下一页
total_count = data['data']['page']['count']
total_pages = (total_count + page_size - 1) // page_size # 计算总页数
if page_num >= total_pages:
self.logger.info("提取完毕所有评论,共提取到{}条评论!=====>avid{}".format(len(comments), avid))
break
# 构建下一页的URL
page_num += 1
self.logger.info("开始提取第{}页评论".format(page_num))
time.sleep(random.uniform(0.5, 1.5))
self.logger.info(f"总共{len(comments)}条评论!")
# 写入JSON文件
os.makedirs("./data/comment/", exist_ok=True) # 创建多层目录
file_path = f'./data/comment/{avid}_{page_num}-{page_size}_{len(comments)}.json'
if len(comments) < 2000:
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(comments, f, indent=4, ensure_ascii=False)
return comments
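# Worked check of the pagination arithmetic used in get_comments above:
# (total_count + page_size - 1) // page_size is a ceiling division.
total_count, page_size = 137, 20
total_pages = (total_count + page_size - 1) // page_size
assert total_pages == 7  # 6 * 20 = 120 < 137 <= 7 * 20 = 140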
# Path: biliscrapy/network/bilibili_utils.py
class bili_utils:
def __init__(self):
self.logger = logging.getLogger('log')
self.header = headers
self.script_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')
with open(file_path, 'r') as file:
self.cookies_data = json.load(file)
self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}
def bv_get(self, bvorurl):
# https://api.bilibili.com/x/web-interface/view?bvid=BV1uG41197Tf
        # Extract the BV id
        bv_identifier = "BV"  # marker that identifies a BV number
if "http://" in bvorurl or "https://" in bvorurl: # 检查是否是一个URL
self.logger.info("你输入的是http链接,正在解析...")
bv_index = bvorurl.find(bv_identifier)
if bv_index != -1: # 如果找到了BV号
bv = bvorurl[bv_index:bv_index + len(bv_identifier) + 10] # 提取BV号
self.logger.info(f"BV号为......: {bv}")
return bv
else:
self.logger.info("你输入的链接地址有误!")
return
elif bv_identifier in bvorurl: # 如果输入的是BV号
self.logger.info(f"你输入的是BV号{bvorurl},正在解析...")
bv = bvorurl
return bv
else:
self.logger.info(f"请输入正确的链接地址或BV号!,{bvorurl}")
return "BV1111111111"
'''
    av is the same value as the oid parameter used by the comment API
'''
def bv2av(self, bv):
bv2av_url = 'https://api.bilibili.com/x/web-interface/view?bvid='
if bv.startswith("BV"):
url = bv2av_url + str(bv)
retry_count = 0
max_retries = 10
retry_delay = 1 # seconds
while retry_count < max_retries:
try:
response = requests.get(url,headers=headers,cookies=self.cookies)
response.raise_for_status() # 检查请求是否成功
data = response.json()
# self.logger.info(data)
if 'data' in data and 'aid' in data['data']:
avid = data['data']['aid']
self.logger.info(f"找到的avid{avid}")
return avid
else:
self.logger.info("未找到有效的aid值,正在重新尝试获取...")
retry_count += 1
time.sleep(retry_delay)
except (requests.RequestException, ValueError) as e:
self.logger.info(f"请求发生错误:{e}")
retry_count += 1
self.logger.info("服务器返回错误!请稍后再试!")
self.logger.info(f"正在重新尝试获取aid,尝试次数==>{retry_count}")
time.sleep(retry_delay)
return None
'''
    cid is the parameter used by the danmaku (bullet-comment) API
'''
def bv2cid(self, bv):
url = f"https://api.bilibili.com/x/player/pagelist?bvid={str(bv)}&jsonp=jsonp"
retry_count = 1
json_s = requests.get(url,headers=headers,cookies=self.cookies).json()
self.logger.info("bv====》"+bv)
if json_s['code'] == 0:
cid = json_s['data'][0]['cid']
self.logger.info("提取出来的cid是:" + str(cid))
return cid
else:
self.logger.error("服务器返回错误!请稍后再试!")
retry_count+=1
if retry_count > 10:
self.logger.error("尝试次数过多,请稍后再试!")
return None
else:
self.logger.error("正在重新尝试获取cid,尝试次数==>" + str(retry_count))
return self.bv2cid(bv)
def get_bilibili_cookies(self):
options = webdriver.ChromeOptions()
# options.add_argument('--headless')
# options.add_argument('--disable-gpu')
# 动态获取路径 不用每次都手动输入路径
# chromedriver.exe 的路径
# 获取当前脚本的绝对路径
current_path = os.path.dirname(os.path.abspath(__file__))
# 构建 chromedriver 的绝对路径
driver_path = os.path.join(current_path, 'chromedriver.exe')
# 创建 WebDriver 服务
service = Service(driver_path)
# service = Service('./chromedriver.exe')
options.add_argument('--no-sandbox')
options.binary_location='C:\\Program Files\\Google\\chrome-win64\\chrome.exe'
driver = webdriver.Chrome(options=options, service=service)
# 打开 Bilibili 网站
driver.get('https://www.bilibili.com/')
#
login_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,
'#i_cecream > div.bili-feed4 > div.bili-header.large-header > div.bili-header__bar > ul.right-entry > li:nth-child(1) > li > div.right-entry__outside.go-login-btn')))
login_btn.click()
# 等待登录完成成
time.sleep(10)
driver.get('https://www.bilibili.com/')
# 在这里,模拟登录流程(需要输入账号和密码)
# 扫码登录然后,等待完成,完成的条件是屏幕上出现了某个
search = WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#nav-searchform > div.nav-search-btn')))
search.click()
time.sleep(3)
cookies = driver.get_cookies()
# 获取当前脚本的路径
current_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_path, 'bilibili_cookies.json'), 'w') as f:
# 写入当前文件
f.write(json.dumps(cookies))
# 写入成功
self.logger.info('写入成功{}'.format(cookies))
driver.quit()
return
def get_info_by_bv(self, bv):
url = f"https://api.bilibili.com/x/web-interface/view?bvid={str(bv)}"
def try_get(url):
try:
response = requests.get(url, headers=self.header, cookies=self.cookies)
js_str = response.json()
if js_str.get('code', 0) == 0:
return js_str['data']
else:
# 可能需要根据API的设计,记录不同的错误
self.logger.error(
f"Video API returned non-success code: {js_str.get('code', 'Unknown')} with message: {js_str.get('msg', 'Unknown')}")
except requests.exceptions.RequestException as e:
self.logger.error(f"An error occurred: {e}")
return None
result = None
retry_count = 10
for _ in range(retry_count):
result = try_get(url)
if result:
break
return result
# 检查url是否合法
def check_url(self, url):
if url.startswith("BV"):
return True
elif url.startswith("https://www.bilibili.com/"):
return True
else:
return False
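# Worked check of the slice used in bv_get above: a BV id is "BV" plus 10
# characters, so the slice keeps exactly 12 characters (URL is illustrative).
url = "https://www.bilibili.com/video/BV1uG41197Tf?spm_id_from=333.999"
i = url.find("BV")
assert url[i:i + len("BV") + 10] == "BV1uG41197Tf"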
# Path: biliscrapy/network/bilibili_video.py
class Video:
def __init__(self):
script_path = os.path.dirname(os.path.abspath(__file__))
self.dir_path = os.path.join(script_path, 'data', 'video')
os.makedirs(self.dir_path, exist_ok=True)
self.utils = bili_utils()
self.script_dir = os.path.dirname(os.path.abspath(__file__))
        # Build the path of the cookie file
        file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')
        if not os.path.exists(file_path):
            # No cookie file yet: fall back to an empty cookie jar
            self.cookies_data = []
            self.cookies = {}
        else:
            with open(file_path, 'r') as file:
                self.cookies_data = json.load(file)
            self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}
self.headers = headers
self.logger = logging.getLogger('log')
def get_video_info(self, url: str) -> str:
"""
        Extract video information from the given URL.
        :param url: URL of the video to fetch information for.
        :return: a JSON string containing the video information, or the string 'url is invalid' if the URL is not valid.
"""
try:
isValid = self.utils.check_url(url)
if not isValid:
return 'url is invalid'
resp = requests.get(url, headers=self.headers, cookies=self.cookies)
cont = re.compile(r".*?window.__playinfo__=(?P<info1>.*?);\(function\(\)", re.S)
a = cont.search(resp.text, re.S)
info = a.group('info1').replace("</script><script>window.__INITIAL_STATE__=", ',')
return f"[{info}]"
except requests.RequestException as e:
self.logger.error("Error occurred while getting video info: {}".format(str(e)))
return ''
def download_file(self, url, filename):
"""
        Download a file.
        Parameters:
        url (str): URL of the file to download
        filename (str): path and file name under which the downloaded file is saved
"""
try:
response = requests.get(url, headers=self.headers, stream=True, cookies=self.cookies)
total_size = int(response.headers.get('Content-Length', 0))
block_size = 1024
progress_bar = tqdm(total=total_size, unit='B', unit_scale=True)
with open(os.path.join(self.dir_path, filename), 'wb') as file:
for data in response.iter_content(block_size):
file.write(data)
progress_bar.update(len(data))
progress_bar.close()
self.logger.info("Downloading file.{}".format(filename))
except requests.exceptions.RequestException as e:
self.logger.error("Error occurred while downloading the file: {}".format(str(e)))
def merge_video_audio(self, video_file, audio_file):
"""
        Merge a video file and an audio file.
        Parameters:
        self: reference to the instance.
        video_file: path of the video file.
        audio_file: path of the audio file.
        Returns:
        None
        Exceptions:
        If the video or audio file does not exist, an error message is printed and the method returns.
        Notes:
        The merged file is saved under the video file's base name with an '.mp4' extension.
        The original video and audio files are deleted after a successful merge.
"""
if not os.path.isfile(os.path.join(self.dir_path, video_file)):
print(f"Error: {video_file} 不是文件或不存在。")
return
if not os.path.isfile(os.path.join(self.dir_path, audio_file)):
print(f"Error: {audio_file} 不是文件或不存在。")
return
# 合并视频和音频文件
# 使用ffmpeg命令行工具将视频和音频文件合并为mp4格式文件
cmd = f"ffmpeg -i {os.path.join(self.dir_path, video_file)} -i {os.path.join(self.dir_path, audio_file)} -c:v copy -c:a aac -strict experimental {os.path.join(self.dir_path, video_file.replace('.flv', ''))}.mp4"
self.logger.info(cmd)
try:
os.system(cmd)
except Exception as e:
print(f"运行 ffmpeg 时发生错误: {e}")
return
# 检查合并后的文件是否成功创建
output_file = os.path.splitext(os.path.basename(video_file))[0] + '.mp4'
if not os.path.isfile(os.path.join(self.dir_path, output_file)):
print("文件合并失败。")
return
# 删除原始视频和音频文件
os.remove(os.path.join(self.dir_path, video_file))
os.remove(os.path.join(self.dir_path, audio_file))
self.logger.info(f"成功合并视频和音频,------->{output_file}")
# Path: biliscrapy/views.py
import time
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.utils.timezone import make_aware
from .models import BiliDanmu, BiliComment, BiliVideo, Card
from .network.bilibili_danmu import *
from .network.bilibili_comment import Comments
from .network.bilibili_utils import bili_utils
from .network.bilibili_video import Video
from django.utils import timezone
from django.http import JsonResponse, HttpResponse
logger.error("error~~~~~~~~~")
logger.error(e)
# 如果视频记录不存在,则创建新的视频记录
info = utils.get_info_by_bv(bvid)
logger.info("info---->{}".format(info))
if info is None:
return render(request, 'danmaku.html', context)
cid = utils.bv2cid(bvid)
logger.info(f'{cid}, cid')
video = BiliVideo(bvid=bvid,
avid=info['aid'],
oid=cid,
title=info['title'],
author=info['owner']['name'],
tag=info['tname'],
pubdate=make_aware(datetime.fromtimestamp(info['pubdate'])),
pic=info['pic'],
desc=info['desc'],
danmu_fetched=True,
danmaku_count=danmaku_count
) # 设置弹幕抓取状态
video.save()
logger.info("新视频信息已添加")
# 查询数据库并返回结果
# 查询数据库并返回结果
danmakus = BiliDanmu.objects.filter(cid=cid).values().order_by('ctime')
paginator = Paginator(danmakus, 15) # 每页显示10条记录
page_number = request.POST.get('page') if request.POST.get('page') else 1 # 获取页码参数
page_obj = paginator.get_page(page_number) # 获取对应页码的数据
print(paginator.count)
context = {
"url": url,
'result': 'error',
'bvid': bv,
'total': paginator.count,
'data': page_obj,
'new_request': not bvid_exists,
}
if len(danmakus) > 0:
context['result'] = 'success'
return render(request, 'danmaku.html', context)
return render(request, 'danmaku.html')
def comment(request):
if request.method == 'POST':
bv = request.POST.get('bv') # 获取用户输入的 BV 号或链接
url = bv
context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!',
'cid': ''
}
c = Comments()
bv_ = utils.bv_get(bv) if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith(
"BV") or bv.startswith("bv") else bv
logger.info(f'bv_====>{bv_}')
vv = BiliVideo.objects.filter(bvid=bv_).values()
# logger.info(vv[0]['avid'], 'sadjkaskjadssajasjdsjkaaashhakads')
av = utils.bv2av(bv_)
av_count = 1
while av is None:
logger.info(f"av is None, retrying...{av_count}")
av_count += 1
av = utils.bv2av(bv_)
avid = vv[0]['avid'] if vv else av
logger.info(f"avid=====>{avid}")
if avid is None:
context = {
'result': 'error',
'data': [],
'message': 'b站服务器返回错误,请重新尝试'
}
return render(request, 'comment.html', context)
comments_exist = BiliComment.objects.filter(avid=avid).exists()
if not comments_exist:
comments = c.get_comments(bv)
comment_obj = [BiliComment(
avid=avid,
uname=cmt['uname'],
current_level=cmt['current_level'],
like=cmt['like'],
sex=cmt['sex'],
ctime=make_aware(datetime.fromtimestamp(cmt['ctime'])),
message=cmt['message']
) for cmt in comments]
BiliComment.objects.bulk_create(comment_obj)
bili_comment_count = BiliComment.objects.filter(avid=avid).count()
try:
# 尝试更新视频的抓取弹幕的状态
video = BiliVideo.objects.get(avid=avid)
video.comment_fetched = True
video.comment_count = bili_comment_count
video.save()
except BiliVideo.DoesNotExist:
# 如果视频记录不存在,则创建新的视频记录
info = utils.get_info_by_bv(bv_)
if info is None:
return render(request, 'comment.html', context)
cid = utils.bv2cid(bv_)
# 如果cid 为空的话就一直重新尝试获取cid
cid_count = 1
while cid is None:
cid = utils.bv2cid(bv_)
logger.info(f'{cid}, cid,尝试了{cid_count}次')
cid_count += 1
time.sleep(3)
video = BiliVideo(avid=avid,
bvid=bv_,
oid=cid,
title=info['title'],
author=info['owner']['name'],
tag=info['tname'],
pubdate=make_aware(datetime.fromtimestamp(info['pubdate'])),
pic=info['pic'],
desc=info['desc'],
comment_fetched=True,
comment_count=bili_comment_count
) # 设置弹幕抓取状态
| video.save() |
====REPOSITORY====
# Repo Name: mjavadpur/Sadtalker_LongVideos
# Path: src/face3d/models/bfm.py
class ParametricFaceModel:
def __init__(self,
bfm_folder='./BFM',
recenter=True,
camera_distance=10.,
init_lit=np.array([
0.8, 0, 0, 0, 0, 0, 0, 0, 0
]),
focal=1015.,
center=112.,
is_train=True,
default_name='BFM_model_front.mat'):
if not os.path.isfile(os.path.join(bfm_folder, default_name)):
transferBFM09(bfm_folder)
model = loadmat(os.path.join(bfm_folder, default_name))
# mean face shape. [3*N,1]
self.mean_shape = model['meanshape'].astype(np.float32)
# identity basis. [3*N,80]
self.id_base = model['idBase'].astype(np.float32)
# expression basis. [3*N,64]
self.exp_base = model['exBase'].astype(np.float32)
# mean face texture. [3*N,1] (0-255)
self.mean_tex = model['meantex'].astype(np.float32)
# texture basis. [3*N,80]
self.tex_base = model['texBase'].astype(np.float32)
# face indices for each vertex that lies in. starts from 0. [N,8]
self.point_buf = model['point_buf'].astype(np.int64) - 1
# vertex indices for each face. starts from 0. [F,3]
self.face_buf = model['tri'].astype(np.int64) - 1
# vertex indices for 68 landmarks. starts from 0. [68,1]
self.keypoints = np.squeeze(model['keypoints']).astype(np.int64) - 1
if is_train:
# vertex indices for small face region to compute photometric error. starts from 0.
self.front_mask = np.squeeze(model['frontmask2_idx']).astype(np.int64) - 1
# vertex indices for each face from small face region. starts from 0. [f,3]
self.front_face_buf = model['tri_mask2'].astype(np.int64) - 1
# vertex indices for pre-defined skin region to compute reflectance loss
self.skin_mask = np.squeeze(model['skinmask'])
if recenter:
mean_shape = self.mean_shape.reshape([-1, 3])
mean_shape = mean_shape - np.mean(mean_shape, axis=0, keepdims=True)
self.mean_shape = mean_shape.reshape([-1, 1])
self.persc_proj = perspective_projection(focal, center)
self.device = 'cpu'
self.camera_distance = camera_distance
self.SH = SH()
self.init_lit = init_lit.reshape([1, 1, -1]).astype(np.float32)
def to(self, device):
self.device = device
for key, value in self.__dict__.items():
if type(value).__module__ == np.__name__:
setattr(self, key, torch.tensor(value).to(device))
def compute_shape(self, id_coeff, exp_coeff):
"""
Return:
face_shape -- torch.tensor, size (B, N, 3)
Parameters:
id_coeff -- torch.tensor, size (B, 80), identity coeffs
exp_coeff -- torch.tensor, size (B, 64), expression coeffs
"""
batch_size = id_coeff.shape[0]
id_part = torch.einsum('ij,aj->ai', self.id_base, id_coeff)
exp_part = torch.einsum('ij,aj->ai', self.exp_base, exp_coeff)
face_shape = id_part + exp_part + self.mean_shape.reshape([1, -1])
return face_shape.reshape([batch_size, -1, 3])
def compute_texture(self, tex_coeff, normalize=True):
"""
Return:
face_texture -- torch.tensor, size (B, N, 3), in RGB order, range (0, 1.)
Parameters:
tex_coeff -- torch.tensor, size (B, 80)
"""
batch_size = tex_coeff.shape[0]
face_texture = torch.einsum('ij,aj->ai', self.tex_base, tex_coeff) + self.mean_tex
if normalize:
face_texture = face_texture / 255.
return face_texture.reshape([batch_size, -1, 3])
def compute_norm(self, face_shape):
"""
Return:
vertex_norm -- torch.tensor, size (B, N, 3)
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
"""
v1 = face_shape[:, self.face_buf[:, 0]]
v2 = face_shape[:, self.face_buf[:, 1]]
v3 = face_shape[:, self.face_buf[:, 2]]
e1 = v1 - v2
e2 = v2 - v3
face_norm = torch.cross(e1, e2, dim=-1)
face_norm = F.normalize(face_norm, dim=-1, p=2)
face_norm = torch.cat([face_norm, torch.zeros(face_norm.shape[0], 1, 3).to(self.device)], dim=1)
vertex_norm = torch.sum(face_norm[:, self.point_buf], dim=2)
vertex_norm = F.normalize(vertex_norm, dim=-1, p=2)
return vertex_norm
def compute_color(self, face_texture, face_norm, gamma):
"""
Return:
face_color -- torch.tensor, size (B, N, 3), range (0, 1.)
Parameters:
face_texture -- torch.tensor, size (B, N, 3), from texture model, range (0, 1.)
face_norm -- torch.tensor, size (B, N, 3), rotated face normal
gamma -- torch.tensor, size (B, 27), SH coeffs
"""
batch_size = gamma.shape[0]
v_num = face_texture.shape[1]
a, c = self.SH.a, self.SH.c
gamma = gamma.reshape([batch_size, 3, 9])
gamma = gamma + self.init_lit
gamma = gamma.permute(0, 2, 1)
Y = torch.cat([
a[0] * c[0] * torch.ones_like(face_norm[..., :1]).to(self.device),
-a[1] * c[1] * face_norm[..., 1:2],
a[1] * c[1] * face_norm[..., 2:],
-a[1] * c[1] * face_norm[..., :1],
a[2] * c[2] * face_norm[..., :1] * face_norm[..., 1:2],
-a[2] * c[2] * face_norm[..., 1:2] * face_norm[..., 2:],
0.5 * a[2] * c[2] / np.sqrt(3.) * (3 * face_norm[..., 2:] ** 2 - 1),
-a[2] * c[2] * face_norm[..., :1] * face_norm[..., 2:],
0.5 * a[2] * c[2] * (face_norm[..., :1] ** 2 - face_norm[..., 1:2] ** 2)
], dim=-1)
r = Y @ gamma[..., :1]
g = Y @ gamma[..., 1:2]
b = Y @ gamma[..., 2:]
face_color = torch.cat([r, g, b], dim=-1) * face_texture
return face_color
def compute_rotation(self, angles):
"""
Return:
rot -- torch.tensor, size (B, 3, 3) pts @ trans_mat
Parameters:
angles -- torch.tensor, size (B, 3), radian
"""
batch_size = angles.shape[0]
ones = torch.ones([batch_size, 1]).to(self.device)
zeros = torch.zeros([batch_size, 1]).to(self.device)
x, y, z = angles[:, :1], angles[:, 1:2], angles[:, 2:],
rot_x = torch.cat([
ones, zeros, zeros,
zeros, torch.cos(x), -torch.sin(x),
zeros, torch.sin(x), torch.cos(x)
], dim=1).reshape([batch_size, 3, 3])
rot_y = torch.cat([
torch.cos(y), zeros, torch.sin(y),
zeros, ones, zeros,
-torch.sin(y), zeros, torch.cos(y)
], dim=1).reshape([batch_size, 3, 3])
rot_z = torch.cat([
torch.cos(z), -torch.sin(z), zeros,
torch.sin(z), torch.cos(z), zeros,
zeros, zeros, ones
], dim=1).reshape([batch_size, 3, 3])
rot = rot_z @ rot_y @ rot_x
return rot.permute(0, 2, 1)
def to_camera(self, face_shape):
face_shape[..., -1] = self.camera_distance - face_shape[..., -1]
return face_shape
def to_image(self, face_shape):
"""
Return:
face_proj -- torch.tensor, size (B, N, 2), y direction is opposite to v direction
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
"""
# to image_plane
face_proj = face_shape @ self.persc_proj
face_proj = face_proj[..., :2] / face_proj[..., 2:]
return face_proj
def transform(self, face_shape, rot, trans):
"""
Return:
face_shape -- torch.tensor, size (B, N, 3) pts @ rot + trans
Parameters:
face_shape -- torch.tensor, size (B, N, 3)
rot -- torch.tensor, size (B, 3, 3)
trans -- torch.tensor, size (B, 3)
"""
return face_shape @ rot + trans.unsqueeze(1)
def get_landmarks(self, face_proj):
"""
Return:
face_lms -- torch.tensor, size (B, 68, 2)
Parameters:
face_proj -- torch.tensor, size (B, N, 2)
"""
return face_proj[:, self.keypoints]
def split_coeff(self, coeffs):
"""
Return:
coeffs_dict -- a dict of torch.tensors
Parameters:
coeffs -- torch.tensor, size (B, 256)
"""
id_coeffs = coeffs[:, :80]
exp_coeffs = coeffs[:, 80: 144]
tex_coeffs = coeffs[:, 144: 224]
angles = coeffs[:, 224: 227]
gammas = coeffs[:, 227: 254]
translations = coeffs[:, 254:]
return {
'id': id_coeffs,
'exp': exp_coeffs,
'tex': tex_coeffs,
'angle': angles,
'gamma': gammas,
'trans': translations
}
def compute_for_render(self, coeffs):
"""
Return:
face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate
face_color -- torch.tensor, size (B, N, 3), in RGB order
landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
Parameters:
coeffs -- torch.tensor, size (B, 257)
"""
coef_dict = self.split_coeff(coeffs)
face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
rotation = self.compute_rotation(coef_dict['angle'])
face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
face_vertex = self.to_camera(face_shape_transformed)
face_proj = self.to_image(face_vertex)
landmark = self.get_landmarks(face_proj)
face_texture = self.compute_texture(coef_dict['tex'])
face_norm = self.compute_norm(face_shape)
face_norm_roted = face_norm @ rotation
face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])
return face_vertex, face_texture, face_color, landmark
def compute_for_render_woRotation(self, coeffs):
"""
Return:
face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate
face_color -- torch.tensor, size (B, N, 3), in RGB order
landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
Parameters:
coeffs -- torch.tensor, size (B, 257)
"""
coef_dict = self.split_coeff(coeffs)
face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
#rotation = self.compute_rotation(coef_dict['angle'])
#face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
face_vertex = self.to_camera(face_shape)
face_proj = self.to_image(face_vertex)
landmark = self.get_landmarks(face_proj)
face_texture = self.compute_texture(coef_dict['tex'])
face_norm = self.compute_norm(face_shape)
face_norm_roted = face_norm # @ rotation
face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])
return face_vertex, face_texture, face_color, landmark
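# Sanity sketch (not from the repository above): the slices in split_coeff
# partition a 257-dimensional coefficient vector as follows.
coeff_layout = {"id": 80, "exp": 64, "tex": 80, "angle": 3, "gamma": 27, "trans": 3}
assert sum(coeff_layout.values()) == 257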
# Path: src/face3d/models/facerecon_model.py
class FaceReconModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=False):
""" Configures options specific for CUT model
"""
# net structure and parameters
parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
parser.add_argument('--init_path', type=str, default='./checkpoints/init_model/resnet50-0676ba61.pth')
parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/')
parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
# renderer parameters
parser.add_argument('--focal', type=float, default=1015.)
parser.add_argument('--center', type=float, default=112.)
parser.add_argument('--camera_d', type=float, default=10.)
parser.add_argument('--z_near', type=float, default=5.)
parser.add_argument('--z_far', type=float, default=15.)
if is_train:
# training parameters
parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r43', 'r50'], help='face recog network structure')
parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')
# augmentation parameters
parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')
# loss weights
parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
parser.add_argument('--w_color', type=float, default=1.92, help='weight for loss loss')
parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')
opt, _ = parser.parse_known_args()
parser.set_defaults(
focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
)
if is_train:
parser.set_defaults(
use_crop_face=True, use_predef_M=False
)
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
self.visual_names = ['output_vis']
self.model_names = ['net_recon']
self.parallel_names = self.model_names + ['renderer']
self.facemodel = ParametricFaceModel(
bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
is_train=self.isTrain, default_name=opt.bfm_model
)
fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
self.renderer = MeshRenderer(
rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
)
if self.isTrain:
self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']
self.net_recog = networks.define_net_recog(
net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
)
# loss func name: (compute_%s_loss) % loss_name
self.compute_feat_loss = perceptual_loss
            self.compute_color_loss = photo_loss
self.compute_lm_loss = landmark_loss
self.compute_reg_loss = reg_loss
self.compute_reflc_loss = reflectance_loss
self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr)
self.optimizers = [self.optimizer]
self.parallel_names += ['net_recog']
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
self.input_img = input['imgs'].to(self.device)
self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None
self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None
self.trans_m = input['M'].to(self.device) if 'M' in input else None
self.image_paths = input['im_paths'] if 'im_paths' in input else None
def forward(self, output_coeff, device):
self.facemodel.to(device)
self.pred_vertex, self.pred_tex, self.pred_color, self.pred_lm = \
self.facemodel.compute_for_render(output_coeff)
self.pred_mask, _, self.pred_face = self.renderer(
self.pred_vertex, self.facemodel.face_buf, feat=self.pred_color)
self.pred_coeffs_dict = self.facemodel.split_coeff(output_coeff)
def compute_losses(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
assert self.net_recog.training == False
trans_m = self.trans_m
if not self.opt.use_predef_M:
trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2])
pred_feat = self.net_recog(self.pred_face, trans_m)
gt_feat = self.net_recog(self.input_img, self.trans_m)
self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat)
face_mask = self.pred_mask
if self.opt.use_crop_face:
face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf)
face_mask = face_mask.detach()
            self.loss_color = self.opt.w_color * self.compute_color_loss(
self.pred_face, self.input_img, self.atten_mask * face_mask)
loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt)
self.loss_reg = self.opt.w_reg * loss_reg
self.loss_gamma = self.opt.w_gamma * loss_gamma
self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm)
self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, self.facemodel.skin_mask)
self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \
+ self.loss_lm + self.loss_reflc
def optimize_parameters(self, isTrain=True):
self.forward()
self.compute_losses()
"""Update network weights; it will be called in every training iteration."""
if isTrain:
self.optimizer.zero_grad()
self.loss_all.backward()
self.optimizer.step()
def compute_visuals(self):
with torch.no_grad():
input_img_numpy = 255. * self.input_img.detach().cpu().permute(0, 2, 3, 1).numpy()
output_vis = self.pred_face * self.pred_mask + (1 - self.pred_mask) * self.input_img
output_vis_numpy_raw = 255. * output_vis.detach().cpu().permute(0, 2, 3, 1).numpy()
if self.gt_lm is not None:
gt_lm_numpy = self.gt_lm.cpu().numpy()
pred_lm_numpy = self.pred_lm.detach().cpu().numpy()
output_vis_numpy = util.draw_landmarks(output_vis_numpy_raw, gt_lm_numpy, 'b')
output_vis_numpy = util.draw_landmarks(output_vis_numpy, pred_lm_numpy, 'r')
output_vis_numpy = np.concatenate((input_img_numpy,
output_vis_numpy_raw, output_vis_numpy), axis=-2)
else:
output_vis_numpy = np.concatenate((input_img_numpy,
output_vis_numpy_raw), axis=-2)
self.output_vis = torch.tensor(
output_vis_numpy / 255., dtype=torch.float32
).permute(0, 3, 1, 2).to(self.device)
def save_mesh(self, name):
recon_shape = self.pred_vertex # get reconstructed shape
recon_shape[..., -1] = 10 - recon_shape[..., -1] # from camera space to world space
recon_shape = recon_shape.cpu().numpy()[0]
recon_color = self.pred_color
recon_color = recon_color.cpu().numpy()[0]
tri = self.facemodel.face_buf.cpu().numpy()
mesh = trimesh.Trimesh(vertices=recon_shape, faces=tri, vertex_colors=np.clip(255. * recon_color, 0, 255).astype(np.uint8))
mesh.export(name)
def save_coeff(self,name):
pred_coeffs = {key:self.pred_coeffs_dict[key].cpu().numpy() for key in self.pred_coeffs_dict}
pred_lm = self.pred_lm.cpu().numpy()
pred_lm = np.stack([pred_lm[:,:,0],self.input_img.shape[2]-1-pred_lm[:,:,1]],axis=2) # transfer to image coordinate
pred_coeffs['lm68'] = pred_lm
savemat(name,pred_coeffs)
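# Schematic sketch (not from the repository above): compute_losses above builds
# the training objective as a plain weighted sum, with these default weights
# taken from modify_commandline_options.
default_weights = {
    "feat": 0.2, "color": 1.92, "reg": 3.0e-4,
    "gamma": 10.0, "lm": 1.6e-3, "reflc": 5.0,
}
# loss_all = sum(default_weights[k] * unweighted_loss[k] for k in default_weights)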
# Path: src/face3d/visualize.py
import cv2
import numpy as np
import torch
import subprocess, platform
import scipy.io as scio
from src.face3d.models.bfm import ParametricFaceModel
from src.face3d.models.facerecon_model import FaceReconModel
from tqdm import tqdm
# check the sync of 3dmm feature and the audio
# draft
def gen_composed_video(args, device, first_frame_coeff, coeff_path, audio_path, save_path, exp_dim=64):
coeff_first = scio.loadmat(first_frame_coeff)['full_3dmm']
coeff_pred = scio.loadmat(coeff_path)['coeff_3dmm']
coeff_full = np.repeat(coeff_first, coeff_pred.shape[0], axis=0) # 257
coeff_full[:, 80:144] = coeff_pred[:, 0:64]
coeff_full[:, 224:227] = coeff_pred[:, 64:67] # 3 dim translation
coeff_full[:, 254:] = coeff_pred[:, 67:] # 3 dim translation
tmp_video_path = '/tmp/face3dtmp.mp4'
facemodel = FaceReconModel(args)
video = cv2.VideoWriter(tmp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), 25, (224, 224))
for k in tqdm(range(coeff_pred.shape[0]), 'face3d rendering:'):
cur_coeff_full = torch.tensor(coeff_full[k:k+1], device=device)
facemodel.forward(cur_coeff_full, device)
predicted_landmark = facemodel.pred_lm # TODO.
predicted_landmark = predicted_landmark.cpu().numpy().squeeze()
rendered_img = facemodel.pred_face
rendered_img = 255. * rendered_img.cpu().numpy().squeeze().transpose(1,2,0)
out_img = rendered_img[:, :, :3].astype(np.uint8)
video.write(np.uint8(out_img[:,:,::-1]))
video.release()
| command = 'ffmpeg -v quiet -y -i {} -i {} -strict -2 -q:v 1 {}'.format(audio_path, tmp_video_path, save_path) |
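# Assumption, not shown in this excerpt: a composed command string like the one
# above is usually handed to the shell afterwards, which would be consistent
# with the `subprocess, platform` import at the top of visualize.py.
subprocess.call(command, shell=platform.system() != 'Windows')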
====REPOSITORY====
# Repo Name: Westlake-geeks/bilibili-livestream-slicer
# Path: Ui_test.py
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
# 主窗口和网格布局
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 550)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(150, 70, 510, 300))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
#输入框左侧的文字
self.roomLabel = QtWidgets.QLabel(self.gridLayoutWidget)
self.roomLabel.setObjectName("roomLabel")
self.gridLayout.addWidget(self.roomLabel, 0, 0, 1, 1)
# 输入框
self.roomEdit = QtWidgets.QLineEdit(self.gridLayoutWidget)
self.roomEdit.setObjectName("roomEdit")
self.gridLayout.addWidget(self.roomEdit, 0, 1, 1, 1)
# 垂直布局 用于排列按钮
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
# add按钮
self.addButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.addButton.setObjectName("addButton")
self.verticalLayout.addWidget(self.addButton)
# start按钮
self.startButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.startButton.setObjectName("startButton")
self.verticalLayout.addWidget(self.startButton)
# stop按钮
self.stopButton = QtWidgets.QPushButton(self.gridLayoutWidget)
self.stopButton.setObjectName("stopButton")
self.verticalLayout.addWidget(self.stopButton)
# 按钮下方的占位控件
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
# 把垂直布局部署到网格
self.gridLayout.addLayout(self.verticalLayout, 1, 0, 1, 1)
# 直播间列表
self.logPoster = QtWidgets.QListWidget(self.gridLayoutWidget)
self.logPoster.setObjectName("logPoster")
self.gridLayout.addWidget(self.logPoster, 1, 1, 1, 1)
# 主窗口布局相关设置
MainWindow.setCentralWidget(self.centralwidget)
# 并没有用到的顶部菜单栏和底部状态栏
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
# 设置文字
self.retranslateUi(MainWindow)
# 连接槽
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
# 设置文字
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "LiveStreamSlicer"))
self.addButton.setText(_translate("MainWindow", "add"))
self.startButton.setText(_translate("MainWindow", "start all"))
self.stopButton.setText(_translate("MainWindow", "stop all"))
self.roomLabel.setText(_translate("MainWindow", "roomnumber:"))
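# Usage sketch (not from the repository above): a generated UI class like
# Ui_MainWindow is attached to a QMainWindow in the standard PyQt5 way;
# AutoSliceMachine.py below does the same thing by subclassing both.
import sys
from PyQt5 import QtWidgets
from Ui_test import Ui_MainWindow
app = QtWidgets.QApplication(sys.argv)
window = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(window)
window.show()
sys.exit(app.exec_())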
# Path: api.py
def is_live(uid):
live_api = "https://api.live.bilibili.com/room/v1/Room/room_init?id=%s" % str(
uid)
rtn = my_request(live_api)
data_dict = json.loads(rtn)
data_value = data_dict.get('data')
live_status_value = data_value.get('live_status')
if live_status_value:
return True
else:
return False
# Path: api.py
def get_name(uid):
live_api = "https://api.live.bilibili.com/room/v1/Room/room_init?id=%s" % str(
uid)
rtn = my_request(live_api)
data_dict = json.loads(rtn)
data_value = data_dict.get('data')
duid_value = data_value.get('uid')
home_url = "https://space.bilibili.com/%s/" % duid_value
headers = {
'cookie': "buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1",
# 'referer': "https://space.bilibili.com/353609978/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47"
}
response = requests.get(url=home_url, headers=headers)
user_name = re.findall(r'<title>(.*?)的个人空间', response.text)[0]
if user_name:
return (user_name)
else:
return ("未找到指定用户名称")
# Path: AutoSliceMachine.py
import sys
import matplotlib.pyplot as plt
import numpy as np
import main
import os
import time
import json
import sys
import moviepy
import whisper
import requests
import re
from PyQt5 import QtCore, QtGui, QtWidgets
from Ui_test import Ui_MainWindow
from bilibili_api import live, sync, Credential,video_uploader
from datetime import datetime, timedelta
from api import is_live
from matplotlib.backends.backend_pdf import PdfPages
from moviepy.editor import VideoFileClip
from selenium import webdriver
from api import get_name
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ChromeOptions
from PIL import Image
from PIL import Image, ImageDraw, ImageFont
try:
time.sleep(1)
submit_button = web.find_element(By.CLASS_NAME, "submit-add")
submit_button.click()
time.sleep(100000)
except:
pass
# Create a thread that records the live video stream; the core call is main.__main__(id), where id is the room number as a str
class ThreadC_C(QtCore.QThread):
# 多线程
def __init__(self, file_name,num,start_time,end_time):
super(ThreadC_C, self).__init__()
self.num = num
self.start_time = start_time
self.end_time = end_time
#self.now = datetime.now()
self.file_name = file_name
def run(self):
#print('1111111111111111111111-------------------------------------------------------------------------------------111111111111111111111111-------------------------------------111')
time.sleep(10)#wait for 10 seconds
output_file_path = clipP(self.file_name,self.num,self.start_time,self.end_time)
try:
sync(uploader_video(output_file_path,self.num))
except:
print('上传失败')
class ThreadV(QtCore.QThread):
# 多线程
def __init__(self, num):
super(ThreadV, self).__init__()
self.num = num
self.now = datetime.now()+ timedelta(seconds=2)
self.file_name = "videos/" + str(self.num) + self.now.strftime("_%Y_%m_%d_%H_%M_%S_%f_"+"_.flv")
self.clip = 0
def run(self):
#pass
main.__main__(self.num,self.file_name)
class Thread(QtCore.QThread):
# 多线程
def __init__(self, num):
super(Thread, self).__init__()
self.num = num
def run(self): #爬取弹幕
def parse_datetime(datetime_str):
return datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S")
credential = Credential(
sessdata="daaa877d%2C1718024867%2C467ef%2Ac1CjBp5PW0dsSQNj39aOl6oD7y3C6KnP_5VRmTVGJBAI822wuSR5E-aQqFHqOr9C8qKoISVldObldPTmgwM0lFV3pOWWlDeGlyLTI5UWtjRmQ2T1NScGFyQzNHRnczeGRtUnI2S0xPNXV2TVM1N0NHS3ZSc1A4RXBweVphcURsTExiano1SXl6aDVRIIEC",
bili_jct="71b585f44771a21249f38244524e8a65",
buvid3="E69A492D-3A10-E4EE-138B-ED2AE0BC42CE15929infoc",
dedeuserid="142351113"
)
room = live.LiveDanmaku(self.num, credential=credential)
danmu_count = {}
danmu_sentiment = {}
sentiment_list = []
#main.__main__(self.num)
danmu_list={}
@room.on('DANMU_MSG')
async def on_danmaku(event):
# 收到弹幕f
timestamp = event["data"]["info"][9]['ts']
dt_object = datetime.fromtimestamp(timestamp)
danmu = event["data"]["info"][1]
#print(danmu)
ren = event["data"]["info"][2][1]
time = str(dt_object)
global que
try:
datetime_obj = parse_datetime(time)
# 5s
time_key = datetime_obj - timedelta(seconds=datetime_obj.second % 5)
#40 s
#time_key = datetime_obj - timedelta(seconds=datetime_obj.second % 40)
#time_key = datetime_obj - timedelta(seconds=0)
#danmu_count[time_key] = danmu_count.get(time_key, 0) + 1
#time_key = datetime_obj - timedelta(seconds=1)
#danmu_count[time_key] = danmu_count.get(time_key, 0) + 1
#time_key = datetime_obj - timedelta(seconds=2)
#danmu_count[time_key] = danmu_count.get(time_key, 0) + 1
#time_key = datetime_obj - timedelta(seconds=3)
#danmu_count[time_key] = danmu_count.get(time_key, 0) + 1
#time_key = datetime_obj - timedelta(seconds=4)
que[self.num][time_key] = que[self.num].get(time_key, 0) + 1
except:
print("Someting Wrong!")
sync(room.connect())
class MainForm(QtWidgets.QMainWindow, Ui_MainWindow):
    # Subclasses the generated UI window class Ui_MainWindow
    def __init__(self):
        super(MainForm, self).__init__()
        self.setupUi(self)
        self.roomlist = []  # list of live-room ids
        self.processlist = []
        self.roomstatus = {}  # live status of each room
        self.roomtot = 0
        self.maximum = 40  # upper bound on concurrent threads
        self.addButton.clicked.connect(self.add)  # wire the UI buttons to their handler functions
        self.startButton.clicked.connect(self.start)
        self.stopButton.clicked.connect(self.stop)
    def add(self):  # add a new room link
self.roominput = self.roomEdit.text()
if not self.roominput.isdigit():
# self.logPoster.append('invalid room number!')
print('invalid room number!')
| pass
|
====REPOSITORY====
# Repo Name: Angryrou/udao
# Path: udao/utils/logging.py
def _get_logger(name: str = "udao", level: int = logging.DEBUG) -> logging.Logger:
# Path: udao/optimization/concepts/objective.py
class Objective(Constraint):
"""
Parameters
----------
name : str
Name of the objective.
minimize : bool
Direction of the objective: if True, minimize, else maximize.
type: VarTypes
Type of the objective, by default VarTypes.FLOAT
"""
def __init__(
self,
name: str,
minimize: bool,
function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],
lower: Optional[float] = None,
upper: Optional[float] = None,
type: VarTypes = VarTypes.FLOAT,
):
super().__init__(function=function, lower=lower, upper=upper)
self.name = name
self.minimize = minimize
self.type = type
@property
def direction(self) -> int:
"""Get gradient direction from optimization type"""
if self.minimize:
return 1
else:
return -1
def __repr__(self) -> str:
return (
f"Objective(name={self.name}, "
f"direction={'min' if self.minimize else 'max'}, "
f"lower={self.lower}, upper={self.upper})"
)
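# Quick illustration (not from the repository): `direction` maps the minimize
# flag onto the sign a solver multiplies the objective by, so it can always
# minimize direction * value. This assumes the (unshown) Constraint base class
# simply stores the callable passed as `function`.
latency = Objective("latency", minimize=True, function=lambda **kw: 0)
throughput = Objective("throughput", minimize=False, function=lambda **kw: 0)
assert latency.direction == 1 and throughput.direction == -1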
# Path: udao/optimization/concepts/problem.py
class MOProblem(BaseProblem):
"""Multi-objective optimization problem."""
def __init__(
self,
objectives: Sequence[Objective],
variables: Dict[str, Variable],
constraints: Sequence[Constraint],
data_processor: Optional[DataProcessor] = None,
input_parameters: Optional[Dict[str, Any]] = None,
) -> None:
self.objectives = objectives
super().__init__(
variables,
constraints,
data_processor=data_processor,
input_parameters=input_parameters,
)
def __repr__(self) -> str:
return (
f"MOProblem(objectives={self.objectives}, "
f"variables={self.variables}, "
f"constraints={self.constraints}, "
f"input_parameters={self.input_parameters})"
)
# Path: udao/optimization/concepts/problem.py
class SOProblem(BaseProblem):
"""Single-objective optimization problem."""
def __init__(
self,
objective: Objective,
variables: Dict[str, Variable],
constraints: Sequence[Constraint],
data_processor: Optional[DataProcessor] = None,
input_parameters: Optional[Dict[str, Any]] = None,
) -> None:
self.objective = objective
super().__init__(
variables,
constraints,
data_processor=data_processor,
input_parameters=input_parameters,
)
def __repr__(self) -> str:
return (
f"SOProblem(objective={self.objective}, "
f"variables={self.variables}, "
f"constraints={self.constraints}, "
f"input_parameters={self.input_parameters})"
)
# Path: udao/optimization/soo/so_solver.py
class SOSolver(ABC):
@abstractmethod
def solve(
self,
problem: SOProblem,
seed: Optional[int] = None,
) -> Tuple[float, Dict[str, float]]:
"""Solve a single-objective optimization problem
Parameters
----------
problem : SOProblem
Single-objective optimization problem to solve
seed : Optional[int], optional
Random seed, by default None
Returns
-------
Tuple[float, Dict[str, float]]
A tuple of the objective value and the variables
that optimize the objective
"""
...
# Path: udao/optimization/utils/moo_utils.py
class Point:
class Rectangle:
def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:
def __repr__(self) -> str:
def __eq__(self, other: "Point") -> bool: # type: ignore
def __init__(self, utopia: Point, nadir: Point) -> None:
def __repr__(self) -> str:
def cal_volume(self, upper_bounds: np.ndarray, lower_bounds: np.ndarray) -> float:
def __lt__(self, other: "Rectangle") -> bool:
def __eq__(self, other: "Rectangle") -> bool: # type: ignore
def is_pareto_efficient(costs: np.ndarray, return_mask: bool = True) -> np.ndarray:
def summarize_ret(
po_obj_list: Sequence, po_var_list: Sequence
) -> Tuple[np.ndarray, np.ndarray]:
def even_weights(stepsize: float, n_objectives: int) -> np.ndarray:
def plot_po(po: np.ndarray, n_obj: int = 2, title: str = "pf_ap") -> None:
def get_default_device() -> th.device:
# Path: udao/optimization/utils/exceptions.py
class NoSolutionError(ValueError):
"Raised when no solution is found for an MOO problem"
...
# Path: udao/optimization/utils/moo_utils.py
class Point:
def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:
"""
A point in the objective space.
Variables are optional, and are not specified for imaginary points
(e.g., utopia and nadir)
Parameters
----------
objs : np.ndarray
Array of objective values of shape (n_objs,)
vars :np.ndarray, optional
Array of variable values of shape (n_vars,), by default None
"""
self.objs = objs
self.vars = vars
self.n_objs = objs.shape[0]
def __repr__(self) -> str:
return f"Point(objs={self.objs}, vars={self.vars})"
def __eq__(self, other: "Point") -> bool: # type: ignore
return bool(np.all(self.objs == other.objs) and np.all(self.vars == other.vars))
# Path: udao/optimization/utils/moo_utils.py
class Rectangle:
def __init__(self, utopia: Point, nadir: Point) -> None:
"""
Parameters
----------
utopia : Points
utopia point
nadir : Points
nadir point
"""
self.upper_bounds = nadir.objs
self.lower_bounds = utopia.objs
self.n_objs = nadir.objs.shape[0]
self.volume = self.cal_volume(nadir.objs, utopia.objs)
self.neg_vol = -self.volume
self.utopia = utopia
self.nadir = nadir
def __repr__(self) -> str:
return f"Rectangle(utopia={self.utopia}, nadir={self.nadir})"
def cal_volume(self, upper_bounds: np.ndarray, lower_bounds: np.ndarray) -> float:
"""
Calculate the volume of the hyper_rectangle
Parameters
----------
upper_bounds : np.ndarray
Array of upper bounds of the hyper_rectangle, of shape (n_objs,)
lower_bounds : np.ndarray
Array of lower bounds of the hyper_rectangle, of shape (n_objs,)
Returns
-------
float
volume of the hyper_rectangle
"""
volume = np.abs(np.prod(upper_bounds - lower_bounds))
return volume
# Override the `__lt__()` function to make `Rectangles`
# class work with min-heap (referred from VLDB2022)
def __lt__(self, other: "Rectangle") -> bool:
return self.neg_vol < other.neg_vol
def __eq__(self, other: "Rectangle") -> bool: # type: ignore
return bool(
np.all(self.upper_bounds == other.upper_bounds)
and np.all(self.lower_bounds == other.lower_bounds)
)
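# [Editor's note] Illustration only, not part of the original repository. Assuming numpy is
# available, this shows the hyper-rectangle volume used by Rectangle.cal_volume above: the
# product of the per-objective extents |nadir - utopia|.
import numpy as np

utopia_objs = np.array([0.0, 1.0, 2.0])
nadir_objs = np.array([2.0, 4.0, 5.0])
print(np.abs(np.prod(nadir_objs - utopia_objs)))  # 2 * 3 * 3 = 18.0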
# Path: udao/optimization/moo/progressive_frontier/base_progressive_frontier.py
class BaseProgressiveFrontier(MOSolver, ABC):
"""
Base class for Progressive Frontier.
Includes the common methods for Progressive Frontier.
"""
@dataclass
class Params:
"""Parameters for Progressive Frontier"""
pass
def __init__(
self,
solver: SOSolver,
params: Params,
) -> None:
super().__init__()
self.solver = solver
self.opt_obj_ind = 0
def get_anchor_point(
self,
problem: MOProblem,
obj_ind: int,
seed: Optional[int] = None,
) -> Point:
"""
Find the anchor point for the given objective,
by unbounded single objective optimization
Parameters
----------
problem : MOProblem
MOO problem in which the objective is to be optimized
obj_ind : int
index of the objective to be optimized
Returns
-------
Point
anchor point for the given objective
"""
try:
_, soo_vars = self.solver.solve(
problem.derive_SO_problem(
objective=problem.objectives[obj_ind],
),
seed=seed,
)
except NoSolutionError:
raise NoSolutionError("Cannot find anchor points.")
else:
objs = self._compute_objectives(problem, soo_vars)
# If the current objective type is Integer,
# further find the optimal value for other objectives with float type
if problem.objectives[obj_ind].type == VarTypes.INT:
utopia_init = np.array(
[0 if i != obj_ind else objs[obj_ind] for i in range(len(problem.objectives))]
)
utopia_tmp, nadir_tmp = Point(objs=utopia_init), Point(objs=objs)
# select the first objective with float type
float_obj_ind = [
i
for i, objective in enumerate(problem.objectives)
if objective.type == VarTypes.FLOAT
][0]
obj_bounds_dict_so = self._form_obj_bounds_dict(
problem, utopia_tmp, nadir_tmp
)
so_problem = self._so_problem_from_bounds_dict(
problem, obj_bounds_dict_so, problem.objectives[float_obj_ind]
)
try:
_, soo_vars_update = self.solver.solve(so_problem, seed=seed)
except NoSolutionError:
raise NoSolutionError("Cannot find anchor points.")
else:
logger.debug(f"soo_vars_update is: {soo_vars_update}")
objs = self._compute_objectives(problem, soo_vars_update)
return Point(objs, soo_vars_update)
else:
return Point(objs, soo_vars)
def _form_obj_bounds_dict(
self, problem: MOProblem, utopia: Point, nadir: Point
) -> dict[str, list[float]]:
"""
form the dict used in the constrained optimization
e.g. the format:
obj_bounds_dict = {
"latency": [0, 10000000],
"cores": [0, 58]
}
Parameters
----------
utopia: Point
the utopia point
nadir: Point
the nadir point
opt_obj_ind: int
the index of objective to be optimized
Returns
-------
dict with upper and lower bound for each objective
"""
bounds = {}
for i, objective in enumerate(problem.objectives):
if objective.direction < 0:
bounds[objective.name] = [
nadir.objs[i] * objective.direction,
utopia.objs[i] * objective.direction,
]
else:
bounds[objective.name] = [
utopia.objs[i] * objective.direction,
nadir.objs[i] * objective.direction,
]
return bounds
def _so_problem_from_bounds_dict(
self,
problem: MOProblem,
obj_bounds_dict: dict[str, list],
primary_obj: Objective,
) -> SOProblem:
"""
Parameters
----------
obj_bounds_dict : dict[str, list]
A lower and upper bound for each objective
primary_obj : Objective
The objective to be optimized
Returns
-------
SOProblem
The single-objective problem whose constraints include the bounded secondary objectives
"""
soo_objective = Objective(
name=primary_obj.name,
minimize=primary_obj.minimize,
function=primary_obj.function,
lower=obj_bounds_dict[primary_obj.name][0],
upper=obj_bounds_dict[primary_obj.name][1],
)
so_problem = problem.derive_SO_problem(soo_objective)
soo_constraints = list(so_problem.constraints)
for obj in problem.objectives:
obj_name = obj.name
if obj_name != primary_obj.name:
soo_constraints.append(
Objective(
name=obj.name,
minimize=obj.minimize,
lower=obj_bounds_dict[obj_name][0],
upper=obj_bounds_dict[obj_name][1],
function=obj.function,
)
)
so_problem.constraints = soo_constraints
return so_problem
@staticmethod
def get_utopia_and_nadir(points: list[Point]) -> Tuple[Point, Point]:
"""
get the utopia and nadir points from a list of points
Parameters
----------
points: list[Point],
each element is a Point (defined class).
Returns
-------
Tuple[Point, Point]
utopia and nadir point
"""
if len(points) == 0:
raise ValueError("The input list of points is empty.")
n_objs = points[0].n_objs
if any([point.n_objs != n_objs for point in points]):
raise Exception("The number of objectives is not consistent among points.")
best_objs = [min([point.objs[i] for point in points]) for i in range(n_objs)]
worst_objs = [max([point.objs[i] for point in points]) for i in range(n_objs)]
logger.debug(f"best_objs {best_objs}")
utopia = Point(np.array(best_objs))
nadir = Point(np.array(worst_objs))
return utopia, nadir
def _compute_objectives(
self,
problem: MOProblem,
variable_values: dict[str, Any],
) -> np.ndarray:
"""Compute an array of objective for a given point.
(variable_values is a dict of variable name and single value)
Parameters
----------
problem : MOProblem
MOO problem defining the objectives to evaluate
variable_values : dict[str, Any]
Name: value of variables
Returns
-------
np.ndarray
Array of objective values, one per objective, with each objective's direction applied
"""
obj_list = []
for obj in problem.objectives:
obj_value = problem.apply_function(
obj, variable_values, device=th.device("cpu")
)
obj_value = (obj_value * obj.direction).squeeze()
obj_list.append(obj_value.detach())
return np.array(obj_list)
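# [Editor's note] Illustration only, not from the repository. A minimal numpy sketch of what
# get_utopia_and_nadir above computes: the component-wise best (min) and worst (max) objective
# values over a set of points.
import numpy as np

objs = np.array([[1.0, 4.0],
                 [2.0, 3.0],
                 [0.5, 5.0]])
print(objs.min(axis=0))  # utopia objectives -> [0.5 3. ]
print(objs.max(axis=0))  # nadir objectives  -> [2. 5.]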
# Path: udao/optimization/moo/progressive_frontier/parallel_progressive_frontier.py
import itertools
import numpy as np
import torch as th
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from torch.multiprocessing import Pool
from ....utils.logging import logger
from ...concepts import MOProblem, Objective, SOProblem
from ...soo.so_solver import SOSolver
from ...utils import moo_utils as moo_ut
from ...utils.exceptions import NoSolutionError
from ...utils.moo_utils import Point, Rectangle
from .base_progressive_frontier import BaseProgressiveFrontier
class ParallelProgressiveFrontier(BaseProgressiveFrontier):
@dataclass
class Params(BaseProgressiveFrontier.Params):
| processes: int = 1 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: XLearning-SCU/2023-TPAMI-SMILE
# Path: DistComput.py
def get_dist_release(loader, dist_path):
if not os.path.exists(dist_path):
# loader = test_loader
num_data = [10]
with torch.no_grad():
dist_list = [[] for i in range(len(num_data))]
for j, data_t in enumerate(loader, 0):
# get all inputs
fea0, fea1, class_labels0, class_labels1, mask, is_pair, idx = data_t
inputs_t = fea0.cuda()
# inputs_t = torch.cat([fea0,fea1]).cuda()
# labels_t = torch.cat([class_labels0,class_labels1]).cuda()
# inputs_t, _, labels_t, _ = data_t
# inputs_t, labels_t = inputs_t.cuda(), labels_t.cuda()
for i in range(len(inputs_t)):
if i % 1000 == 0:
print(i)
aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])
# dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))
# dist_m = dist[:, 0]
# print(aa.shape)
dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))
dist_m[i] = 1000
sorted_dist = np.sort(dist_m.cpu().numpy())
for jj in range(len(num_data)):
dist_list[jj].append(sorted_dist[num_data[jj]])
inputs_t = fea1.cuda()
for i in range(len(inputs_t)):
if i % 1000 == 0:
print(i)
aa = torch.mul(inputs_t - inputs_t[i], inputs_t - inputs_t[i])
# dist = torch.sqrt(torch.sum(aa, dim=(2, 3)))
# dist_m = dist[:, 0]
# print(aa.shape)
dist_m = torch.sqrt(torch.sum(aa, dim=tuple(torch.arange(1, len(aa.shape)))))
dist_m[i] = 1000
sorted_dist = np.sort(dist_m.cpu().numpy())
for jj in range(len(num_data)):
dist_list[jj].append(sorted_dist[num_data[jj]])
for ii in range(len(num_data)):
DirectoryOperator(dist_path).make_fold()
np.savetxt(dist_path, np.array(dist_list[ii]))
dist = torch.from_numpy(
np.loadtxt(
dist_path
).astype(np.float32)
)
return dist
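# [Editor's note] Illustration only, not part of the repository. A simplified sketch of the
# core computation in get_dist_release above: the Euclidean distance from one sample to every
# other sample, with the self-distance masked out before taking the k-th nearest neighbour.
import torch

feats = torch.randn(100, 16)  # hypothetical feature matrix
i, k = 0, 10
diff = feats - feats[i]
dist = torch.sqrt((diff * diff).sum(dim=1))
dist[i] = float("inf")                 # exclude the sample itself
print(torch.sort(dist).values[k - 1])  # distance to the k-th nearest neighbour of sample i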
# Path: _Utils/Calculator.py
def get_nearest_k(h0, h1, k=1, sp_size=1000):
hh0 = h0.half()
hh1 = h1.half()
split = int(np.ceil(len(hh0) / sp_size))
near = []
for i in range(split):
dist = torch.cdist(hh0[i * sp_size:(i + 1) * sp_size], hh1)
nearest = torch.argsort(dist, dim=1)[:, :k]
near.append(nearest)
nearest = torch.cat(near)
return nearest
# Path: _Utils/Logs.py
def update_log(dic, path='../log/res.csv'):
index = 'Epoch'
val = []
name = []
for na, v in dic.items():
val.append(v)
name.append(na)
dt = pd.DataFrame([val], columns=name)
dt = dt.set_index(index)
if os.path.exists(path):
dt_old = pd.read_csv(path, index_col=index)
dt = merge_csv(dt_old, dt)
DirectoryOperator(path).make_fold()
dt.to_csv(path)
# Path: _Utils/Scatter.py
def visualize2(feature_vec, type_vec, group_vec, pred_vec, prefix, ):
fv = feature_vec.reshape((len(feature_vec), -1))
for perplexity in []:# 50
vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(
np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)
)
for s in [5]:
prefix2 = prefix + 'P{}S{}'.format(perplexity, s)
visualize_scatter(vis_fea_multi,
fig_path='{}Multi.svg'.format(prefix2),
label_color=type_vec[group_vec == 0],
# label_shape=type_vec,
s=s
)
for perplexity in [50]:
vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)
for s in [5]: # 5
prefix2 = prefix + 'P{}S{}'.format(perplexity, s)
visualize_scatter(vis_fea,
fig_path='{}Type.svg'.format(prefix2),
label_color=type_vec,
# label_shape=type_vec,
s=s
)
# visualize_scatter(vis_fea,
# fig_path='{}Cluster.svg'.format(prefix),
# label_color=pred_vec,
# label_shape=type_vec,
#
# )
visualize_scatter(vis_fea,
fig_path='{}Group.svg'.format(prefix2),
label_color=group_vec,
# label_shape=type_vec,
s=s
)
# Path: _Utils/Visualize.py
def visualize(feature_vec, type_vec, group_vec, pred_vec, prefix='../Visualization/E{:03d}'.format(0)):
vis_fea = tsne(feature_vec)
visualize_scatter(vis_fea,
fig_path='{}Type.jpg'.format(prefix),
label_color=type_vec,
label_shape=type_vec,
)
visualize_scatter(vis_fea,
fig_path='{}Cluster.jpg'.format(prefix),
label_color=pred_vec,
label_shape=type_vec,
)
visualize_scatter(vis_fea,
fig_path='{}Group.jpg'.format(prefix),
label_color=group_vec,
label_shape=type_vec,
)
# Path: _Utils/Visualize.py
def visual_matrix_console(x):
if len(x.shape) <= 2:
x = x.reshape((*x.shape, 1))
base_wid = int(np.log10(np.max(x) + 0.5)) + 1
head_wid = x.shape[2] * (1 + base_wid)
head_sep = int(head_wid // 2) + 1
print('t\\c ', end='')
for i in range(x.shape[1]):
print(('{:' + '{}'.format(head_sep) + 'd}').format(i), end=' ' * (head_wid - head_sep))
print()
for i, line in enumerate(x):
print('{:2d}: '.format(i), end='')
for cl in line:
sg = True
for g in cl:
if sg:
sg = False
else:
print(' ', end='')
if g != 0:
# print('base_wid == {}'.format(base_wid))
# print('g == {}'.format(g))
print(('{:' + str(base_wid) + 'd}').format(g), end='')
else:
print(' ' * base_wid, end='')
print('|', end='')
print()
# Path: _Utils/Visualize.py
def visualize_image(x, verbose=0, show=False, fig_path=None):
"""
:param show:
:param fig_path:
:param x:
(row, line, pic_h, pic_w) or (row, line, pic_h, pic_w, pic_c), pic_c = 1,3,4
:return:
"""
x = np.asarray(x)
if verbose:
print('img.min() == {}'.format(np.min(x)))
print('img.max() == {}'.format(np.max(x)))
x -= np.min(x)
x /= np.max(x)
row, line = x.shape[:2]
w, h = x.shape[1] * x.shape[3] / 90, x.shape[0] * x.shape[2] / 90
plt.figure(figsize=(w, h)) # w, h
count = 0
for rx in x:
for image in rx:
count += 1
plt.subplot(row, line, count)
plt.imshow(image, cmap='gray', )
plt.xticks([])
plt.yticks([])
plt.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0.1 / h, wspace=0.1 / w)
if not show and fig_path is None:
fig_path = '../_fig/fig.jpg'
if fig_path is not None:
DirectoryOperator.FoldOperator(directory=fig_path).make_fold()
plt.savefig(fig_path, transparent=True)
if show:
plt.show()
plt.close()
# Path: _Utils/Visualize.py
def plot_heat_map(z, xticks=None, yticks=None, xlabel=None, ylabel=None, title=None, show=False, fig_path=None):
"""
:param z: z[i,j] shown in i-th row, j-th line
:param xlabel:
:param ylabel:
:param show:
:param fig_path:
:return:
"""
left = 0.15
right = 1
top = 0.95
bottom = 0.15
w, h = z.shape
plt.figure(figsize=(w / (right - left), h / (top - bottom)))
# plt.figure(figsize=(w / (right - left), h / (top - bottom)))
# plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
if xticks is not None:
plt.xticks(np.arange(len(xticks)), np.round(xticks, 2), rotation=45)
if yticks is not None:
plt.yticks(np.arange(len(yticks)), np.round(yticks, 2))
for i in range(z.shape[0]):
for j in range(z.shape[1]):
# plt.text(j, i, accs[i, j].round(2), ha="center", va="center", color="b", fontsize=12,
# fontname='Times New Roman')
plt.text(j, i, z[i, j], ha="center", va="center")
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if title is not None:
plt.title(title)
plt.imshow(z, interpolation='nearest', aspect='auto')
plt.colorbar()
if fig_path is not None:
DirectoryOperator.FoldOperator(directory=fig_path).make_fold()
plt.savefig(fig_path, transparent=True)
if show:
plt.show()
plt.close()
# Path: _Utils/TimeOperator.py
class TimeOperator:
def __init__(self):
self.time_buffer = None
self.time_record = 0
self.time_sum = 0
self.time_count = 0
def time(self, output=False, promt=''):
if self.time_buffer is None:
self.time_buffer = time()
else:
self.time_record = time() - self.time_buffer
self.time_buffer = None
self.time_sum += self.time_record
self.time_count += 1
if output:
print('{}Time == {:7.05f}'.format(promt, self.time_record))
def get_time_sum(self):
return self.time_sum
def show_time_sum(self):
print('{:.02f}'.format(self.get_time_sum()))
def get_fps(self):
return self.time_count / self.time_sum
def __get_speed(self, to_metric=None):
speed = self.get_fps()
metric = 'Second'
if speed < 1 and to_metric != metric:
speed *= 60
metric = 'Minute'
if speed < 1 and to_metric != metric:
speed *= 60
metric = 'Hour'
if speed < 1 and to_metric != metric:
speed *= 24
metric = 'Day'
return speed, metric
def show_process(self, process_now, process_total, name='Epoch'):
if self.time_sum <= 0:
return
speed = self.time_sum / self.time_count
print('{:<5s} [{:3.0f}/{:3.0f}] [{:8.02f}/{:8.02f}]: {:5.02f}({:5.02f}) '.format(
name, process_now, process_total,
process_now * speed, process_total * speed,
self.time_record, speed
))
def show_speed(self):
speed, metric = self.__get_speed()
print('{:4.01f} Frames/{}'.format(speed, metric))
# Path: _Utils/DirectoryOperator.py
class DirectoryOperator:
def __init__(self, directory: str):
self.directory = directory
def make_fold(self):
if not TestMode:
# print('mk dir {}'.format(os.path.dirname(self.directory)))
os.makedirs(os.path.dirname(self.directory), exist_ok=True)
def modification_time(self):
if os.path.exists(self.directory):
return os.path.getmtime(self.directory)
else:
warnings.warn('Time_now is returned since the modification time for non-exist file is not available. File: {}'.format(self.directory))
return time.time()
# Path: DataSetMaster/dataset.py
def get_clusters(args):
item_path = os.path.join(path_operator.get_checkpoint_path(level=1), 'Items0321')
file_mnist_test = os.path.join(item_path, 'mnist_test_clusters89.67.txt')
file_mnist_train = os.path.join(item_path, 'MnistTrain94.31B256.txt')
file_amazon = os.path.join(item_path, 'amazon72.81B032ReValue.txt')
file_webcam = os.path.join(item_path, 'webcamOurLoaderRevalveBatchWiseB032_84.03.txt')
file_usps = os.path.join(item_path, 'usps_train_clusters85.10.txt')
root_har = os.path.join(item_path, 'HAR')
root_mtfl = os.path.join(item_path, 'MTFL')
if args.dataset == 'MNISTUSPS': # 87.75 93.31
if args.MnistTrain:
file_mnist = file_mnist_train
else:
file_mnist = file_mnist_test
file_list = [
file_mnist,
file_usps,
]
elif args.dataset == 'ReverseMNIST': # 89.67 94.31
if args.MnistTrain:
file_mnist = file_mnist_train
else:
file_mnist = file_mnist_test
file_list = [
file_mnist,
file_mnist,
]
elif args.dataset == 'Office': # 75.28
file_list = [
file_amazon,
file_webcam,
]
elif args.dataset == 'MTFL':
file_list = np.sort([os.path.join(root_mtfl, f) for f in os.listdir(root_mtfl) if f.endswith('txt')])
elif args.dataset == 'HAR': # 81.70
file_list = np.sort([os.path.join(root_har, f) for f in os.listdir(root_har) if f.endswith('txt')])
else:
raise NotImplementedError("")
def debug(x):
print(x.shape)
return x
clusters = torch.cat(
[debug(torch.from_numpy(np.loadtxt(c).astype(np.float32)).long()) for c in file_list],
dim=0,
).cuda()
return clusters
# Path: classification.py
def svm_classify(data, data_gt, label, test_prop, C):
"""
trains a linear SVM on the data
input C specifies the penalty factor of SVM
"""
seed = random.randint(0, 1000)
train_idx, test_idx = TT_split(data.shape[1], test_prop, seed)
train_data = np.concatenate([data[0][train_idx], data[1][train_idx]], axis=1)
test_data = np.concatenate([data_gt[0][test_idx], data_gt[1][test_idx]], axis=1)
test_label = label[test_idx]
train_label = label[train_idx]
# print('training SVM...')
clf = svm.LinearSVC(C=C, dual=False)
clf.fit(train_data, train_label.ravel())
p = clf.predict(test_data)
test_acc = accuracy_score(test_label, p)
return test_acc
# Path: evaluate.py
def UMAP(feature_vec, type_vec, group_vec, pred_vec, n_type, n_batch, args, epoch, dst_root='../Visualization'):
t = time.time()
# print("Performing UMAP Visualization...")
# print('feature_vec.shape == {}'.format(feature_vec.shape))
sc.set_figure_params(figsize=(4, 4), dpi=300)
# type_vec = pd.DataFrame(type_vec)
# for key in cell_type_dict.keys():
# type_vec.replace(key, cell_type_dict[key], inplace=True)
# group_vec = pd.DataFrame(group_vec)
# for key in batch_dict.keys():
# batch_vec.replace(key, batch_dict[key], inplace=True)
adata = sc.AnnData(feature_vec)
# print('adata.shape == {}'.format(adata.shape))
sc.pp.neighbors(adata)
adata.obs['cluster'] = pd.DataFrame(pred_vec).values.astype(np.str_)
adata.obs['type'] = pd.DataFrame(type_vec).values.astype(np.str_)
adata.obs['group'] = pd.DataFrame(group_vec).values.astype(np.str_)
sc.tl.umap(adata)
sc.pl.umap(adata,
color=['cluster'],
palette=sns.color_palette("husl", n_type),
save='E{:03d}UmapCluster{}.png'.format(epoch, str(args.dataset)),
show=False)
sc.pl.umap(adata,
color=['type'],
palette=sns.color_palette("husl", n_type),
save='E{:03d}UmapType{}.png'.format(epoch, str(args.dataset)),
show=False)
sc.pl.umap(adata,
color=['group'],
palette=sns.color_palette("hls", n_batch),
save='E{:03d}UmapGroup{}.png'.format(epoch, str(args.dataset)),
show=False)
roott = './figures/'
for root, dirs, files in os.walk(roott):
# print(root)
# print(dirs)
# print(files)
for f in files:
# print(os.path.join('../Visualization', f))
FileOperator(
os.path.join(root, f)
).rename(
os.path.join(dst_root, f.replace('umapE', 'E')),
auto_rename=False
)
if PrintTimer:
print('VisualizeScatter finished with in {:.03f} seconds (x.shape == {}).'.format(
time.time() - t,
feature_vec.shape,
))
# Path: evaluate.py
def evaluate2(feature_vec, pred_vec, type_vec, group_vec):
nmi, ari, acc, pred_adjusted = cluster_metrics(type_vec, pred_vec)
gs = np.unique(group_vec)
ts = np.unique(type_vec)
class_num = len(ts)
group_num = len(gs)
if group_vec is not None and group_num > 1:
balance, entro = my_balance(pred_vec, group_vec, cluster_num=np.unique(type_vec).shape[0],
group_num=np.unique(group_vec).shape[0])
O = torch.zeros((class_num, group_num)).cuda()
for b in gs:
ind_g = b == group_vec
pred_vec_g = pred_vec[ind_g]
for t in ts:
O[t, b] = np.sum(pred_vec_g == t)
O += 1e-6
O = (O / torch.sum(O))
NmiFair = normalized_mutual_information(O).cpu().numpy()
Fmeasure = FMeasure(beta=1)(acc, NmiFair)
else:
balance, entro = 0, 0
NmiFair = 0
Fmeasure = 0
entro_v = np.mean(entro)
global BestAcc, BestAri, BestNmi, BestBalance, BestEntropy, BestFairness, BestNmiFair, BestFmeasure
if BestAcc < acc:
BestAcc = acc
if BestAri < ari:
BestAri = ari
if BestNmi < nmi:
BestNmi = nmi
if BestBalance < balance:
BestBalance = balance
# if BestFairness < fairness:
# BestFairness = fairness
if BestNmiFair < NmiFair:
BestNmiFair = NmiFair
if BestFmeasure < Fmeasure:
BestFmeasure = Fmeasure
if BestEntropy < entro_v:
BestEntropy = entro_v
print(
'NMI={:5.02f}|{:5.02f}, ARI={:5.02f}|{:5.02f}, ACC={:5.02f}|{:5.02f}, Balance={:5.02f}|{:5.02f}, NmiFair={:5.02f}|{:5.02f}, Fmeasure={:5.02f}|{:5.02f}, Entropy={:5.02f}|{:5.02f}[{}],'.format(
nmi * 100, BestNmi * 100,
ari * 100, BestAri * 100,
acc * 100, BestAcc * 100,
balance * 100, BestBalance * 100,
# fairness * 100, BestFairness * 100,
NmiFair * 100, BestNmiFair * 100,
Fmeasure * 100, BestFmeasure * 100,
entro_v, BestEntropy, entro
)
)
met = {
'nmi' : nmi,
'ari' : ari,
'acc' : acc,
'balance' : balance,
'NmiFair' : NmiFair,
'Fmeasure': Fmeasure,
}
return pred_adjusted, met
# tqdm.write('NMI=%.4f, ACC=%.4f, ARI=%.4f' % (nmi, acc, ari), end='')
# if fair_metric:
# kl, ari_b = fair_metrics(feature_vec, group_vec, pred_vec, type_vec)
# print(', KL=%.4f, ARI_b=%.4f' % (kl, ari_b), end='')
# tqdm.write('')
# Path: figures/ScatterMaster.py
def visual_image_scatter():
np_path = os.path.join(
'D:/VirtualMachine/Codes/230904/SMAIL_RunSet_Visual/ --QuickConfig C100 --VisualFreq 5 --VisualRandom 1 --dataset NoisyMNIST30000 --seed 1999 --train_epoch 100/Checkpoints/Epoch099.npz')
# np_path_row = os.path.join(root, np_paths[np_names.index(np_tag)], 'NpPoints', np_epoch)
data = np.load(np_path, allow_pickle=False)
data_vec = data['data_vec']
feature_vec = data['feature_vec']
group_vec = data['group_vec']
type_vec = data['type_vec']
# visualize_image(x=[
# [it.reshape([28, 28]) for it in data_vec[:10]],
# [it.reshape([28, 28]) for it in data_vec[10:20]],
# [it.reshape([28, 28]) for it in data_vec[20:30]],
# ], show=True)
DrawMax = 3000
if len(feature_vec) > DrawMax:
it = np.arange(len(feature_vec))
np.random.shuffle(it)
ind = it[:DrawMax]
feature_vec = feature_vec[ind]
type_vec = type_vec[ind]
group_vec = group_vec[ind]
data_vec = data_vec[ind]
vis_fea = TSNE(perplexity=50).fit_transform(feature_vec)
_, ax = plt.subplots(figsize=(5 * 1 * 2, 5 * 1 * 2 / 1.6))
label_color = np.unique(type_vec)
color_num = len(np.unique(type_vec))
# if color_num <= 2:
# cmap = None
if color_num <= 10:
cmap = 'tab10'
elif color_num <= 20:
cmap = 'tab20'
else:
cmap = 'gist_ncar'
for digit in np.unique(type_vec):
ax.scatter(
*vis_fea[type_vec == digit].T,
# marker=f"${digit}$",
s=0.5,
# color=plt.cm.Dark2(digit),
alpha=0.7,
c=type_vec[type_vec == digit],
cmap=cmap,
vmax=max(4, np.max(label_color)),
vmin=min(0, np.min(label_color)),
zorder=2,
)
w = int(np.sqrt(len(data_vec[0])))
h = w
shown_images = np.array([[1.0, 1.0]]) # just something big
for i in range(data_vec.shape[0]):
# plot every digit on the embedding
# show an annotation box for a group of digits
dist = np.sum((vis_fea[i] - shown_images) ** 2, 1)
if np.min(dist) < 2e1:
# don't show points that are too close
continue
shown_images = np.concatenate([shown_images, [vis_fea[i]]], axis=0)
# img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, )
img = offsetbox.OffsetImage(data_vec[i].reshape([w, h]), cmap=plt.cm.gray_r, zoom=0.5)
# img.ti
imagebox = offsetbox.AnnotationBbox(
img, # [w, h, 3]
vis_fea[i],
pad=0,
frameon=False
)
imagebox.set(zorder=1)
ax.add_artist(imagebox)
ax.set_title('title')
ax.axis("off")
plt.tight_layout()
plt.savefig('D:/Pengxin/Temp/tmp.pdf')
plt.show()
print()
pass
# Path: Net.py
import math
import os
import time
import warnings
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
import evaluate
import faiss
import scipy.io as sio
from torch import nn
from torch.autograd import Variable
from DistComput import get_dist_release
from _Utils.Calculator import get_nearest_k
from _Utils.Logs import update_log
from _Utils.Scatter import visualize2
from _Utils.Visualize import visualize, visual_matrix_console, visualize_image, plot_heat_map
from _Utils import TimeOperator, DirectoryOperator
from DataSetMaster.dataset import get_clusters
from classification import svm_classify
from evaluate import UMAP, evaluate2
from sklearn import metrics
from munkres import Munkres
from figures.ScatterMaster import visual_image_scatter
def show_distribution_ct(type_vec, group_vec, pred_vec, class_num, group_num):
v = np.zeros((class_num, class_num, group_num), dtype=int)
for t, c, g in zip(type_vec, pred_vec, group_vec):
v[t, c, g] += 1
visual_matrix_console(x=v)
| def kmeans(feature_vec, class_num):
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: botcs/wolfson-scheduler
# Path: solver.py
def unravel_indices(indices, shape):
coord = []
for dim in reversed(shape):
coord.append(indices % dim)
indices = indices // dim
coord = torch.stack(coord[::-1], dim=-1)
return coord
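# [Editor's note] Illustration only, not from the repository. unravel_indices above mirrors
# numpy's unravel_index while staying in torch; assuming numpy is available, the same mapping
# can be checked like this:
import numpy as np

flat = np.array([0, 1, 2, 3, 4, 5])
rows, cols = np.unravel_index(flat, (2, 3))
print(np.stack([rows, cols], axis=-1))  # [[0 0] [0 1] [0 2] [1 0] [1 1] [1 2]]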
# Path: solver.py
def generalized_outer_addition(vectors, output=None):
"""
Compute the outer addition of N one-dimensional vectors (of possibly different lengths) using broadcasting.
This function is equivalent to the following code:
```
result = torch.zeros((K1, K2, ..., KN))
for idx1 in range(K1):
for idx2 in range(K2):
...
result[idx1, idx2, ..., idxn] = vectors[idx1] + vectors[idx2] + ... + vectors[idxn]
```
However, it is much faster because it relies on broadcasting instead of explicit Python loops.
:param vectors: List of N 1-D vectors with lengths K1, K2, ..., KN
:param output: Optional output tensor
if provided, must be of shape (K1, K2, ..., KN)
:return: Tensor of shape (K1, K2, ..., KN)
"""
# Assert all vectors are on the same device
device = vectors[0].device
assert all(
v.device == device for v in vectors
), "All vectors must be on the same device"
# Number of vectors (N) and dimensions (K)
# N, K = vectors.shape
N = len(vectors)
Ks = [len(v) for v in vectors]
if output is None:
output = torch.zeros(Ks, dtype=vectors[0].dtype, device=vectors[0].device)
else:
assert output.shape == tuple(Ks), "Output tensor has incorrect shape"
output.zero_()
# Reshape each vector to have a unique non-singleton dimension
for i in range(N):
expanded_shape = [1] * N
expanded_shape[i] = Ks[i]
reshaped_vector = vectors[i].view(*expanded_shape)
output += reshaped_vector
return output
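# [Editor's note] Illustration only, not from the repository: the outer addition of two small
# vectors via broadcasting, matching the nested-loop description in the docstring above.
import torch

a = torch.tensor([0.0, 10.0])
b = torch.tensor([1.0, 2.0, 3.0])
outer_sum = a.view(2, 1) + b.view(1, 3)   # result[i, j] = a[i] + b[j]
print(outer_sum)  # tensor([[ 1.,  2.,  3.], [11., 12., 13.]])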
# Path: solver.py
def compute_variances(X, Y):
"""
Compute variances between all combinations of vectors in X and Y.
This function is equivalent to the following code:
```
variances = torch.zeros((X.size(0), Y.size(0)))
for i in range(X.size(0)):
for j in range(Y.size(0)):
concatenated = torch.cat((X[i], Y[j]))
variances[i, j] = torch.var(concatenated, unbiased=False)
```
However, it is much faster because it uses pre-computed sums and sums of squares.
:param X: Tensor of shape (N, K)
:param Y: Tensor of shape (M, L)
"""
# Compute sums and sums of squares for X
sum_X = torch.sum(X, dim=1)
sum_sq_X = torch.sum(X**2, dim=1)
# Compute sums and sums of squares for Y
sum_Y = torch.sum(Y, dim=1)
sum_sq_Y = torch.sum(Y**2, dim=1)
# Lengths of vectors in X and Y
len_X = X.shape[1]
len_Y = Y.shape[1]
# Broadcasting sums and sum of squares for all combinations
total_sum = sum_X.unsqueeze(1) + sum_Y.unsqueeze(0)
total_sum_sq = sum_sq_X.unsqueeze(1) + sum_sq_Y.unsqueeze(0)
total_len = len_X + len_Y
# Compute variances
mean = total_sum / total_len
variances = total_sum_sq / total_len - mean**2
return variances
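# [Editor's note] Illustrative check, not from the repository: the closed-form population
# variance from sums and sums of squares equals torch.var(..., unbiased=False) applied to the
# concatenated pair, which is the equivalence the docstring above describes.
import torch

x = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([10.0, 20.0])
cat = torch.cat([x, y])
n = cat.numel()
var_direct = torch.var(cat, unbiased=False)
var_from_sums = cat.pow(2).sum() / n - (cat.sum() / n) ** 2
print(torch.isclose(var_direct, var_from_sums))  # tensor(True)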
# Path: solver.py
def get_max_numel(dtype, memory_capacity=None, device="cpu"):
"""
Compute the maximum number of elements that fit in specified memory.
:param dtype: Data type of the tensor (e.g., torch.float32)
:param memory_capacity: Memory capacity in bytes
:param device: 'cpu' or 'cuda'
:return: maximum number of elements that fit
"""
# Get memory capacity
if memory_capacity is None:
memory_capacity = get_free_memory(device)
# Calculate maximum number of elements that fit
element_size = torch.tensor(
[], dtype=dtype
).element_size() # Size in bytes of one element
max_numel = memory_capacity // element_size
return max_numel
# Path: solver.py
def check_matrix_fit_and_num_chunks(
dimensions, dtype, memory_capacity=None, device="cpu"
):
"""
Check if a tensor of given dimensions and data type fits in specified memory.
If not, return chunk sizes that maximize the capacity, slicing only along the first dimension.
:param dimensions: Tuple of dimensions for the tensor
:param dtype: Data type of the tensor (e.g., torch.float32)
:param memory_capacity: Memory capacity in bytes
:param device: 'cpu' or 'cuda'
:return: number of chunks along the first dimension
"""
# Get memory capacity
if memory_capacity is None:
memory_capacity = get_memory_capacity(device)
# Calculate total number of elements
total_elements = 1
for dim in dimensions:
total_elements *= dim
element_size = torch.tensor(
[], dtype=dtype
).element_size() # Size in bytes of one element
total_size = total_elements * element_size # Total memory required for the tensor
if total_size <= memory_capacity:
return 1
# If doesn't fit, calculate chunk size for the first dimension
other_dims_product = 1
for dim in dimensions[1:]:
other_dims_product *= dim
max_first_dim_size = memory_capacity // (other_dims_product * element_size)
if max_first_dim_size == 0:
raise ValueError("Tensor does not fit in memory.")
num_chunks = math.ceil(dimensions[0] / max_first_dim_size)
return num_chunks
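# [Editor's note] Back-of-envelope sketch with assumed numbers, not from the repository: how
# the chunk count above falls out of the memory budget for a (10_000, 512, 10_000) float32
# tensor and a hypothetical 4 GiB capacity.
import math

element_size = 4                       # bytes per float32
dims = (10_000, 512, 10_000)
budget = 4 * 1024 ** 3                 # 4 GiB
bytes_per_row = dims[1] * dims[2] * element_size
max_rows = budget // bytes_per_row
print(math.ceil(dims[0] / max_rows))   # chunks along the first dimension -> 48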
# Path: solver.py
def convert_property_to_categorical(property):
"""
Convert the properties to a categorical variable.
:param property: List of properties for each rower.
Shape: (num_rowers)
dtype: Any
:return: Tensor of categorical properties.
Shape: (num_rowers)
dtype: torch.long
"""
unique_properties = set()
for p in property:
unique_properties.add(p)
unique_properties = sorted(list(unique_properties))
property = [unique_properties.index(p) for p in property]
property = torch.tensor(property)
return property
# Path: solver.py
def extract_best_assignment(assignments_per_week, total_score):
"""
Extract the best assignment for each outing.
:param assignments_per_week: Tensor of assignments per week.
shape: (num_outings, num_combinations, num_rowers)
:param total_score: Tensor of total score for each assignment.
shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings
:return: Tensor of best assignment per outing.
shape: (num_outings, 1, num_rowers)
"""
num_outings, num_combinations, num_rowers = assignments_per_week.shape
# Find the top assignments
# top_inds = torch.argsort(total_score.flatten(), descending=True)[0]
top_idx = torch.argmax(total_score.flatten())
top_idx = unravel_indices(top_idx, total_score.shape)
# top_inds tells us for each outing the index of the top assignment
top_assignment = torch.zeros(
num_outings,
1,
num_rowers,
dtype=torch.uint8,
device=assignments_per_week.device,
)
for outing_idx, comb_idx in enumerate(top_idx):
top_assignment[outing_idx] = assignments_per_week[outing_idx, comb_idx]
return top_assignment
# Path: solver.py
@torch.no_grad()
def get_no_overlap_inds(A, B):
"""
Perform matrix multiplication of A and B in chunks.
Return the indices of rows in A and columns in B that have no overlap.
Overlap is defined as a non-zero value in the product of A and B.
:param A: First matrix
shape: (num_combinations_A, num_rowers)
:param B: Second matrix
shape: (num_combinations_B, num_rowers)
:param chunk_sizes: Chunk sizes for the first dimension of A
:return: indices of rows in A and columns in B that have no overlap
"""
# check if the product of the two matrices fits in memory
# if not, chunk the matrices and check for overlap in chunks
num_chunks = check_matrix_fit_and_num_chunks(
(A.shape[0], A.shape[1], B.shape[0]), dtype=A.dtype, device=A.device
)
# num_chunks = 1
def multiply_and_find(A_chunk, B):
# counts the number of double-assignments for each rower between the two boats
assignment_count = torch.matmul(A_chunk, B.T)
no_overlap_inds = torch.nonzero(assignment_count == 0)
return no_overlap_inds
# if the product fits in memory, check for overlap in one go
if num_chunks == 1:
return multiply_and_find(A, B)
A_chunks = torch.chunk(A, num_chunks)
# otherwise, chunk the matrices and check for overlap in chunks
no_overlap_inds = []
offset_idx = 0
for A_chunk in tqdm.tqdm(A_chunks):
# no_overlap_inds.append(multiply_and_find(A_chunk, B).tolist())
chunk_inds = multiply_and_find(A_chunk, B)
# add the chunk size to offset the indices
chunk_inds[:, 0] += offset_idx
offset_idx += A_chunk.shape[0]
no_overlap_inds.append(chunk_inds)
return torch.cat(no_overlap_inds)
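# [Editor's note] Minimal illustration, not from the repository: two assignment rows overlap
# iff their dot product is non-zero, which is exactly what the (chunked) matmul above checks.
import torch

A = torch.tensor([[1, 1, 0, 0],
                  [0, 0, 1, 1]], dtype=torch.float32)
B = torch.tensor([[0, 0, 1, 0],
                  [1, 0, 0, 0]], dtype=torch.float32)
counts = A @ B.T                       # counts[i, j] = rowers shared by A[i] and B[j]
print(torch.nonzero(counts == 0))      # (row in A, row in B) pairs with no shared rower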
# Path: solver.py
@torch.no_grad()
def generate_binary_matrices(
num_rowers,
boat_sizes,
device="cpu",
max_num_combinations=NUM_MAX_COMBINATION_PER_BOAT,
):
"""
Generate binary matrices for each combination of rowers in boats.
:param num_rowers: Total number of rowers
:param boat_sizes: List of boat sizes
"""
per_boat_binary_matrices = []
for boat_size in boat_sizes:
# Precompute indices for combinations
row_indices = []
col_indices = []
num_combinations = math.comb(num_rowers, boat_size)
if num_combinations > max_num_combinations:
M = torch.zeros((max_num_combinations, num_rowers), dtype=torch.bool)
keep_indices = sample(
torch.arange(num_combinations), k=max_num_combinations
)
keep_indices = keep_indices.sort().values
i = 0
for row, combination in enumerate(
itertools.combinations(range(num_rowers), boat_size)
):
if keep_indices[i] != row:
continue
for col in combination:
row_indices.append(i)
col_indices.append(col)
i += 1
if i == max_num_combinations:
break
else:
M = torch.zeros((num_combinations, num_rowers), dtype=torch.bool)
for row, combination in enumerate(
itertools.combinations(range(num_rowers), boat_size)
):
for col in combination:
row_indices.append(row)
col_indices.append(col)
# Use advanced indexing to fill the matrix
M[row_indices, col_indices] = 1
per_boat_binary_matrices.append(M)
return per_boat_binary_matrices
# Path: solver.py
@torch.no_grad()
def eliminate_invalid_boats(
binary_matrix, rower_sides, num_max_combinations=NUM_MAX_COMBINATION_PER_BOAT
):
"""
Eliminate invalid boats from a binary matrix.
Currently we consider a boat invalid if there are more rowers on one side than the other.
We represent stroke as 1 and bow as -1 and 0 for no preference.
:param binary_matrix: Binary matrix of rower combinations
shape: (num_combinations, num_rowers)
:return: Binary matrix with invalid boats eliminated
"""
# gather the rower sides for each rower in each boat for each combination
num_assigned_rowers = binary_matrix[0].sum()
# assert each row has the same number of assigned rowers
assert (binary_matrix.sum(dim=1) == num_assigned_rowers).all()
assert len(rower_sides) == binary_matrix.shape[1]
idx = binary_matrix.nonzero()[:, 1].view(len(binary_matrix), num_assigned_rowers)
outings = rower_sides[idx]
# Compute the offset between the number of stroke and bow seats
offset = torch.sum(outings, dim=1).abs()
# Determine the number of rowers that are both stroke and bow seat
count_where_both = torch.sum(outings == 0, dim=1)
# Eliminate invalid boats
is_valid = count_where_both >= offset
binary_matrix = binary_matrix[is_valid]
if len(binary_matrix) > num_max_combinations:
binary_matrix = sample(binary_matrix, k=num_max_combinations)
return binary_matrix
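# [Editor's note] Worked example, not from the repository, of the validity rule described in
# the docstring above: stroke = +1, bow = -1, either side = 0; a crew is kept only if the
# rowers with no side preference can cover the stroke/bow imbalance.
import torch

sides = torch.tensor([1, 1, -1, 0])   # two strokes, one bow, one flexible rower
offset = sides.sum().abs()            # imbalance between sides = 1
flexible = (sides == 0).sum()         # rowers who can sit on either side = 1
print(bool(flexible >= offset))       # True -> this crew can be seated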
# Path: solver.py
@torch.no_grad()
def generate_valid_assignments(
single_boat_bin_matrices, num_max_combinations=NUM_MAX_PAIRWISE_COMBINATION
):
"""
Generate valid combinations of rowers across multiple boats on a single outing
:param matrices: List of binary matrices, each representing combinations for a boat.
shape: List[
Tensor(num_combinations_1, num_rowers),
Tensor(num_combinations_2, num_rowers),
...
Tensor(num_combinations_n, num_rowers),
]
:return: Tensor of valid combinations across all boats.
"""
assert len(single_boat_bin_matrices) > 0, "Must have at least one boat"
assert all(
m.shape[1] == single_boat_bin_matrices[0].shape[1]
for m in single_boat_bin_matrices
), "All matrices must have the same number of rowers"
assignments = single_boat_bin_matrices[0]
for boat_ind, boat_B in enumerate(single_boat_bin_matrices[1:], start=2):
no_overlap_inds = get_no_overlap_inds(assignments, boat_B)
if len(no_overlap_inds) > num_max_combinations:
no_overlap_inds = sample(no_overlap_inds, k=num_max_combinations)
A_inds, B_inds = no_overlap_inds.T
# update boat_A to be the combination of boat_A and boat_B with no overlap
assignments = assignments[A_inds] + boat_B[B_inds] * boat_ind
return assignments
# Path: solver.py
def evaluate_skill_variance(assignments_per_week, skill_levels, dtype=torch.float16):
"""
This relies on the notion that the skill levels entered are not categorical
but integer values (or can be mapped to ordered categories, e.g. M1 > M2 > M3 ... )
:param assignments_per_week: Tensor of assignments per week.
shape: (num_outings, num_combinations, num_rowers)
:param skill_levels: Tensor of skill levels for each rower.
shape: (num_rowers,)
:return: Tensor of variance for each combination in each outing.
shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings
"""
# assert that the number of assigned rowers is the same for each outing
for outing_idx in range(len(assignments_per_week)):
num_assigned_rowers = assignments_per_week[outing_idx][0].sum()
assert (
assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers
).all()
num_outings, num_combinations, num_rowers = assignments_per_week.shape
max_num_boats = assignments_per_week.max().item()
outing_variance = torch.zeros(
num_outings, num_combinations, device=assignments_per_week.device, dtype=dtype
)
for boat_idx in range(max_num_boats):
boat_assignment = assignments_per_week == boat_idx + 1
# we use binary masking
X = skill_levels * boat_assignment
# but we need to make sure that we don't include the rowers that are not assigned
X_sum = X.sum(dim=2)
X_len = boat_assignment.sum(dim=2)
X_mean = X_sum / X_len
boat_variance = ((X - X_mean.unsqueeze_(2)) * boat_assignment) ** 2
boat_variance = boat_variance.sum(dim=2)
# we use the unbiased variance since the sample size is small
boat_variance /= torch.clamp(X_len - 1, min=1)
outing_variance += boat_variance
# now we need to compute the variance between the outings across the week
week_variance = generalized_outer_addition(outing_variance)
return week_variance
# Path: solver.py
def evaluate_num_preferred_outings(
assignments_per_week, num_preferred_outings, dtype=torch.long
):
# assert that the number of assigned rowers is the same for each outing
for outing_idx in range(len(assignments_per_week)):
num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()
assert (
assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers
).all()
assignments_per_week = assignments_per_week > 0
num_outings, num_combinations, num_rowers = assignments_per_week.shape
# just to pin memory and reuse the output tensor
num_assignment_per_rower = torch.zeros(
[num_combinations] * num_outings,
device=assignments_per_week.device,
dtype=dtype,
)
week_over_assignment = torch.zeros(
[num_combinations] * num_outings,
device=assignments_per_week.device,
dtype=dtype,
)
for rower_idx in range(num_rowers):
num_assignment_per_rower = generalized_outer_addition(
assignments_per_week[:, :, rower_idx], output=num_assignment_per_rower
)
num_preferred_outings_per_rower = num_preferred_outings[rower_idx]
assignment_diff = num_assignment_per_rower - num_preferred_outings_per_rower
over_assignment = assignment_diff.clamp_(min=0)
week_over_assignment += over_assignment
return week_over_assignment
# Path: solver.py
def evaluate_assignments_per_week(
assignments_per_week, properties, weights, return_stats=False
):
"""
Evaluate the assignments per week.
:param assignments_per_week: Tensor of num_outings different assignments for the week.
Shape: (num_outings, num_combinations, num_rowers)
dtype: torch.uint8
:param properties: dict of Tensors of properties.
Shape: {property_name: Tensor(num_rowers)}
dtype: torch.long
:param weights: dict of weights for each property.
Shape: {property_name: float}
:param return_stats: Whether to return the stats for each property.
:return: Total score for the week.
Shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings
:return: Stats for each weight category.
"""
# Compute variance of skill levels
week_variance = evaluate_skill_variance(
assignments_per_week, properties["skill_level"]
)
# Compute number of preferred outings
week_num_preferred_outings = evaluate_num_preferred_outings(
assignments_per_week, properties["num_preferred_outings"]
)
# Compute total score
total_score = (
weights["skill variance"] * week_variance
+ weights["over assignment"] * week_num_preferred_outings
)
if return_stats:
stats = {
"values": {
"skill variance": week_variance,
"over assignment": week_num_preferred_outings,
},
"weights": weights,
"total": total_score,
}
return total_score, stats
return total_score
# Path: solver.py
def permute_top_assignments(
valid_assignments,
assignments_per_week,
total_scores,
num_permutations=10,
randomize_permutations=True,
):
"""
Permute the top assignments for the week.
"""
num_outings, num_combinations, num_rowers = assignments_per_week.shape
assert len(valid_assignments) == num_outings, "Must have the same number of outings"
assert (
len(assignments_per_week) == num_outings
), "Must have the same number of outings"
if any(m.ndim != 2 for m in valid_assignments):
raise ValueError("All outing assignments have to be 2D for every outing")
if any(m.shape[1] != num_rowers for m in valid_assignments):
raise ValueError(
"All outing assignments have to have the same number of rowers"
)
if any((m.sum(dim=1) != m[0].sum()).any() for m in valid_assignments):
raise ValueError(
f"In each combination of every outing,\
the number of rowers assigned must be the same."
)
# assert all(
# m.ndim == 2
# for m in valid_assignments
# ), f"All matrices must have the same number of dim: {[m.shape for m in valid_assignments]}"
# assert all(
# m.shape[1] == num_rowers
# for m in valid_assignments
# ), "All matrices must have the same number of rowers"
# for outing_idx in range(len(valid_assignments)):
# assert (valid_assignments[outing_idx].sum() == valid_assignments[outing_idx][0].sum()).all(),\
# "Combinations must have the same number of rowers assigned in an outing"
# assert that the number of assigned rowers is the same for each outing
for outing_idx in range(len(assignments_per_week)):
num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()
assert (
assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers
).all()
best_assignment = extract_best_assignment(assignments_per_week, total_scores)
# in the permutations we fix all outings except the outing we are permuting
permuted_assignment = best_assignment.repeat(1, num_permutations + 1, 1)
for outing_idx in range(len(assignments_per_week)):
# just copy the best assignment num_permutations times
if randomize_permutations:
# we need to make sure that the best assignment is included
permuted_assignment[outing_idx, 1:] = sample(
valid_assignments[outing_idx], k=num_permutations
)
else:
permuted_assignment[outing_idx, 1:] = valid_assignments[outing_idx][
:num_permutations
]
return permuted_assignment
# Path: tests/test_solver.py
import torch
import unittest
import math
from unittest.mock import patch
from solver import (
unravel_indices,
generalized_outer_addition,
compute_variances,
get_max_numel,
check_matrix_fit_and_num_chunks,
convert_property_to_categorical,
extract_best_assignment,
get_no_overlap_inds,
generate_binary_matrices,
eliminate_invalid_boats,
generate_valid_assignments,
evaluate_skill_variance,
evaluate_num_preferred_outings,
evaluate_assignments_per_week,
permute_top_assignments,
)
class TestUnravelIndices(unittest.TestCase):
def test_simple_case(self):
indices = torch.tensor([0, 1, 2, 3, 4, 5])
shape = (2, 3)
expected_result = torch.tensor([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
result = unravel_indices(indices, shape)
self.assertTrue(torch.equal(result, expected_result))
| def test_single_dimension(self): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Azure-Samples/functions-python-web-crawler
# Path: .venv/Lib/site-packages/azure/functions/decorators/generic.py
class GenericInputBinding(InputBinding):
@staticmethod
def get_binding_name():
pass
def __init__(self,
name: str,
type: str,
data_type: Optional[DataType] = None,
**kwargs):
super().__init__(name=name, data_type=data_type, type=type)
# Path: .venv/Lib/site-packages/azure/functions/decorators/generic.py
class GenericTrigger(Trigger):
@staticmethod
def get_binding_name():
pass
def __init__(self,
name: str,
type: str,
data_type: Optional[DataType] = None,
**kwargs):
super().__init__(name=name, data_type=data_type, type=type)
# Path: .venv/Lib/site-packages/azure/functions/decorators/generic.py
class GenericOutputBinding(OutputBinding):
@staticmethod
def get_binding_name():
pass
def __init__(self,
name: str,
type: str,
data_type: Optional[DataType] = None,
**kwargs):
super().__init__(name=name, data_type=data_type, type=type)
# Path: .venv/Lib/site-packages/azure/functions/decorators/warmup.py
class WarmUpTrigger(Trigger):
@staticmethod
def get_binding_name() -> str:
return WARMUP_TRIGGER
def __init__(self,
name: str,
data_type: Optional[DataType] = None,
**kwargs) -> None:
super().__init__(name=name, data_type=data_type)
# Path: .venv/Lib/site-packages/azure/functions/_http_asgi.py
class AsgiMiddleware:
"""This middleware is to adapt an ASGI supported Python server
framework into Azure Functions. It can be used by either calling the
.handle() function or exposing the .main property in a HttpTrigger.
"""
_logger = logging.getLogger('azure.functions.AsgiMiddleware')
_usage_reported = False
def __init__(self, app):
"""Instantiate an ASGI middleware to convert Azure Functions HTTP
request into ASGI Python object. Example on handling ASGI app in a HTTP
trigger by overwriting the .main() method:
import azure.functions as func
from FastapiApp import app
main = func.AsgiMiddleware(app).main
"""
if not self._usage_reported:
self._logger.debug("Starting Azure Functions ASGI middleware.")
self._usage_reported = True
self._app = app
self.main = self._handle
def handle(self, req: HttpRequest, context: Optional[Context] = None):
"""Deprecated. Please use handle_async instead:
import azure.functions as func
from FastapiApp import app
async def main(req, context):
return await func.AsgiMiddleware(app).handle_async(req, context)
"""
warn("handle() is deprecated. Please await .handle_async() instead.",
DeprecationWarning, stacklevel=2)
self._logger.warning(
"handle() is deprecated. Please `await .handle_async()` instead.")
return self._handle(req, context)
def _handle(self, req, context):
asgi_request = AsgiRequest(req, context)
scope = asgi_request.to_asgi_http_scope()
asgi_response = asyncio.run(
AsgiResponse.from_app(self._app, scope, req.get_body())
)
return asgi_response.to_func_response()
async def handle_async(self,
req: HttpRequest,
context: Optional[Context] = None):
"""Method to convert an Azure Functions HTTP request into a ASGI
Python object. Example on handling ASGI app in a HTTP trigger by
calling .handle_async() in .main() method:
import azure.functions as func
from FastapiApp import app
async def main(req, context):
return await func.AsgiMiddleware(app).handle_async(req,
context)
"""
return await self._handle_async(req, context)
async def _handle_async(self, req, context):
asgi_request = AsgiRequest(req, context)
scope = asgi_request.to_asgi_http_scope()
asgi_response = await AsgiResponse.from_app(self._app,
scope,
req.get_body())
return asgi_response.to_func_response()
# Path: .venv/Lib/site-packages/azure/functions/_http_wsgi.py
class WsgiRequest:
class WsgiResponse:
class WsgiMiddleware:
def __init__(self,
func_req: HttpRequest,
func_ctx: Optional[Context] = None):
def to_environ(self, errors_buffer: StringIO) -> Dict[str, Any]:
def _get_port(self, parsed_url, lowercased_headers: Dict[str, str]) -> int:
def _get_http_headers(self,
func_headers: Dict[str, str]) -> Dict[str, str]:
def __init__(self):
def from_app(cls, app, environ) -> 'WsgiResponse':
def to_func_response(self) -> HttpResponse:
def _start_response(self, status: str, response_headers: List[Any]):
def __init__(self, app):
def handle(self, req: HttpRequest, context: Optional[Context] = None):
def _handle(self, req, context):
def _handle_errors(self, wsgi_response):
# Path: .venv/Lib/site-packages/azure/functions/decorators/function_app.py
import abc
import json
import logging
from abc import ABC
from datetime import time
from typing import Any, Callable, Dict, List, Optional, Union, \
Iterable
from azure.functions.decorators.blob import BlobTrigger, BlobInput, BlobOutput
from azure.functions.decorators.core import Binding, Trigger, DataType, \
AuthLevel, SCRIPT_FILE_NAME, Cardinality, AccessRights
from azure.functions.decorators.cosmosdb import CosmosDBTrigger, \
CosmosDBOutput, CosmosDBInput, CosmosDBTriggerV3, CosmosDBInputV3, \
CosmosDBOutputV3
from azure.functions.decorators.eventgrid import EventGridTrigger, \
EventGridOutput
from azure.functions.decorators.eventhub import EventHubTrigger, EventHubOutput
from azure.functions.decorators.http import HttpTrigger, HttpOutput, \
HttpMethod
from azure.functions.decorators.queue import QueueTrigger, QueueOutput
from azure.functions.decorators.servicebus import ServiceBusQueueTrigger, \
ServiceBusQueueOutput, ServiceBusTopicTrigger, \
ServiceBusTopicOutput
from azure.functions.decorators.table import TableInput, TableOutput
from azure.functions.decorators.timer import TimerTrigger
from azure.functions.decorators.utils import parse_singular_param_to_enum, \
parse_iterable_param_to_enums, StringifyEnumJsonEncoder
from azure.functions.http import HttpRequest
from .generic import GenericInputBinding, GenericTrigger, GenericOutputBinding
from .warmup import WarmUpTrigger
from .._http_asgi import AsgiMiddleware
from .._http_wsgi import WsgiMiddleware, Context
f" in bindings {bindings}")
# Set route to function name if unspecified in the http trigger
# Set auth level to function app auth level if unspecified in the
# http trigger
if Trigger.is_supported_trigger_type(trigger, HttpTrigger):
if getattr(trigger, 'route', None) is None:
getattr(trigger, 'init_params').append('route')
setattr(trigger, 'route', function_name)
if getattr(trigger, 'auth_level',
None) is None and auth_level is not None:
getattr(trigger, 'init_params').append('auth_level')
setattr(trigger, 'auth_level',
parse_singular_param_to_enum(auth_level, AuthLevel))
self._function._is_http_function = True
def build(self, auth_level: Optional[AuthLevel] = None) -> Function:
"""
Validates and builds the function object.
:param auth_level: Http auth level that will be set if http
trigger function auth level is None.
"""
self._validate_function(auth_level)
return self._function
class DecoratorApi(ABC):
"""Interface which contains essential decorator function building blocks
to extend for creating new function app or blueprint classes.
"""
def __init__(self, *args, **kwargs):
self._function_builders: List[FunctionBuilder] = []
self._app_script_file: str = SCRIPT_FILE_NAME
@property
def app_script_file(self) -> str:
"""Name of function app script file in which all the functions
are defined. \n
Script file defined here is for placeholder purpose, please refer to
worker defined script file path as the single point of truth.
:return: Script file name.
"""
return self._app_script_file
def _validate_type(self,
func: Union[Callable[..., Any], FunctionBuilder]) \
-> FunctionBuilder:
"""Validate the type of the function object and return the created
:class:`FunctionBuilder` object.
:param func: Function object passed to
:meth:`_configure_function_builder`
:raises ValueError: Raise error when func param is neither
:class:`Callable` nor :class:`FunctionBuilder`.
:return: :class:`FunctionBuilder` object.
"""
if isinstance(func, FunctionBuilder):
fb = self._function_builders.pop()
elif callable(func):
fb = FunctionBuilder(func, self._app_script_file)
else:
raise ValueError(
"Unsupported type for function app decorator found.")
return fb
def _configure_function_builder(self, wrap) -> Callable[..., Any]:
"""Decorator function on user defined function to create and return
:class:`FunctionBuilder` object from :class:`Callable` func.
"""
def decorator(func):
fb = self._validate_type(func)
self._function_builders.append(fb)
return wrap(fb)
return decorator
def function_name(self, name: str) -> Callable[..., Any]:
"""Set name of the :class:`Function` object.
:param name: Name of the function.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.configure_function_name(name)
return fb
return decorator()
return wrap
def http_type(self, http_type: str) -> Callable[..., Any]:
"""Set http type of the :class:`Function` object.
:param http_type: Http type of the function.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.configure_http_type(http_type)
return fb
return decorator()
return wrap
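# [Editor's note] Generic sketch, not the Azure Functions API: the stacking pattern used by
# the decorators above, where each decorator validates its target, wraps it in a builder once,
# and returns the builder so further decorators keep configuring the same object. All names
# below are hypothetical.
class Builder:
    def __init__(self, func):
        self.func = func
        self.settings = {}

def configure(**settings):
    def decorator(obj):
        builder = obj if isinstance(obj, Builder) else Builder(obj)
        builder.settings.update(settings)
        return builder
    return decorator

@configure(name="demo")
@configure(route="items")
def handler():
    return "ok"

print(handler.settings)  # {'route': 'items', 'name': 'demo'}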
class HttpFunctionsAuthLevelMixin(ABC):
"""Interface to extend for enabling function app level http
authorization level setting"""
| def __init__(self, auth_level: Union[AuthLevel, str], *args, **kwargs): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: ict-bigdatalab/RIGHT
# Path: dense_retrieval.py
class MySimCSE(SimCSE):
def encode(self, sentence: Union[str, List[str]],
device: str = None,
return_numpy: bool = False,
normalize_to_unit: bool = True,
keepdim: bool = False,
batch_size: int = 64,
max_length: int = 128) -> Union[ndarray, Tensor]:
target_device = self.device if device is None else device
self.model = self.model.to(target_device)
single_sentence = False
if isinstance(sentence, str):
sentence = [sentence]
single_sentence = True
embedding_list = []
with torch.no_grad():
total_batch = len(sentence) // batch_size + (1 if len(sentence) % batch_size > 0 else 0)
for batch_id in range(total_batch):
inputs = self.tokenizer(
sentence[batch_id * batch_size:(batch_id + 1) * batch_size],
padding=True,
truncation=True,
max_length=max_length,
return_tensors="pt"
)
inputs = {k: v.to(target_device) for k, v in inputs.items()}
outputs = self.model(**inputs, return_dict=True)
if self.pooler == "cls":
embeddings = outputs.pooler_output
elif self.pooler == "cls_before_pooler":
embeddings = outputs.last_hidden_state[:, 0]
elif self.pooler == 'all_token_pooling':
embeddings = outputs.last_hidden_state[:, 1:].mean(1)
else:
raise NotImplementedError
if normalize_to_unit:
embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)
embedding_list.append(embeddings.cpu())
embeddings = torch.cat(embedding_list, 0)
if single_sentence and not keepdim:
embeddings = embeddings[0]
if return_numpy and not isinstance(embeddings, ndarray):
return embeddings.numpy()
return embeddings
def search(self, queries: Union[str, List[str]],
device: str = None,
threshold: float = 0.6,
top_k: int = 5) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:
if not self.is_faiss_index:
if isinstance(queries, list):
combined_results = []
for query in queries:
results = self.search(query, device)
combined_results.append(results)
return combined_results
similarities = self.similarity(queries, self.index["index"]).tolist()
id_and_score = []
for i, s in enumerate(similarities):
if s >= threshold:
id_and_score.append((i, s))
id_and_score = sorted(id_and_score, key=lambda x: x[1], reverse=True)[:top_k]
results = [(idx, score) for idx, score in id_and_score]
return results
else:
query_vecs = self.encode(queries, device=device, normalize_to_unit=True, keepdim=True, return_numpy=True)
distance, idx = self.index["index"].search(query_vecs.astype(np.float32), top_k)
def pack_single_result(dist, idx):
results = [(i, s) for i, s in zip(idx, dist) if s >= threshold]
return results
if isinstance(queries, list):
combined_results = []
for i in range(len(queries)):
results = pack_single_result(distance[i], idx[i])
combined_results.append(results)
return combined_results
else:
return pack_single_result(distance[0], idx[0])
# Path: get_datasets.py
def get_transformed_io(src_path, dst_path):
src = read_line_examples_from_file(src_path)
dst = read_line_examples_from_file(dst_path)
assert len(src) == len(dst)
print(f"Total examples = {len(dst)}")
targets, hashtags = get_para_targets(dst)
assert len(src) == len(targets)
return src, targets, hashtags
# Path: get_datasets.py
def get_hashtag_list(dst):
tags = dst.split('[SEP]')
target = []
for j in range(len(tags)):
tags[j] = tags[j].strip()
if tags[j] != '' and tags[j] not in target:
target.append(tags[j])
# if the dst is nothing
if len(target) == 0:
target.append('None')
# statistic_hashtags(hashtags)
return target
# Path: eval_utils.py
def f1(pre, rec):
if pre == 0 and rec == 0:
return 0.0
return 2 * pre * rec / (pre + rec)
# Path: data_augmentation.py
def random_augmentation(words, n=1):
for _ in range(20):
random_percent = random.random()
if random_percent <= 0.7:
new_words = synonym_replacement(words, n)
elif random_percent <= 0.8:
new_words = random_deletion(words, n)
elif random_percent <= 0.9:
new_words = random_swap(words, n)
elif random_percent <= 1:
new_words = random_insertion(words, n)
if new_words != words:
return new_words
return new_words + ' ' + stop_words[random.randint(0, 178)]
# Path: data_preprocess.py
import json
import csv
import jieba
import jieba.posseg as pseg
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
from nltk.corpus import stopwords
from transformers import BertModel
from transformers import BertTokenizer
from dense_retrieval import MySimCSE
from get_datasets import get_transformed_io, get_hashtag_list
from eval_utils import f1
from functools import cmp_to_key
from data_augmentation import random_augmentation
p = true_num / total_p
r = true_num / total_r
f = f1(p, r)
return p, r, f
def hashtag_coverage_cmp(x, y):
# x is in front of y if the function returns -1
if x[1][1] + x[1][2] > y[1][1] + y[1][2]:
return -1
if x[1][1] + x[1][2] == y[1][1] + y[1][2]:
if x[1][0] > y[1][0]:
return -1
elif x[1][0] == y[1][0]:
return 0
else:
return 1
return 1
def generate_training_data_for_retrieval(src_data_path, dst_data_path, retrieval_data_path, output_path):
src, targets, hashtags = get_transformed_io(src_data_path, dst_data_path)
with open(retrieval_data_path, 'r', encoding='UTF-8') as fp:
rev_index_list = json.load(fp)
total_num = len(src)
positive_samples = []
hard_negative_samples = []
for i in tqdm(range(total_num)):
coverage_rate = dict()
for j in range(total_num):
if i == j:
coverage_rate[i] = (-99, -99, -99)
continue
rate = compute_hashtag_coverage(hashtags[i], hashtags[j])
coverage_rate[j] = rate
coverage_rate = sorted(coverage_rate.items(), key=cmp_to_key(hashtag_coverage_cmp), reverse=False)[0]
positive_sample = src[coverage_rate[0]]
rev_index = rev_index_list[i]["index"]
nega_rate = dict()
for ind in rev_index:
rate = compute_hashtag_coverage(hashtags[i], hashtags[ind])
nega_rate[ind] = rate
nega_rate = sorted(nega_rate.items(), key=cmp_to_key(hashtag_coverage_cmp), reverse=True)[:2]
hard_negative_sample = src[nega_rate[0][0]]
if hard_negative_sample == positive_sample:
hard_negative_sample = src[nega_rate[1][0]]
positive_samples.append(positive_sample)
hard_negative_samples.append(hard_negative_sample)
assert len(src) == len(positive_samples) == len(hard_negative_samples)
with open(output_path, 'w', encoding='UTF-8') as fp:
header = ['sent0', 'sent1', 'hard_neg']
writer = csv.writer(fp)
writer.writerow(header)
data = []
for i in range(total_num):
line = [src[i], positive_samples[i], hard_negative_samples[i]]
data.append(line)
writer.writerows(data)
# print(src[i])
# print(hashtags[i])
# print(coverage_rate)
# for cov in coverage_rate:
# print(cov[0])
# print(src[cov[0]])
# print(hashtags[cov[0]])
# print('-'*30)
def generate_training_data_for_selector(src_data_path, dst_data_path, output_path):
src, targets, hashtags = get_transformed_io(src_data_path, dst_data_path)
constructive_src = []
positive_samples = []
hard_negative_samples = []
for i in tqdm(range(len(src))):
for hashtag in hashtags[i]:
hard_negative_hashtag = random_augmentation(hashtag)
constructive_src.append(src[i])
positive_samples.append(hashtag)
hard_negative_samples.append(hard_negative_hashtag)
total_num = len(constructive_src)
with open(output_path, 'w', encoding='UTF-8') as fp:
header = ['sent0', 'sent1', 'hard_neg']
writer = csv.writer(fp)
writer.writerow(header)
data = []
for i in range(total_num):
line = [constructive_src[i], positive_samples[i], hard_negative_samples[i]]
data.append(line)
writer.writerows(data)
def generate_selector_result_for_retrieval_result(src_data_path, retrieval_index_path, retrieval_document_path, selector_model_path, out_path):
src_data = []
with open(src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
src_data.append(line)
retrieval_document = []
with open(retrieval_document_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
retrieval_document.append(line)
with open(retrieval_index_path, 'r', encoding='UTF-8') as fp:
retrieval_index = json.load(fp)
assert len(src_data) == len(retrieval_index)
rev_dst = [[get_hashtag_list(retrieval_document[index]) for index in retrieval_index[i]["index"]] for i in
range(len(src_data))]
model = MySimCSE(selector_model_path, device='cuda')
out_put = []
for i in range(len(src_data)):
hashtag_set = []
for hashtag in rev_dst[i]:
| hashtag_set += hashtag |
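As a quick sanity check of the helpers referenced above, the following standalone snippet re-types f1 from eval_utils.py and get_hashtag_list from get_datasets.py (copied from the excerpts shown earlier) and verifies their behaviour on tiny inputs.

def f1(pre, rec):
    if pre == 0 and rec == 0:
        return 0.0
    return 2 * pre * rec / (pre + rec)

def get_hashtag_list(dst):
    tags = dst.split('[SEP]')
    target = []
    for j in range(len(tags)):
        tags[j] = tags[j].strip()
        if tags[j] != '' and tags[j] not in target:
            target.append(tags[j])
    if len(target) == 0:
        target.append('None')
    return target

# The F1 score is the harmonic mean of precision and recall.
assert abs(f1(0.5, 0.25) - 1 / 3) < 1e-9
assert f1(0, 0) == 0.0
# Empty segments and duplicates are dropped; first-appearance order is kept.
assert get_hashtag_list("tag1 [SEP] tag2 [SEP] tag1") == ["tag1", "tag2"]
assert get_hashtag_list("") == ["None"]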
====REPOSITORY====
# Repo Name: ilyamiro/Stewart
# Path: Audio/synthesizer.py
class Synthesizer:
"""
Class for synthesizing Stewart voice
Based on silero-tts v4 model from https://github.com/snakers4/silero-models
"""
def __init__(self, speaker="eugene"):
"""
Synthesizer initializing
:param speaker: One of eugene; kseniya; baya; xenia; aidar; random;
"""
# initialize pygame for playing audio
self.audio_init()
# initialize sample rate and the speaker_voice
self.sample_rate = 48000
self.speaker = speaker
# initialization for torch package
self.device = torch.device("cpu")
torch.set_num_threads(32)
# downloading model from source
self.local_file = "model.pt"
self.download_model()
# creating model
self.model = torch.package.PackageImporter(
f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/model.pt").load_pickle("tts_models",
"model")
self.model.to(self.device)
        synthesis_logger.info("Model has been set")
# setting audio state checker for synthesizer
self.audio = PlayChecker()
self.music_playing = False
self.music_stopped = True
def download_model(self, url="https://models.silero.ai/models/tts/ru/v4_ru.pt"):
"""
Function for downloading voice model
:param url: address for downloading voice model
"""
# downloading model from source
if not os.path.isfile(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/model.pt"):
synthesis_logger.info("Downloading synthesis model")
torch.hub.download_url_to_file(url, self.local_file)
@staticmethod
def audio_init():
"""
Function for initializing pygame audio player
"""
pygame.init()
pygame.mixer.init()
synthesis_logger.debug("Audio initialized")
def say(self, text: str) -> None:
"""
Function for saying something
:param text: text for saying
:return: None
"""
self.synthesize(text)
# playing audio from file using pygame.
        # playsound() could be used instead, but it does not really provide a stop() function, while pygame does
try:
pygame.mixer.music.load(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/audio.wav")
pygame.mixer.music.play()
except pygame.error:
synthesis_logger.error("Audio playing error")
def synthesize(self, text: str) -> None:
if self.music_stopped:
# synthesizing voice
try:
self.model.save_wav(ssml_text=f"<speak><prosody rate='100%'>{text}</prosody></speak>",
speaker=self.speaker,
sample_rate=self.sample_rate,
audio_path=f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Audio/audio.wav") # ssml text supports a lot of parameters, such as intonation, pauses etc.
except AssertionError:
raise SpeakerInvalid
except Exception:
raise SynthesisError
@staticmethod
def is_saying() -> bool:
"""
Function for checking if synthesized audio is being played
:return: checks if stewart is saying something. Returns True if yes, else False
"""
return pygame.mixer.music.get_busy()
def change_speaker(self, speaker: str):
"""
Function for changing voice model's speaker
:param speaker: speaker name. One of eugene; kseniya; baya; xenia; aidar; random;
"""
self.speaker = speaker
def change_sample_rate(self, rate: int):
"""
Function for changing voice model's rate
:param rate: rate of a synthesizer model
"""
self.sample_rate = rate
# Path: Audio/recognition.py
class Voice:
"""
Class for recognizing user's voice
All models taken from: https://alphacephei.com/vosk/models
Usage example
voice = Voice()
voice.start_stream()
while True:
for word in voice.listen():
print(word)
"""
def __init__(self, lang="ru", big_model=False):
# if platform == "linux" or platform == "linux2":
# # on some linux systems, jack_control is disabled on boot, and audio input is not working properly
# # if there is another audio input driver controller, it should be enabled itself
# os.system("jack_control start")
self.rate = 16000
self.init_model(lang, big_model)
self.init_audio(16000)
self.init_recognizer()
def listen(self):
"""
Generator for handling user input.
Reads data from stream and uses recognizer to analyze the data
"""
data = self.stream.read(4000, exception_on_overflow=False)
# checking if data is valid
if self.recognizer.AcceptWaveform(data) and len(data) > 1 and self.stream.is_active():
# using json to load results of user input's analyzing
answer = json.loads(self.recognizer.Result())
# if user said something - it yields
if answer['text']:
# recognition_logger.info("Data readed and analyzed")
yield answer['text']
def init_model(self, model_lang="ru", big_model: bool = False):
"""
:param model_lang: choose model's language: ru/en
        :param big_model: choose whether to use the big model.
        Big models can take up to 8 GB of RAM on your device, so using them might not be optimal for you.
        Small models are mainly used for Android/iOS apps; they are much easier to handle,
        but they are worse at voice detection.
"""
# default model path
model_path = f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Speech-models/vosk-model-small-ru-0.22"
# choosing model depending on user's choice
if model_lang == "ru":
model_path = f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Speech-models/vosk-model{'-small' if not big_model else ''}-ru-{'0.42' if big_model else '0.22'}"
elif model_lang == "en":
model_path = f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Speech-models/vosk-model{'-small' if not big_model else ''}-en-us-{'0.22' if big_model else '0.15'}"
# initializing the model
self.model = Model(model_path)
recognition_logger.info("Model initialized")
def init_audio(self, rate):
"""
Function for initializing pyaudio stream
:param rate: the quality of audio coming from your microphone into the system.
rate from 16000 to 48000 doesn't really change model's behavior
"""
p = pyaudio.PyAudio()
self.rate = rate
try:
# the number of frames per buffer should preferably be half the rate value
self.stream = p.open(format=pyaudio.paInt16, rate=rate, channels=1, frames_per_buffer=int(rate / 2), input=True)
recognition_logger.info("stream created")
except TypeError:
raise StreamParametersError("Stream parameters (rate) are corrupted. Failed to open stream")
def init_recognizer(self):
"""
Function for initializing recognizer
"""
try:
self.recognizer = KaldiRecognizer(self.model, self.rate)
recognition_logger.info("Recognizer initialized")
except AttributeError:
recognition_logger.error("Model error")
raise ModelError("There was an error initializing this model")
def start_stream(self):
"""
Start voice input
"""
self.stream.start_stream()
recognition_logger.info("stream started")
def stop_stream(self):
"""
Stop voice input
"""
self.stream.stop_stream()
recognition_logger.info("stream stopped")
def is_enabled(self):
"""
Function for checking if the voice input is active
"""
return self.stream.is_active()
# Path: Database/Data.py
class Data:
"""
class for data handling
"""
def __init__(self):
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/data.json", "r") as file:
data = json.load(file)
for key in data.keys():
self.__setattr__(key, data[key])
# Path: PluginSystem/Plugin_system.py
class PluginOperation:
@staticmethod
def register_plugin(plugin: Plugin):
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json", "r") as file:
data = json.load(file)
__info__ = asdict(plugin.info)
if __info__ not in data["plugins"] and os.path.exists(
f'{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/{__info__["path"]}'):
data["plugins"].append(__info__)
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json", "w") as file:
json.dump(data, file, ensure_ascii=False)
plugin_system_logger.info(f"Plugin {__info__['name']} has been registered")
else:
plugin_system_logger.info(f"Plugin {__info__['name']} is already registered")
@staticmethod
def __check_plugin__(name: str, check: str):
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json", "r") as file:
data = json.load(file)
for plugin in data["plugins"]:
if plugin["name"] == name:
prm = plugin[check]
plugin_system_logger.info(f"Plugin's named {name} parameter {check} is equal to {prm}")
return prm
@staticmethod
def unregister_plugin(name: str):
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json", "r") as file:
data = json.load(file)
for plugin in data["plugins"]:
if plugin["name"] == name:
data["plugins"].remove(plugin)
break
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json", "w") as file:
json.dump(data, file, ensure_ascii=False)
plugin_system_logger.debug(f"Plugin {name} was successfully unregistered")
@staticmethod
def __plugin_load__(name):
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json", "r") as file:
data = json.load(file)
plugin_info = None
for go in data["plugins"]:
if go["name"] == name:
plugin_info = go
plugin_system_logger.info(f"Plugin {plugin_info['name']} is going to be loaded to the core")
break
if plugin_info:
spec = importlib.util.spec_from_file_location("main",
f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/{plugin_info['path']}/main.py")
plugin_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(plugin_module)
return plugin_module.Main
else:
plugin_system_logger.error("Plugin not found")
# Path: PluginSystem/Plugin_system.py
class PluginInfo:
name: str
author: str
type: str
path: str
platform: str
about: str
# Path: LogSystem/Loggers.py
class BaseLogger(Logger):
def __init__(self, name, level=logging.NOTSET, ):
def _setup_logger(self):
# Path: Core/Core.py
import json
import os
import random
import threading
import pyautogui
import importlib.util
from Audio.synthesizer import Synthesizer
from Audio.recognition import Voice
from Database.Data import Data
from PluginSystem.Plugin_system import PluginOperation, PluginInfo
from Command_System import *
from LogSystem import core_logger
self.plugin_ref: dict[str: dict] = {}
self.plugin_enable: dict[str: bool] = {}
self.multi_handle_answers = ["Конечно, сэр", "Выполняю, сэр", "Есть, сэр"]
self.default_answers = ["Я тут, сэр", "Слушаю вас, сэр", "Что-то хотели, сэр?", "Всегда к вашим услугам, сэр",
"Я весь внимание, сэр", "Добрый день, сэр!"]
core_logger.debug("Config parameters imported")
def __speech_recognition__(self):
core_logger.debug("Speech recognition started")
while True:
# iterating through speech recognition generator
for data in self.recognition.listen():
# setting recognized data as an attribute for access to the current data in outer field in class
self.said_word = data
# logging
self.log("user", data)
# checking if the application is enabled
if self.power:
                    # remove the trigger word from the data so it does not interfere with the main command processing
request = self.__remove_trigger_word__(data)
if request != "-":
self.__handle_input__(request)
def __handle_input__(self, request):
if not request:
thread = threading.Thread(target=self.synthesizer.say, args=[random.choice(self.default_answers)])
thread.start()
else:
total = self.__multihandle__(request)
if len(total) == 1:
result = self.command_tree.find_command(total[0])
if result:
if result[2]:
thread = threading.Thread(target=self.synthesizer.say, args=[random.choice(result[2])])
thread.start()
result = list(result)
result.extend([total[0], request])
self.__synthesis_handler__(result)
elif len(total) > 1:
thread = threading.Thread(target=self.synthesizer.say, args=[random.choice(self.multi_handle_answers)])
thread.start()
said = False
for command in total:
result = self.command_tree.find_command(command)
if result:
result = list(result)
result.extend([total[0], request])
output = getattr(self, result[0])(parameters=result[1], command=result[3], request=result[4])
if output and not said:
said = True
choiced = random.choice(self.multi_handle_answers)
thread = threading.Thread(target=self.synthesizer.say,
args=[choiced])
thread.start()
self.log("assistant", choiced)
def __synthesis_handler__(self, request):
@self.__say_dec__(request[2])
def __send_handler_request__():
return getattr(self, request[0])(parameters=request[1], command=request[3], request=request[4])
__send_handler_request__()
def __say_dec__(self, synthesis):
def decorator(func):
def wrapper():
if synthesis:
self.log("assistant", synthesis)
to_say = func()
if to_say is not None and not synthesis:
choiced = to_say if not isinstance(to_say, list) else random.choice(to_say)
self.log("assistant", choiced)
thread = threading.Thread(target=self.synthesizer.say,
args=[choiced])
thread.start()
return wrapper
return decorator
def __install_plugins__(self):
for plugin in self.loaded_plugins:
plugin_instance = plugin()
if plugin_instance.info.type == "voice":
plugin_instance.__add_commands__()
self.plugin_enable[plugin_instance.info.name] = True
self.default_answers.extend(plugin_instance.default_answers)
self.multi_handle_answers.extend(plugin_instance.multi_handle_answers)
for handler in plugin_instance.handlers:
self.__setattr__(handler, plugin_instance.__getattribute__(handler))
core_logger.debug(f"Voice plugin {plugin_instance.info.name} has been set")
elif plugin_instance.info.type == "command":
plugin_instance.__add_commands__()
self.plugin_enable[plugin_instance.info.name] = True
self.command_tree.add_commands(plugin_instance.command_tree)
self.plagin_commands(plugin_instance.info.name, plugin_instance.command_tree)
self.__register_commands__(plugin_instance.command_tree)
for handler in plugin_instance.handlers:
self.__setattr__(handler, plugin_instance.__getattribute__(handler))
for var in plugin_instance.variables.keys():
self.__setattr__(var, plugin_instance.variables[var])
core_logger.debug(f"Command Plugin {plugin_instance.info.name} has been set")
self.set_plugins_status()
def disable_plagin(self, name):
if self.plugin_enable[name]:
self.plugin_enable[name] = False
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/commands.json", "r") as file:
data = json.load(file)
data: dict
for command in self.plugin_ref[name].keys():
self.command_tree.delete_command(command)
to_remove = " ".join(command)
if data[to_remove]:
del data[to_remove]
with open(f"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/commands.json", "w") as file:
json.dump(data, file, ensure_ascii=False)
self.set_plugins_status()
| def enable_plugin(self, name): |
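The enable path is not shown in this excerpt, so the following toy sketch only illustrates the in-memory bookkeeping that disable_plagin performs, run in reverse. ToyCore is a made-up class for illustration; the real Core also rewrites Database/commands.json and works through the command tree.

class ToyCore:
    def __init__(self):
        self.plugin_enable = {}
        self.plugin_ref = {}
        self.commands = {}

    def install(self, name, command_tree):
        # Remember the plugin's commands so they can be restored later.
        self.plugin_enable[name] = True
        self.plugin_ref[name] = command_tree
        self.commands.update(command_tree)

    def disable_plugin(self, name):
        if self.plugin_enable[name]:
            self.plugin_enable[name] = False
            for command in self.plugin_ref[name]:
                self.commands.pop(command, None)

    def enable_plugin(self, name):
        # Hypothetical mirror image of disable_plugin.
        if not self.plugin_enable[name]:
            self.plugin_enable[name] = True
            self.commands.update(self.plugin_ref[name])

core = ToyCore()
core.install("weather", {"what is the weather": "weather_handler"})
core.disable_plugin("weather")
assert core.commands == {}
core.enable_plugin("weather")
assert "what is the weather" in core.commands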
====REPOSITORY====
# Repo Name: LLM-Evaluation-s-Always-Fatiguing/leaf-playground-hub
# Path: who_is_the_spy_cn/who_is_the_spy_cn/agents/moderator.py
class Moderator(
SceneStaticAgent,
role_definition=ROLE_DEFINITION,
cls_description="An agent who moderate the game: Who is the Spy"
):
config_cls = ModeratorConfig
config: config_cls
game_rule_with_blank = (
"下面是游戏规则:\n\n"
"###基础信息###\n\n"
"游戏中有三个角色:平民,卧底和白板。在游戏的开始,每个人都会收到一条关键词,仅自己可见\n"
"- 平民们会收到相同的正确的关键词。\n"
"- 卧底们会收到相同的错误的关键词,但是跟正确的关键词较为相似。\n"
"- 白板会被告知自己是白板角色。\n"
"你不会被告知自己的角色,只能通过观察和分析进行猜测。\n\n"
"###游戏阶段###\n\n"
"这场游戏有四个阶段:\n"
"1.描述阶段:所有玩家同时进行一句话描述,无先后顺序。描述的对象可以是你收到的关键词,也可以是任何你认为可能是正确的关键词。"
"当你不确定自己的角色时,可以通过模糊的描述从而隐藏自己的角色和关键词。\n"
"2.预测阶段:根据本场游戏的历史上下文,判断自己是否是卧底,并预测其他的卧底和白板。仅自己可见。\n"
"3.投票阶段:为了达到你的胜利条件,请投出你的选票。被投票最多的人将被淘汰。\n"
"4.游戏结束:当卧底全部被淘汰,或者仅剩两位玩家时,游戏结束。\n\n"
"###胜利条件###\n\n"
"你的胜利条件取决于你的角色:\n"
"- 作为平民,你需要找出白板和卧底并通过投票将他们淘汰。请记住,优先淘汰白板。\n"
"- 作为卧底,你需要隐藏自己的角色并潜伏下去,通过投票淘汰其他角色,活到最后你就胜利了。\n"
"- 作为白板,你需要隐藏自己的角色并潜伏下去,通过投票淘汰卧底。\n\n"
"###限定规则###\n\n"
"- 你的描述应该足够简短,并且不能直接包含你收到的关键词。\n"
"- 你的描述不能与之前的描述重复。\n"
"- 在投票阶段,你不能投自己或者已经被淘汰的人,一人只能投一票。\n"
"- 每句话必须以<EOS>作为结束。"
)
game_rule_without_blank = (
"下面是游戏规则:\n\n"
"###基础信息###\n\n"
"游戏中有两个角色:平民,卧底。在游戏的开始,每个人都会收到一条关键词,仅自己可见\n"
"- 平民们会收到相同的正确的关键词。\n"
"- 卧底们会收到相同的错误的关键词,但是跟正确的关键词较为相似。\n"
"你不会被告知自己的角色,只能通过观察和分析进行猜测。\n\n"
"###游戏阶段###\n\n"
"这场游戏有四个阶段:\n"
"1.描述阶段:所有玩家同时进行一句话描述,无先后顺序。描述的对象可以是你收到的关键词,也可以是任何你认为可能是正确的关键词。"
"当你不确定自己的角色时,可以通过模糊的描述从而隐藏自己的角色和关键词。\n"
"2.预测阶段:根据本场游戏的历史上下文,判断自己是否是卧底,并预测其他的卧底。仅自己可见。\n"
"3.投票阶段:为了达到你的胜利条件,请投出你的选票。被投票最多的人将被淘汰。\n"
"4.游戏结束:当卧底全部被淘汰,或者仅剩两位玩家时,游戏结束。\n\n"
"###胜利条件###\n\n"
"你的胜利条件取决于你的角色:\n"
"- 作为平民,你需要找出卧底并通过投票将他们淘汰。\n"
"- 作为卧底,你需要隐藏自己的角色并潜伏下去,通过投票淘汰其他角色,活到最后你就胜利了。\n\n"
"###限定规则###\n\n"
"- 你的描述应该足够简短,并且不能直接包含你收到的关键词。\n"
"- 你的描述不能与之前的描述重复。\n"
"- 在投票阶段,你不能投自己或者已经被淘汰的人,一人只能投一票。\n"
"- 每句话必须以<EOS>作为结束。"
)
def __init__(self, config: config_cls):
super().__init__(config=config)
self.id2role: Dict[str, PlayerRoles] = {}
self.role2players: Dict[PlayerRoles, List[Profile]] = {
PlayerRoles.CIVILIAN: [],
PlayerRoles.SPY: [],
PlayerRoles.BLANK: []
}
self.id2player: Dict[str, Profile] = {}
self.id2status: Dict[str, PlayerStatus] = {}
self.civilian_key: KeyTypes = None
self.spy_key: KeyTypes = None
async def registry_players(self, players: List[Profile]) -> None:
for player in players:
self.id2player[player.id] = player
self.id2status[player.id] = PlayerStatus.ALIVE
async def init_game(self) -> ModeratorInitGameSummary:
num_players = len(self.id2player)
has_blank = self.env_var["has_blank"].current_value
key_modality = self.env_var["key_modality"].current_value
roles_assignment_strategy = {
4: {
PlayerRoles.CIVILIAN: 3,
PlayerRoles.SPY: 1,
PlayerRoles.BLANK: 0
},
5: {
PlayerRoles.CIVILIAN: 3 if has_blank else 4,
PlayerRoles.SPY: 1,
PlayerRoles.BLANK: 1 if has_blank else 0
},
6: {
PlayerRoles.CIVILIAN: 4 if has_blank else 5,
PlayerRoles.SPY: 1,
PlayerRoles.BLANK: 1 if has_blank else 0
},
7: {
PlayerRoles.CIVILIAN: 4 if has_blank else 5,
PlayerRoles.SPY: 2,
PlayerRoles.BLANK: 1 if has_blank else 0
},
8: {
PlayerRoles.CIVILIAN: 5 if has_blank else 6,
PlayerRoles.SPY: 2,
PlayerRoles.BLANK: 1 if has_blank else 0
},
9: {
PlayerRoles.CIVILIAN: 6 if has_blank else 7,
PlayerRoles.SPY: 2,
PlayerRoles.BLANK: 1 if has_blank else 0
}
}
roles_agent_num = roles_assignment_strategy[num_players]
roles = list(chain(*[[role] * agent_num for role, agent_num in roles_agent_num.items()]))
random.shuffle(roles) # shuffle to randomize the role assignment
for player_id, role in zip(list(self.id2player.keys()), roles):
self.role2players[role].append(self.id2player[player_id])
self.id2role[player_id] = role
if key_modality == KeyModalities.TEXT:
keys = random.choice(load_textual_key())
self.civilian_key, self.spy_key = Text(text=keys["Civilian"]), Text(text=keys["Spy"])
elif key_modality == KeyModalities.IMAGE:
keys = random.choice(load_image_key())
self.civilian_key, self.spy_key = Image(url=keys["Civilian"]), Image(url=keys["Spy"])
else:
raise NotImplementedError(f"[{key_modality.value}] modal not supported yet.")
role_assign_summary = "\n".join(
[f"- {role.value} :: {[f'{player.name}({player.id})' for player in players]}"
for role, players in self.role2players.items()]
)
key_assign_summary = (
f"- {PlayerRoles.CIVILIAN.value} :: {self.civilian_key}\n"
f"- {PlayerRoles.SPY.value} :: {self.spy_key}"
)
msg = (
f"## 角色和关键词分配结果\n\n"
f"### 角色\n{role_assign_summary}\n\n### 关键词\n{key_assign_summary}"
)
        keys = {
            PlayerRoles.CIVILIAN.value: self.civilian_key.display_text,
            PlayerRoles.SPY.value: self.spy_key.display_text,
        }
if has_blank:
keys[PlayerRoles.BLANK.value] = '空白'
return ModeratorInitGameSummary(
sender=self.profile,
receivers=[self.profile],
content=Text(text=msg, display_text=msg),
role2players={role.value: [p.name for p in players] for role, players in self.role2players.items()},
keys=keys
)
async def introduce_game_rule(self) -> ModeratorSummary:
has_blank = self.env_var["has_blank"].current_value
msg = self.game_rule_with_blank if has_blank else self.game_rule_without_blank
return ModeratorSummary(
sender=self.profile,
receivers=list(self.id2player.values()),
content=Text(text=msg, display_text=msg)
)
async def announce_game_start(self) -> ModeratorSummary:
num_players = len(self.id2player)
role2word = {
PlayerRoles.CIVILIAN: f"名{PlayerRoles.CIVILIAN.value}",
PlayerRoles.SPY: f"名{PlayerRoles.SPY.value}",
PlayerRoles.BLANK: f"名{PlayerRoles.BLANK.value}"
}
roles_num_description = ", ".join(
[f"{len(role_players)} {role2word[role]}" for role, role_players in self.role2players.items()]
)
msg = (
f"现在游戏开始!本场游戏有 {num_players} 名玩家, 包括 "
f"{roles_num_description}."
)
return ModeratorSummary(
sender=self.profile,
receivers=list(self.id2player.values()),
content=Text(text=msg, display_text=msg)
)
async def assign_keys(self, player: Profile) -> ModeratorKeyAssignment:
role = self.id2role[player.id]
if role == PlayerRoles.CIVILIAN:
return ModeratorKeyAssignment.create_with_key(
key=self.civilian_key, sender=self.profile, receiver=player
)
elif role == PlayerRoles.SPY:
return ModeratorKeyAssignment.create_with_key(
key=self.spy_key, sender=self.profile, receiver=player
)
else:
return ModeratorKeyAssignment.create_without_key(
sender=self.profile, receiver=player
)
async def ask_for_key_description(self) -> ModeratorAskForDescription:
return ModeratorAskForDescription.create(
sender=self.profile,
receivers=[
player for player in self.id2player.values()
]
)
async def valid_player_description(self, description: PlayerDescription) -> ModeratorWarning:
player_id = description.sender_id
player_role = self.id2role[player_id]
if player_role != PlayerRoles.BLANK and self.env_var["key_modality"].current_value == KeyModalities.TEXT:
warn_msg = (
"你的描述中包含你的关键词,这是不被允许的,请重新进行描述。回复仅包含你对关键词的描述,不需要多余的回答。"
)
if (player_role == PlayerRoles.CIVILIAN and self.civilian_key.text.lower() in description.content.text.lower()) or \
(player_role == PlayerRoles.SPY and self.spy_key.text.lower() in description.content.text.lower()):
return ModeratorWarning(
sender=self.profile,
receivers=[description.sender],
content=Text(text=warn_msg, display_text=warn_msg),
has_warn=True
)
return ModeratorWarning(
sender=self.profile,
receivers=[description.sender],
content=Text(text="", display_text=""),
has_warn=False
)
async def ask_for_role_prediction(self) -> ModeratorAskForRolePrediction:
has_blank = self.env_var["has_blank"].current_value
return ModeratorAskForRolePrediction.create(
sender=self.profile,
receivers=[
player for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE
],
player_names=[
player.name for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE
],
has_blank=has_blank
)
async def summarize_players_prediction(self, predictions: List[PlayerPrediction]) -> ModeratorPredictionSummary:
has_blank = self.env_var["has_blank"].current_value
summaries = []
extracted_predictions = {}
for prediction in predictions:
preds = prediction.get_prediction(
player_names=[player.name for player in self.id2player.values()],
has_blank=has_blank
)
extracted_predictions[prediction.sender_name] = {role.value: list(names) for role, names in preds.items()}
summary = (
f"**{prediction.sender_name}({prediction.sender_id})'s prediction**\n"
f"- {PlayerRoles.SPY.value} :: {list(preds[PlayerRoles.SPY])}"
)
if has_blank:
summary += f"\n- {PlayerRoles.BLANK.value} :: {list(preds[PlayerRoles.BLANK])}"
summaries.append(summary)
alive_spies = [
player.name for player in self.role2players[PlayerRoles.SPY]
if self.id2status[player.id] == PlayerStatus.ALIVE
]
label = (
f"**Correct Answer**\n- {PlayerRoles.SPY.value} :: {alive_spies}"
)
ground_truth = {PlayerRoles.SPY.value: alive_spies}
if has_blank:
alive_blanks = [
player.name for player in self.role2players[PlayerRoles.BLANK]
if self.id2status[player.id] == PlayerStatus.ALIVE
]
label += f"\n- {PlayerRoles.BLANK.value} :: {alive_blanks}"
ground_truth[PlayerRoles.BLANK.value] = alive_blanks
msg = "\n\n".join(summaries) + f"\n\n{label}"
return ModeratorPredictionSummary(
sender=self.profile,
receivers=[self.profile],
content=Text(text=msg, display_text=msg),
predictions=extracted_predictions,
ground_truth=ground_truth
)
async def ask_for_vote(self, targets: List[Profile]) -> ModeratorAskForVote:
return ModeratorAskForVote.create(
sender=self.profile,
receivers=[
player for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE
],
targets=targets
)
async def summarize_player_votes(
self,
votes: List[PlayerVote],
patience: int,
focused_players: Optional[List[Profile]]
) -> ModeratorVoteSummary:
def get_most_voted_players() -> List[Profile]:
eliminated_names = [
player_name for player_name, num_be_voted in player2num_be_voted.items() if
num_be_voted == max(player2num_be_voted.values())
]
return [player for player in self.id2player.values() if player.name in eliminated_names]
player2num_be_voted = {player.name: 0 for player in self.id2player.values()}
player2votes = {}
for vote in votes:
vote_to = vote.get_vote([player.name for player in self.id2player.values()])
if not vote_to:
continue
player2votes[vote.sender_name] = vote_to
player2num_be_voted[vote_to] += 1
if focused_players:
focused_names = [p.name for p in focused_players]
for player_name in player2num_be_voted:
if player_name not in focused_names:
player2num_be_voted[player_name] = 0
voting_detail = "\n".join([f"{voter} 投票给 {voted}" for voter, voted in player2votes.items()]) + "\n"
if focused_players:
voting_detail += (
f"这是一次针对平票玩家的重新投票, 因此将仅统计上次投票中平票玩家 {[p.name for p in focused_players]} 的本次得票。\n"
)
most_voted_players = get_most_voted_players()
if len(most_voted_players) > 1 and patience > 0: # tied
msg = (
f"{voting_detail}\n玩家 {[p.name for p in most_voted_players]} 有相同的票数。"
f"请这些玩家额外进行一次针对自己获得的关键词的一句话描述。"
)
return ModeratorVoteSummary(
sender=self.profile,
receivers=[player for player in self.id2player.values()],
content=Text(text=msg, display_text=msg),
tied_players=most_voted_players,
player_received_votes=player2num_be_voted,
players_voted_to=player2votes
)
else: # eliminate
for player in most_voted_players:
self.id2status[player.id] = PlayerStatus.ELIMINATED
msg = f"{voting_detail}\n玩家 {[p.name for p in most_voted_players]} 获得的票数最多,本轮被淘汰。"
return ModeratorVoteSummary(
sender=self.profile,
receivers=[player for player in self.id2player.values()],
content=Text(text=msg, display_text=msg),
player_received_votes=player2num_be_voted,
players_voted_to=player2votes
)
async def check_if_game_over(self) -> ModeratorCheckGameOverSummary:
def return_game_over(role: PlayerRoles):
winners = [
player.name for player in self.role2players[role]
if self.id2status[player.id] == PlayerStatus.ALIVE
]
msg = f"游戏结束! {role.value} 胜利, 赢家是: {winners}."
return ModeratorCheckGameOverSummary(
sender=self.profile,
receivers=[player for player in self.id2player.values()],
content=Text(text=msg, display_text=msg),
is_game_over=True,
winners=winners
)
has_blank = self.env_var["has_blank"].current_value
num_players = len(self.id2player)
num_alive_players = len(
[player for player, status in self.id2status.items() if status == PlayerStatus.ALIVE]
)
num_alive_civilians = len(
[
player for player in self.role2players[PlayerRoles.CIVILIAN]
if self.id2status[player.id] == PlayerStatus.ALIVE
]
)
num_alive_spies = len(
[
player for player in self.role2players[PlayerRoles.SPY]
if self.id2status[player.id] == PlayerStatus.ALIVE
]
)
if num_alive_civilians == num_alive_players: # civilians win
return return_game_over(PlayerRoles.CIVILIAN)
if (
(num_players > 6 and num_alive_players <= 3 and num_alive_spies > 0) or
(num_players <= 6 and num_alive_players <= 2 and num_alive_spies > 0)
): # spies win
return return_game_over(PlayerRoles.SPY)
if has_blank and num_alive_spies == 0 and num_alive_civilians != num_alive_players: # blank wins
return return_game_over(PlayerRoles.BLANK)
msg = f"没有任何角色获胜,游戏继续。"
return ModeratorCheckGameOverSummary(
sender=self.profile,
receivers=[player for player in self.id2player.values()],
content=Text(text=msg, display_text=msg),
is_game_over=False,
winners=None
)
async def reset_inner_status(self):
self.id2role: Dict[str, PlayerRoles] = {}
self.role2players: Dict[PlayerRoles, List[Profile]] = {
PlayerRoles.CIVILIAN: [],
PlayerRoles.SPY: [],
PlayerRoles.BLANK: []
}
self.id2player: Dict[str, Profile] = {}
self.id2status: Dict[str, PlayerStatus] = {}
self.civilian_key: Union[Audio, Image, Text] = None
self.spy_key: Union[Audio, Image, Text] = None
# Path: who_is_the_spy_cn/who_is_the_spy_cn/agents/player.py
class BaseAIPlayer(
SceneAIAgent,
ABC,
role_definition=ROLE_DEFINITION,
cls_description="An AI agent who participants in the game Who is the Spy as a player",
action_exec_timeout=120
):
config_cls = BaseAIPlayerConfig
config: config_cls
def __init__(self, config: config_cls):
super().__init__(config=config)
@abstractmethod
async def receive_key(self, key_assignment: ModeratorKeyAssignment) -> None:
pass
@abstractmethod
async def describe_key(self, history: List[MessageTypes], receivers: List[Profile]) -> PlayerDescription:
pass
@abstractmethod
async def predict_role(self, history: List[MessageTypes], moderator: Profile) -> PlayerPrediction:
pass
@abstractmethod
async def vote(self, history: List[MessageTypes], moderator: Profile) -> PlayerVote:
pass
@abstractmethod
async def reset_inner_status(self):
pass
# Path: who_is_the_spy_cn/who_is_the_spy_cn/agents/human_player.py
class HumanPlayer(
SceneHumanAgent,
role_definition=ROLE_DEFINITION,
cls_description="参与谁是卧底游戏的人类玩家的代理",
action_exec_timeout=120
):
config_cls = HumanPlayerConfig
config: config_cls
def __init__(self, config: config_cls):
super().__init__(config=config)
async def receive_key(self, key_assignment: ModeratorKeyAssignment) -> None:
pass
async def describe_key(self, history: List[MessageTypes], receivers: List[Profile]) -> PlayerDescription:
description = (await self.wait_human_text_input()) or ""
return PlayerDescription(
sender=self.profile,
receivers=receivers,
content=Text(text=description, display_text=description)
)
async def predict_role(self, history: List[MessageTypes], moderator: Profile) -> PlayerPrediction:
prediction = (await self.wait_human_text_input()) or ""
return PlayerPrediction(
sender=self.profile,
receivers=[moderator, self.profile],
content=Text(text=prediction, display_text=prediction)
)
async def vote(self, history: List[MessageTypes], moderator: Profile) -> PlayerVote:
vote = (await self.wait_human_text_input()) or ""
return PlayerVote(
sender=self.profile,
receivers=[moderator, self.profile],
content=Text(text=vote, display_text=vote)
)
async def reset_inner_status(self):
pass
# Path: who_is_the_spy_cn/who_is_the_spy_cn/scene.py
import asyncio
import random
from typing import List, Optional, Type, Union
from pydantic import Field
from leaf_playground.core.workers import Logger
from leaf_playground.core.scene import Scene
from leaf_playground.core.scene_definition import SceneConfig
from leaf_playground.data.log_body import ActionLogBody
from leaf_playground.data.media import Text
from .agents.moderator import Moderator
from .agents.player import BaseAIPlayer
from .agents.human_player import HumanPlayer
from .scene_definition import *
Player = Union[BaseAIPlayer, HumanPlayer]
class WhoIsTheSpyLogBody(ActionLogBody):
references: Optional[List[MessageTypes]] = Field(default=None)
response: MessageTypes = Field(default=...)
game_id: int = Field(default=...)
round_id: int = Field(default=...)
WhoIsTheSpySceneConfig = SceneConfig.create_config_model(
SCENE_DEFINITION,
additional_config_fields={
"debug_mode": (bool, Field(default=False, exclude=True))
}
)
class WhoIsTheSpyScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=WhoIsTheSpyLogBody):
config_cls = WhoIsTheSpySceneConfig
config: config_cls
log_body_class: Type[WhoIsTheSpyLogBody]
def __init__(self, config: config_cls, logger: Logger):
super().__init__(config=config, logger=logger)
self.moderator: Moderator = self.static_agents["moderator"][0]
self.players: List[Player] = self.agents["player"]
async def _run(self):
def put_message(message: MessageTypes, log_msg: str, action_belonged_chain: Optional[str] = None):
references = None
if not message.sender_id == self.moderator.id:
references = self.message_pool.get_messages(message.sender)
self.message_pool.put_message(message)
log = self.log_body_class(
| references=references, |
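A standalone check of the role-assignment table from Moderator.init_game above (the counts below re-type its has_blank branch): for every supported player count, the civilian, spy and blank quotas add up to the total number of players.

# Re-typed from the roles_assignment_strategy table shown earlier (has_blank=True).
strategy = {
    4: {"civilian": 3, "spy": 1, "blank": 0},
    5: {"civilian": 3, "spy": 1, "blank": 1},
    6: {"civilian": 4, "spy": 1, "blank": 1},
    7: {"civilian": 4, "spy": 2, "blank": 1},
    8: {"civilian": 5, "spy": 2, "blank": 1},
    9: {"civilian": 6, "spy": 2, "blank": 1},
}
for num_players, counts in strategy.items():
    # Every role quota set must cover exactly the number of players at the table.
    assert sum(counts.values()) == num_players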
====REPOSITORY====
# Repo Name: djkcyl/ABot-NT
# Path: services/aiohttp.py
class AiohttpClientService(Service):
id = "http.client/aiohttp"
session: ClientSession
def __init__(self, session: ClientSession | None = None) -> None:
self.session = cast(ClientSession, session)
super().__init__()
@property
def stages(self) -> set[str]:
return {"preparing", "cleanup"}
@property
def required(self) -> set:
return set()
async def launch(self, _: Launart) -> None:
async with self.stage("preparing"):
if self.session is None:
self.session = ClientSession(timeout=ClientTimeout(total=None))
async with self.stage("cleanup"):
await self.session.close()
# Path: services/s3file.py
class S3File(Minio):
async def get_object(self, object_name: str) -> ClientResponse:
launart = Launart.current()
session = launart.get_component(AiohttpClientService).session
return await super().get_object("abot7f8befa44d10", object_name, session)
async def put_object(
self,
object_name: str,
data: bytes | BytesIO,
content_type: str = "application/octet-stream",
o_type: str | None = None,
) -> ObjectWriteResult:
if isinstance(data, bytes):
readble = BytesIO(data)
legnth = len(data)
elif isinstance(data, BytesIO):
readble = data
legnth = data.getbuffer().nbytes
else:
msg = "data must be bytes or BytesIO"
raise TypeError(msg)
readble.seek(0)
tags = Tags()
if o_type:
tags["O_Type"] = o_type
return await super().put_object("abot7f8befa44d10", object_name, readble, legnth, content_type, tags=tags)
async def object_exists(self, object_name: str) -> bool:
try:
await self.get_object(object_name)
except S3Error as e:
if e.code == "NoSuchKey":
return False
raise
return True
async def list_objects(self, prefix: str) -> list | None:
return await super().list_objects("abot7f8befa44d10", prefix)
async def get_presigned_url(self, object_name: str, expires_seconds: int = 300) -> str:
return await super().get_presigned_url(
"GET", "abot7f8befa44d10", object_name, timedelta(seconds=expires_seconds)
)
async def remove_object(self, object_name: str) -> None:
return await super().remove_object("abot7f8befa44d10", object_name)
async def set_object_tags(self, object_name: str, tag: Tags | dict[str, str]) -> None:
if isinstance(tag, dict):
tags = Tags()
tags.update(tag)
return await super().set_object_tags("abot7f8befa44d10", object_name, tags)
# Path: services/s3file.py
class S3FileService(Service):
id: str = "abot/s3file"
def __init__(
self,
endpoint: str = "127.0.0.1:8333",
access_key: str | None = None,
secret_key: str | None = None,
*,
secure: bool = False,
) -> None:
super().__init__()
self.s3file = S3File(endpoint, access_key, secret_key, secure=secure)
# def get_interface(self, _) -> Minio:
# return self.s3file
@property
def required(self) -> set:
return set()
@property
def stages(self) -> set[str]:
return {"preparing"}
async def launch(self, _: Launart) -> None:
async with self.stage("preparing"):
if await self.s3file.bucket_exists("abot7f8befa44d10"):
logger.info("S3 Bucket 已存在")
else:
logger.info("正在创建 S3 Bucket")
await self.s3file.make_bucket("abot7f8befa44d10")
logger.success("S3 Bucket 创建成功")
test_text = secrets.token_hex(16).encode()
if await self.s3file.object_exists(".keep"):
await self.s3file.remove_object(".keep")
put_test = await self.s3file.put_object(".keep", test_text)
if put_test:
logger.info("S3 Bucket 可写")
else:
logger.error("S3 Bucket 不可写")
msg = "S3 Bucket 不可写"
raise S3FileError(msg)
read_test: ClientResponse = await self.s3file.get_object(".keep")
if await read_test.read() == test_text:
logger.info("S3 Bucket 可读")
else:
logger.error("S3 Bucket 不可读")
msg = "S3 Bucket 不可读"
raise S3FileError(msg)
logger.success("S3 Bucket 测试完成")
# Path: utils/builder.py
class AGroupBuilder(GroupData):
@classmethod
async def init(cls, group: GroupData | str | int) -> GroupData:
if isinstance(group, str | int):
group_id = str(group)
elif not isinstance(group, GroupData):
msg = f"无法识别的群组类型: {type(group)}"
raise TypeError(msg)
else:
group_id = group.group_id
if isinstance(group, GroupData):
return group
group_ = await GroupData.find_one(Eq(GroupData.group_id, group_id))
        if group_ is None:
await cls.insert(GroupData(group_id=group_id))
group_ = await GroupData.find_one(Eq(GroupData.group_id, group_id))
logger.info(f"[Core.db] 已初始化群: {group_id}")
return cast(GroupData, group_)
return cast(GroupData, group_)
async def ban(self, reason: str, source: str) -> bool:
if self.banned:
return False
self.banned = True
await GroupData.save() # type: ignore
await BanLog.insert(
BanLog(
target_id=self.group_id,
target_type="group",
action="ban",
ban_reason=reason,
ban_source=source,
)
)
return True
async def unban(self, reason: str, source: str) -> bool:
if not self.banned:
return False
self.banned = False
await GroupData.save() # type: ignore
await BanLog.insert(
BanLog(
target_id=self.group_id,
target_type="group",
action="unban",
ban_reason=reason,
ban_source=source,
)
)
return True
async def disable_function(self, function: str, meta: "FuncItem") -> bool:
if function in self.disable_functions or not meta.can_be_disabled:
return False
self.disable_functions.append(function)
await GroupData.save() # type: ignore
return True
async def enable_function(self, function: str, meta: "FuncItem") -> bool:
if function not in self.disable_functions or meta.maintain:
return False
self.disable_functions.remove(function)
await GroupData.save() # type: ignore
return True
# Path: utils/builder.py
class AUserBuilder(AUser):
@classmethod
async def get_user(cls, user: AUser | str | int, create_type: Literal["cid", "aid"] = "cid") -> AUser | None:
if isinstance(user, str | int):
if isinstance(user, str) and not user.isdigit():
msg = f"无法识别的用户类型: {type(user)}"
raise ValueError(msg)
user_id = str(user)
elif not isinstance(user, AUser):
msg = f"无法识别的用户类型: {type(user)}"
raise TypeError(msg)
else:
user_id = user.cid
if isinstance(user, AUser):
return user
if create_type == "cid":
user_: AUser | None = await AUser.find_one(Eq(AUser.cid, user_id))
elif create_type == "aid":
user_: AUser | None = await AUser.find_one(Eq(AUser.aid, int(user_id)))
else:
msg = f"无法识别的用户类型: {create_type}"
raise TypeError(msg)
return user_
@classmethod
async def init(cls, user: AUser | str | int, create_type: Literal["cid", "aid"] = "cid") -> AUser:
if isinstance(user, str | int):
if isinstance(user, str) and not user.isdigit():
msg = f"无法识别的用户类型: {user}"
raise ValueError(msg)
user_id = str(user)
elif not isinstance(user, AUser):
msg = f"无法识别的用户类型: {type(user)}"
raise TypeError(msg)
else:
user_id = user.cid
if isinstance(user, AUser):
return user
if create_type == "cid":
user_ = await AUser.find_one(Eq(AUser.cid, user_id))
elif create_type == "aid":
user_ = await AUser.find_one(Eq(AUser.aid, int(user_id)))
else:
msg = f"无法识别的用户类型: {create_type}"
raise TypeError(msg)
if user_ is None:
last_userid = await AUser.find_one(sort=[("_id", SortDirection.DESCENDING)])
aid = int(last_userid.aid) + 1 if last_userid else 1
await AUser.insert(AUser(aid=aid, cid=user_id))
user_ = await AUser.find_one(Eq(AUser.aid, aid))
logger.info(f"[Core.db] 已初始化用户: {user_id}")
return cast(AUser, user_)
return cast(AUser, user_)
# Path: utils/db/group.py
class GroupData(Document):
group_id: str
disable_functions: list[str] = []
banned: bool = False
class Settings:
name = "core_group"
indexes = [IndexModel("group_id", unique=True)]
# Path: utils/db/user.py
class AUser(Document):
aid: int
cid: str
coin: int = 10
nickname: str | None = None
is_sign: bool = False
is_chat: bool = False
today_transferred: int = 0
total_sign: int = 0
totle_talk: int = 0
continue_sign: int = 0
exp: int = 0
banned: bool = False
join_time: datetime = Field(default_factory=datetime.now, tzinfo=CHINA_TZ)
class Settings:
name = "core_user"
indexes = [IndexModel("aid", unique=True), IndexModel("cid", unique=True)]
@property
def level(self) -> int:
"""计算用户等级"""
level_map = [0, 200, 1500, 4500, 10800, 28800, 74100, 190300, 488300]
for i, v in enumerate(level_map):
if self.exp < v:
return i
return len(level_map)
@property
def next_level_exp(self) -> int:
"""计算下一等级需要达到的经验"""
level_map = [0, 200, 1500, 4500, 10800, 28800, 74100, 190300, 488300]
for v in level_map:
if self.exp < v:
return v
return 0
@property
def next_level_need(self) -> int:
"""计算距离升级下一等级还需要多少经验"""
return self.next_level_exp - self.exp
@property
def exp_to_next_level(self) -> int:
"""计算当前等级升级到下一等级需要多少经验"""
level_map = [0, 200, 1500, 4500, 10800, 28800, 74100, 190300, 488300]
current_level = self.level
if current_level >= len(level_map):
return 0 # 当前等级已是最高等级,没有下一级
return level_map[current_level] - level_map[current_level - 1]
@property
def progress_bar(self) -> str:
"""计算用户经验进度条"""
total_length = 30
# 计算当前等级起始的经验值
current_level_start_exp = self.next_level_exp - self.exp_to_next_level
# 从当前等级升级到下一等级所需的总经验
exp_for_next_level = self.exp_to_next_level
if exp_for_next_level == 0:
# 当前等级已是最高等级,进度条为满
return "[" + "#" * total_length + "] 100.0%"
# 计算当前经验在当前等级的进度比例
progress_ratio = (self.exp - current_level_start_exp) / exp_for_next_level
progress_ratio = min(progress_ratio, 0.999) # 确保进度条不会因为计算误差而显示为 100%
progress_length = int(progress_ratio * total_length)
progress_bar = "#" * progress_length
remaining_bar = "-" * (total_length - progress_length)
return f"[{progress_bar}{remaining_bar}] {progress_ratio * 100:.1f}%"
async def sign(self, group_id: str | int) -> bool:
if self.is_sign:
return False
self.is_sign = True
self.total_sign += 1
self.continue_sign += 1
await self.save() # type: ignore
await SignLog.insert(SignLog(qid=self.cid, group_id=str(group_id)))
return True
async def ban(self, reason: str, source: str) -> bool:
if self.banned:
return False
self.banned = True
await self.save() # type: ignore
await BanLog.insert(
BanLog(
target_id=self.cid,
target_type="user",
action="ban",
ban_reason=reason,
ban_source=source,
)
)
return True
async def unban(self, reason: str, source: str) -> bool:
if not self.banned:
return False
self.banned = False
await self.save() # type: ignore
await BanLog.insert(
BanLog(
target_id=self.cid,
target_type="user",
action="unban",
ban_reason=reason,
ban_source=source,
)
)
return True
async def add_coin(
self,
num: int,
group_id: str | int | None = None,
source: str = "未知",
detail: str = "",
) -> None:
self.coin += num
await self.save() # type: ignore
await CoinLog.insert(
CoinLog(
qid=self.cid,
group_id=str(group_id),
coin=num,
source=source,
detail=detail,
)
)
async def reduce_coin(
self,
num: int,
group_id: str | int | None = None,
source: str = "未知",
detail: str = "",
*,
force: bool = False,
) -> int | bool:
if self.coin < num:
if not force:
return False
now_coin = self.coin
self.coin = 0
await self.save() # type: ignore
await CoinLog.insert(
CoinLog(
qid=self.cid,
group_id=str(group_id),
coin=-now_coin,
source=source,
detail=detail,
)
)
return now_coin
self.coin -= num
await self.save() # type: ignore
await CoinLog.insert(
CoinLog(
qid=self.cid,
group_id=str(group_id),
coin=-num,
source=source,
detail=detail,
)
)
return True
async def add_talk(self) -> None:
self.totle_talk += 1
self.is_chat = True
await self.save() # type: ignore
async def set_nickname(self, nickname: str | None) -> None:
self.nickname = nickname
await self.save() # type: ignore
# Path: utils/saya/dispachers.py
from aiohttp import ClientSession
from avilla.core import MessageReceived
from graia.amnesia.builtins.memcache import Memcache, MemcacheService
from graia.broadcast.entities.dispatcher import BaseDispatcher
from graia.broadcast.interfaces.dispatcher import DispatcherInterface
from launart import Launart
from loguru import logger
from services import AiohttpClientService, S3File, S3FileService
from utils.builder import AGroupBuilder, AUserBuilder
from utils.db import AUser, GroupData
class ABotDispatcher(BaseDispatcher):
@staticmethod
async def catch(
interface: DispatcherInterface[MessageReceived],
) -> AUser | GroupData | S3File | ClientSession | Memcache | None:
ctx = interface.event.context
if interface.annotation == AUser:
cid = ctx.client.last_value
if not await AUser.find_one(AUser.cid == cid):
last_userid = await AUser.find_one(sort=[("_id", -1)])
user_id = int(last_userid.aid) + 1 if last_userid else 1
await AUser(aid=user_id, cid=cid).insert() # type: ignore[call-arg]
logger.info(f"[Core.db] 已初始化用户: {cid}")
user = await AUser.find_one(AUser.cid == cid)
if not user:
msg = f"未找到用户: {cid}"
raise ValueError(msg)
return await AUserBuilder.init(user)
if interface.annotation == GroupData:
if ctx.scene.path_without_land in {"guild.channel", "guild.user"}:
group_id = ctx.scene["guild"]
else:
| group_id = ctx.scene["group"] |
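For reference, the experience-to-level lookup used by AUser.level above can be exercised on its own; the thresholds are copied from level_map in the excerpt, and level_for is just a local re-typing of that property so the snippet runs standalone.

level_map = [0, 200, 1500, 4500, 10800, 28800, 74100, 190300, 488300]

def level_for(exp):
    # Return the index of the first threshold the experience has not reached yet.
    for i, v in enumerate(level_map):
        if exp < v:
            return i
    return len(level_map)

assert level_for(0) == 1        # a fresh account sits at level 1
assert level_for(200) == 2      # reaching a threshold bumps the level
assert level_for(1499) == 2
assert level_for(488300) == 9   # at or past the last threshold: max level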
====REPOSITORY====
# Repo Name: Varexa/Gateway
# Path: chat_exporter/ext/discord_import.py
# Path: chat_exporter/construct/assets/embed.py
class Embed:
r: str
g: str
b: str
title: str
description: str
author: str
image: str
thumbnail: str
footer: str
fields: str
check_against = None
def __init__(self, embed, guild):
self.embed: discord.Embed = embed
self.guild: discord.Guild = guild
async def flow(self):
self.check_against = _gather_checker()
self.build_colour()
await self.build_title()
await self.build_description()
await self.build_fields()
await self.build_author()
await self.build_image()
await self.build_thumbnail()
await self.build_footer()
await self.build_embed()
return self.embed
def build_colour(self):
self.r, self.g, self.b = (
(self.embed.colour.r, self.embed.colour.g, self.embed.colour.b)
if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour
)
async def build_title(self):
self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else ""
if self.title:
self.title = await fill_out(self.guild, embed_title, [
("EMBED_TITLE", self.title, PARSE_MODE_MARKDOWN)
])
async def build_description(self):
self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else ""
if self.description:
self.description = await fill_out(self.guild, embed_description, [
("EMBED_DESC", self.embed.description, PARSE_MODE_EMBED)
])
async def build_fields(self):
self.fields = ""
        # This early return is not strictly necessary, but it works around Pycord's handling of embeds without fields.
if not self.embed.fields:
return
for field in self.embed.fields:
field.name = html.escape(field.name)
field.value = html.escape(field.value)
if field.inline:
self.fields += await fill_out(self.guild, embed_field_inline, [
("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED),
("FIELD_VALUE", field.value, PARSE_MODE_EMBED)
])
else:
self.fields += await fill_out(self.guild, embed_field, [
("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED),
("FIELD_VALUE", field.value, PARSE_MODE_EMBED)])
async def build_author(self):
self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else ""
self.author = f'<a class="chatlog__embed-author-name-link" href="{self.embed.author.url}">{self.author}</a>' \
if self.embed.author.url != self.check_against \
else self.author
author_icon = await fill_out(self.guild, embed_author_icon, [
("AUTHOR", self.author, PARSE_MODE_NONE),
("AUTHOR_ICON", self.embed.author.icon_url, PARSE_MODE_NONE)
]) if self.embed.author.icon_url != self.check_against else ""
if author_icon == "" and self.author != "":
self.author = await fill_out(self.guild, embed_author, [("AUTHOR", self.author, PARSE_MODE_NONE)])
else:
self.author = author_icon
async def build_image(self):
self.image = await fill_out(self.guild, embed_image, [
("EMBED_IMAGE", str(self.embed.image.proxy_url), PARSE_MODE_NONE)
]) if self.embed.image.url != self.check_against else ""
async def build_thumbnail(self):
self.thumbnail = await fill_out(self.guild, embed_thumbnail, [
("EMBED_THUMBNAIL", str(self.embed.thumbnail.url), PARSE_MODE_NONE)]) \
if self.embed.thumbnail.url != self.check_against else ""
async def build_footer(self):
self.footer = html.escape(self.embed.footer.text) if self.embed.footer.text != self.check_against else ""
footer_icon = self.embed.footer.icon_url if self.embed.footer.icon_url != self.check_against else None
if not self.footer:
return
if footer_icon is not None:
self.footer = await fill_out(self.guild, embed_footer_icon, [
("EMBED_FOOTER", self.footer, PARSE_MODE_NONE),
("EMBED_FOOTER_ICON", footer_icon, PARSE_MODE_NONE)
])
else:
self.footer = await fill_out(self.guild, embed_footer, [
("EMBED_FOOTER", self.footer, PARSE_MODE_NONE)])
async def build_embed(self):
self.embed = await fill_out(self.guild, embed_body, [
("EMBED_R", str(self.r)),
("EMBED_G", str(self.g)),
("EMBED_B", str(self.b)),
("EMBED_AUTHOR", self.author, PARSE_MODE_NONE),
("EMBED_TITLE", self.title, PARSE_MODE_NONE),
("EMBED_IMAGE", self.image, PARSE_MODE_NONE),
("EMBED_THUMBNAIL", self.thumbnail, PARSE_MODE_NONE),
("EMBED_DESC", self.description, PARSE_MODE_NONE),
("EMBED_FIELDS", self.fields, PARSE_MODE_NONE),
("EMBED_FOOTER", self.footer, PARSE_MODE_NONE),
])
# Path: chat_exporter/construct/assets/reaction.py
class Reaction:
def __init__(self, reaction, guild):
self.reaction = reaction
self.guild = guild
async def flow(self):
await self.build_reaction()
return self.reaction
async def build_reaction(self):
if ":" in str(self.reaction.emoji):
emoji_animated = re.compile(r"<a:.*:.*>")
if emoji_animated.search(str(self.reaction.emoji)):
await self.create_discord_reaction("gif")
else:
await self.create_discord_reaction("png")
else:
await self.create_standard_emoji()
async def create_discord_reaction(self, emoji_type):
pattern = r":.*:(\d*)"
emoji_id = re.search(pattern, str(self.reaction.emoji)).group(1)
self.reaction = await fill_out(self.guild, custom_emoji, [
("EMOJI", str(emoji_id), PARSE_MODE_NONE),
("EMOJI_COUNT", str(self.reaction.count), PARSE_MODE_NONE),
("EMOJI_FILE", emoji_type, PARSE_MODE_NONE)
])
async def create_standard_emoji(self):
react_emoji = await convert_emoji(self.reaction.emoji)
self.reaction = await fill_out(self.guild, emoji, [
("EMOJI", str(react_emoji), PARSE_MODE_NONE),
("EMOJI_COUNT", str(self.reaction.count), PARSE_MODE_NONE)
])
# Path: chat_exporter/construct/assets/attachment.py
class Attachment:
def __init__(self, attachments, guild):
self.attachments = attachments
self.guild = guild
async def flow(self):
await self.build_attachment()
return self.attachments
async def build_attachment(self):
if self.attachments.content_type is not None:
if "image" in self.attachments.content_type:
return await self.image()
elif "video" in self.attachments.content_type:
return await self.video()
elif "audio" in self.attachments.content_type:
return await self.audio()
await self.file()
async def image(self):
self.attachments = await fill_out(self.guild, img_attachment, [
("ATTACH_URL", self.attachments.proxy_url, PARSE_MODE_NONE),
("ATTACH_URL_THUMB", self.attachments.proxy_url, PARSE_MODE_NONE)
])
async def video(self):
self.attachments = await fill_out(self.guild, video_attachment, [
("ATTACH_URL", self.attachments.proxy_url, PARSE_MODE_NONE)
])
async def audio(self):
file_icon = DiscordUtils.file_attachment_audio
file_size = self.get_file_size(self.attachments.size)
self.attachments = await fill_out(self.guild, audio_attachment, [
("ATTACH_ICON", file_icon, PARSE_MODE_NONE),
("ATTACH_URL", self.attachments.url, PARSE_MODE_NONE),
("ATTACH_BYTES", str(file_size), PARSE_MODE_NONE),
("ATTACH_AUDIO", self.attachments.proxy_url, PARSE_MODE_NONE),
("ATTACH_FILE", str(self.attachments.filename), PARSE_MODE_NONE)
])
async def file(self):
file_icon = await self.get_file_icon()
file_size = self.get_file_size(self.attachments.size)
self.attachments = await fill_out(self.guild, msg_attachment, [
("ATTACH_ICON", file_icon, PARSE_MODE_NONE),
("ATTACH_URL", self.attachments.url, PARSE_MODE_NONE),
("ATTACH_BYTES", str(file_size), PARSE_MODE_NONE),
("ATTACH_FILE", str(self.attachments.filename), PARSE_MODE_NONE)
])
@staticmethod
def get_file_size(file_size):
if file_size == 0:
return "0 bytes"
size_name = ("bytes", "KB", "MB")
i = int(math.floor(math.log(file_size, 1024)))
p = math.pow(1024, i)
s = round(file_size / p, 2)
return "%s %s" % (s, size_name[i])
async def get_file_icon(self) -> str:
acrobat_types = "pdf"
webcode_types = "html", "htm", "css", "rss", "xhtml", "xml"
code_types = "py", "cgi", "pl", "gadget", "jar", "msi", "wsf", "bat", "php", "js"
document_types = (
"txt", "doc", "docx", "rtf", "xls", "xlsx", "ppt", "pptx", "odt", "odp", "ods", "odg", "odf", "swx",
"sxi", "sxc", "sxd", "stw"
)
archive_types = (
"br", "rpm", "dcm", "epub", "zip", "tar", "rar", "gz", "bz2", "7x", "deb", "ar", "Z", "lzo", "lz", "lz4",
"arj", "pkg", "z"
)
extension = self.attachments.url.rsplit('.', 1)[1]
if extension in acrobat_types:
return DiscordUtils.file_attachment_acrobat
elif extension in webcode_types:
return DiscordUtils.file_attachment_webcode
elif extension in code_types:
return DiscordUtils.file_attachment_code
elif extension in document_types:
return DiscordUtils.file_attachment_document
elif extension in archive_types:
return DiscordUtils.file_attachment_archive
else:
return DiscordUtils.file_attachment_unknown
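# Illustrative example (not part of the repository): get_file_size above picks a unit via
# floor(log_1024(size)) and rounds to two decimals. A standalone sketch with made-up byte
# counts, kept deliberately close to the method above:
import math
def _human_size(n_bytes: int) -> str:
    if n_bytes == 0:
        return "0 bytes"
    units = ("bytes", "KB", "MB")
    i = int(math.floor(math.log(n_bytes, 1024)))
    return "%s %s" % (round(n_bytes / 1024 ** i, 2), units[i])
assert _human_size(2048) == "2.0 KB"
assert _human_size(5_000_000) == "4.77 MB"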
# Path: chat_exporter/construct/assets/component.py
class Component:
styles = {
"primary": "#5865F2",
"secondary": "#4F545C",
"success": "#2D7D46",
"danger": "#D83C3E",
"blurple": "#5865F2",
"grey": "#4F545C",
"gray": "#4F545C",
"green": "#2D7D46",
"red": "#D83C3E",
"link": "#4F545C",
}
components: str = ""
menus: str = ""
buttons: str = ""
menu_div_id: int = 0
def __init__(self, component, guild):
self.component = component
self.guild = guild
async def build_component(self, c):
if isinstance(c, discord.Button):
await self.build_button(c)
elif isinstance(c, discord.SelectMenu):
await self.build_menu(c)
Component.menu_div_id += 1
async def build_button(self, c):
url = c.url if c.url else ""
label = c.label if c.label else ""
style = self.styles[str(c.style).split(".")[1]]
icon = DiscordUtils.button_external_link if url else ""
emoji = str(c.emoji) if c.emoji else ""
self.buttons += await fill_out(self.guild, component_button, [
("DISABLED", "chatlog__component-disabled" if c.disabled else "", PARSE_MODE_NONE),
("URL", str(url), PARSE_MODE_NONE),
("LABEL", str(label), PARSE_MODE_MARKDOWN),
("EMOJI", str(emoji), PARSE_MODE_EMOJI),
("ICON", str(icon), PARSE_MODE_NONE),
("STYLE", style, PARSE_MODE_NONE)
])
async def build_menu(self, c):
placeholder = c.placeholder if c.placeholder else ""
options = c.options
content = ""
if not c.disabled:
content = await self.build_menu_options(options)
self.menus += await fill_out(self.guild, component_menu, [
("DISABLED", "chatlog__component-disabled" if c.disabled else "", PARSE_MODE_NONE),
("ID", str(self.menu_div_id), PARSE_MODE_NONE),
("PLACEHOLDER", str(placeholder), PARSE_MODE_MARKDOWN),
("CONTENT", str(content), PARSE_MODE_NONE),
("ICON", DiscordUtils.interaction_dropdown_icon, PARSE_MODE_NONE),
])
async def build_menu_options(self, options):
content = []
for option in options:
if option.emoji:
content.append(await fill_out(self.guild, component_menu_options_emoji, [
("EMOJI", str(option.emoji), PARSE_MODE_EMOJI),
("TITLE", str(option.label), PARSE_MODE_MARKDOWN),
("DESCRIPTION", str(option.description) if option.description else "", PARSE_MODE_MARKDOWN)
]))
else:
content.append(await fill_out(self.guild, component_menu_options, [
("TITLE", str(option.label), PARSE_MODE_MARKDOWN),
("DESCRIPTION", str(option.description) if option.description else "", PARSE_MODE_MARKDOWN)
]))
if content:
content = f'<div id="dropdownMenu{self.menu_div_id}" class="dropdownContent">{"".join(content)}</div>'
return content
async def flow(self):
for c in self.component.children:
await self.build_component(c)
if self.menus:
self.components += f'<div class="chatlog__components">{self.menus}</div>'
if self.buttons:
self.components += f'<div class="chatlog__components">{self.buttons}</div>'
return self.components
# Path: chat_exporter/ext/discord_utils.py
class DiscordUtils:
logo: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-logo.svg'
default_avatar: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-default.png'
pinned_message_icon: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-pinned.svg'
thread_channel_icon: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-thread.svg'
file_attachment_audio: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-audio.svg'
file_attachment_acrobat: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-acrobat.svg'
file_attachment_webcode: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-webcode.svg'
file_attachment_code: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-code.svg'
file_attachment_document: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-document.svg'
file_attachment_archive: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-archive.svg'
file_attachment_unknown: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-unknown.svg'
button_external_link: str = '<img class="chatlog__reference-icon" src="https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-external-link.svg">'
reference_attachment_icon: str = '<img class="chatlog__reference-icon" src="https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-attachment.svg">'
interaction_command_icon: str = '<img class="chatlog__interaction-icon" src="https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-command.svg">'
interaction_dropdown_icon: str = '<img class="chatlog__dropdown-icon" src="https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-dropdown.svg">'
# Path: chat_exporter/ext/html_generator.py
PARSE_MODE_NONE = 0
PARSE_MODE_NO_MARKDOWN = 1
PARSE_MODE_MARKDOWN = 2
PARSE_MODE_EMBED = 3
PARSE_MODE_SPECIAL_EMBED = 4
PARSE_MODE_REFERENCE = 5
PARSE_MODE_EMOJI = 6
async def fill_out(guild, base, replacements):
def read_file(filename):
# Path: chat_exporter/construct/message.py
import html
from typing import List, Optional, Union
from pytz import timezone
from datetime import timedelta
from chat_exporter.ext.discord_import import discord
from chat_exporter.construct.assets import Attachment, Component, Embed, Reaction
from chat_exporter.ext.discord_utils import DiscordUtils
from chat_exporter.ext.html_generator import (
fill_out,
bot_tag,
bot_tag_verified,
message_body,
message_pin,
message_thread,
message_content,
message_reference,
message_reference_unknown,
message_interaction,
img_attachment,
start_message,
end_message,
PARSE_MODE_NONE,
PARSE_MODE_MARKDOWN,
PARSE_MODE_REFERENCE,
)
else DiscordUtils.default_avatar
)
user_joined_at = self.message.author.joined_at if hasattr(self.message.author, "joined_at") else None
user_display_name = (
f'<div class="meta__display-name">{self.message.author.display_name}</div>'
if self.message.author.display_name != self.message.author.name
else ""
)
self.meta_data[user_id] = [
user_name_discriminator, user_created_at, user_bot, user_avatar, 1, user_joined_at, user_display_name
]
async def build_content(self):
if not self.message.content:
self.message.content = ""
return
if self.message_edited_at:
self.message_edited_at = _set_edit_at(self.message_edited_at)
self.message.content = html.escape(self.message.content)
self.message.content = await fill_out(self.guild, message_content, [
("MESSAGE_CONTENT", self.message.content, PARSE_MODE_MARKDOWN),
("EDIT", self.message_edited_at, PARSE_MODE_NONE)
])
async def build_reference(self):
if not self.message.reference:
self.message.reference = ""
return
try:
message: discord.Message = await self.message.channel.fetch_message(self.message.reference.message_id)
except (discord.NotFound, discord.HTTPException) as e:
self.message.reference = ""
if isinstance(e, discord.NotFound):
self.message.reference = message_reference_unknown
return
is_bot = _gather_user_bot(message.author)
user_colour = await self._gather_user_colour(message.author)
if not message.content and not message.interaction:
message.content = "Click to see attachment"
elif not message.content and message.interaction:
message.content = "Click to see command"
icon = ""
if not message.interaction and (message.embeds or message.attachments):
icon = DiscordUtils.reference_attachment_icon
elif message.interaction:
icon = DiscordUtils.interaction_command_icon
_, message_edited_at = self.set_time(message)
if message_edited_at:
message_edited_at = _set_edit_at(message_edited_at)
avatar_url = message.author.display_avatar if message.author.display_avatar else DiscordUtils.default_avatar
self.message.reference = await fill_out(self.guild, message_reference, [
("AVATAR_URL", str(avatar_url), PARSE_MODE_NONE),
("BOT_TAG", is_bot, PARSE_MODE_NONE),
("NAME_TAG", "%s#%s" % (message.author.name, message.author.discriminator), PARSE_MODE_NONE),
("NAME", str(html.escape(message.author.display_name))),
("USER_COLOUR", user_colour, PARSE_MODE_NONE),
("CONTENT", message.content, PARSE_MODE_REFERENCE),
("EDIT", message_edited_at, PARSE_MODE_NONE),
("ICON", icon, PARSE_MODE_NONE),
("USER_ID", str(message.author.id), PARSE_MODE_NONE),
("MESSAGE_ID", str(self.message.reference.message_id), PARSE_MODE_NONE),
])
async def build_interaction(self):
if not self.message.interaction:
self.message.interaction = ""
return
user: Union[discord.Member, discord.User] = self.message.interaction.user
is_bot = _gather_user_bot(user)
user_colour = await self._gather_user_colour(user)
avatar_url = user.display_avatar if user.display_avatar else DiscordUtils.default_avatar
self.message.interaction = await fill_out(self.guild, message_interaction, [
("AVATAR_URL", str(avatar_url), PARSE_MODE_NONE),
("BOT_TAG", is_bot, PARSE_MODE_NONE),
("NAME_TAG", "%s#%s" % (user.name, user.discriminator), PARSE_MODE_NONE),
("NAME", str(html.escape(user.display_name))),
("USER_COLOUR", user_colour, PARSE_MODE_NONE),
("FILLER", "used ", PARSE_MODE_NONE),
("COMMAND", "/" + self.message.interaction.name, PARSE_MODE_NONE),
("USER_ID", str(user.id), PARSE_MODE_NONE),
("INTERACTION_ID", str(self.message.interaction.id), PARSE_MODE_NONE),
])
async def build_sticker(self):
if not self.message.stickers or not hasattr(self.message.stickers[0], "url"):
return
sticker_image_url = self.message.stickers[0].url
if sticker_image_url.endswith(".json"):
sticker = await self.message.stickers[0].fetch()
sticker_image_url = (
f"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/stickers/{sticker.pack_id}/{sticker.id}.gif"
)
self.message.content = await fill_out(self.guild, img_attachment, [
("ATTACH_URL", str(sticker_image_url), PARSE_MODE_NONE),
("ATTACH_URL_THUMB", str(sticker_image_url), PARSE_MODE_NONE)
])
async def build_assets(self):
for e in self.message.embeds:
self.embeds += await Embed(e, self.guild).flow()
for a in self.message.attachments:
self.attachments += await Attachment(a, self.guild).flow()
for c in self.message.components:
self.components += await Component(c, self.guild).flow()
| for r in self.message.reactions:
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: mariaalfaroc/a2s-transformer
# Path: networks/crnn/model.py
class CTCTrainedCRNN(LightningModule):
def __init__(
self, w2i, i2w, ytest_i2w=None, max_audio_len=100, frame_multiplier_factor=8
):
super(CTCTrainedCRNN, self).__init__()
# Save hyperparameters
self.save_hyperparameters()
# Dictionaries
self.w2i = w2i
self.i2w = i2w
self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w
# Model
self.model = CRNN(
output_size=len(self.w2i) + 1,
frame_multiplier_factor=frame_multiplier_factor,
)
self.width_reduction = self.model.cnn.width_reduction
self.summary(max_audio_len)
# Loss: the target index cannot be blank!
self.compute_ctc_loss = CTCLoss(blank=len(self.w2i), zero_infinity=False)
# Predictions
self.Y = []
self.YHat = []
def summary(self, max_audio_len):
summary(self.model, input_size=[1, NUM_CHANNELS, IMG_HEIGHT, max_audio_len])
def configure_optimizers(self):
return torch.optim.Adam(self.model.parameters(), lr=1e-3, weight_decay=1e-6)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, xl, y, yl = batch
yhat = self.model(x)
# ------ CTC Requirements ------
# yhat: [batch, frames, vocab_size]
yhat = yhat.log_softmax(dim=2)
yhat = yhat.permute(1, 0, 2).contiguous()
# ------------------------------
loss = self.compute_ctc_loss(yhat, y, xl, yl)
self.log("train_loss", loss, prog_bar=True, logger=True, on_epoch=True)
return loss
def ctc_greedy_decoder(self, y_pred, i2w):
# y_pred = [seq_len, num_classes]
# Best path
y_pred_decoded = torch.argmax(y_pred, dim=1)
# Merge repeated elements
y_pred_decoded = torch.unique_consecutive(y_pred_decoded, dim=0).tolist()
# Convert to string; len(i2w) -> CTC-blank
y_pred_decoded = [i2w[i] for i in y_pred_decoded if i != len(i2w)]
return y_pred_decoded
def validation_step(self, batch, batch_idx):
x, y = batch # batch_size = 1
# Model prediction (decoded using the vocabulary on which it was trained)
yhat = self.model(x)[0]
yhat = yhat.log_softmax(dim=-1).detach().cpu()
yhat = self.ctc_greedy_decoder(yhat, self.i2w)
# Decoded ground truth
y = [self.ytest_i2w[i.item()] for i in y[0]]
# Append to later compute metrics
self.Y.append(y)
self.YHat.append(yhat)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self, name="val", print_random_samples=False):
metrics = compute_metrics(y_true=self.Y, y_pred=self.YHat)
for k, v in metrics.items():
self.log(f"{name}_{k}", v, prog_bar=True, logger=True, on_epoch=True)
# Print random samples
if print_random_samples:
index = random.randint(0, len(self.Y) - 1)
print(f"Ground truth - {self.Y[index]}")
print(f"Prediction - {self.YHat[index]}")
# Clear predictions
self.Y.clear()
self.YHat.clear()
return metrics
def on_test_epoch_end(self):
return self.on_validation_epoch_end(name="test", print_random_samples=True)
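# Illustrative example (not part of the repository): the CTC greedy decoding used above is
# "best path -> collapse repeats -> drop the blank index (len(i2w))". The toy vocabulary
# and per-frame predictions below are invented for illustration only.
import torch
_i2w = {0: "a", 1: "b"}                                         # blank index is len(_i2w) == 2
_frame_argmax = torch.tensor([0, 0, 2, 1, 1, 2, 0])             # per-frame best classes
_collapsed = torch.unique_consecutive(_frame_argmax).tolist()   # [0, 2, 1, 2, 0]
_decoded = [_i2w[i] for i in _collapsed if i != len(_i2w)]
assert _decoded == ["a", "b", "a"]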
# Path: networks/transformer/model.py
class A2STransformer(LightningModule):
def __init__(
self,
max_seq_len,
max_audio_len,
w2i,
i2w,
ytest_i2w=None,
attn_window=-1,
teacher_forcing_prob=0.5,
):
super(A2STransformer, self).__init__()
# Save hyperparameters
self.save_hyperparameters()
# Dictionaries
self.w2i = w2i
self.i2w = i2w
self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w
self.padding_idx = w2i["<PAD>"]
# Model
self.max_seq_len = max_seq_len
self.teacher_forcing_prob = teacher_forcing_prob
self.encoder = Encoder(in_channels=NUM_CHANNELS)
self.pos_2d = PositionalEncoding2D(
num_channels=256,
max_height=math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION),
max_width=math.ceil(max_audio_len / WIDTH_REDUCTION),
)
self.decoder = Decoder(
output_size=len(self.w2i),
max_seq_len=max_seq_len,
num_embeddings=len(self.w2i),
padding_idx=self.padding_idx,
attn_window=attn_window,
)
self.summary(max_audio_len)
# Loss
self.compute_loss = CrossEntropyLoss(ignore_index=self.padding_idx)
# Predictions
self.Y = []
self.YHat = []
def summary(self, max_audio_len):
print("Encoder")
summary(self.encoder, input_size=[1, NUM_CHANNELS, IMG_HEIGHT, max_audio_len])
print("Decoder")
tgt_size = [1, self.max_seq_len]
memory_size = [
1,
math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION)
* math.ceil(max_audio_len / WIDTH_REDUCTION),
256,
]
memory_len_size = [1]
summary(
self.decoder,
input_size=[tgt_size, memory_size, memory_len_size],
dtypes=[torch.int64, torch.float32, torch.int64],
)
def configure_optimizers(self):
return torch.optim.Adam(
list(self.encoder.parameters()) + list(self.decoder.parameters()),
lr=1e-4,
amsgrad=False,
)
def forward(self, x, xl, y_in):
# Encoder
x = self.encoder(x=x)
# Prepare for decoder
# 2D PE + flatten + permute
x = self.pos_2d(x)
x = x.flatten(2).permute(0, 2, 1).contiguous()
# Decoder
y_out_hat = self.decoder(tgt=y_in, memory=x, memory_len=xl)
return y_out_hat
def apply_teacher_forcing(self, y):
# y.shape = [batch_size, seq_len]
y_errored = y.clone()
for i in range(y_errored.size(0)):
for j in range(y_errored.size(1)):
if (
random.random() < self.teacher_forcing_prob
and y[i, j] != self.padding_idx
):
y_errored[i, j] = random.randint(0, len(self.w2i) - 1)
return y_errored
def training_step(self, batch, batch_idx):
x, xl, y_in, y_out = batch
y_in = self.apply_teacher_forcing(y_in)
yhat = self.forward(x=x, xl=xl, y_in=y_in)
loss = self.compute_loss(yhat, y_out)
self.log("train_loss", loss, prog_bar=True, logger=True, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
assert x.size(0) == 1, "Inference only supports batch_size = 1"
# Encoder
x = self.encoder(x=x)
# Prepare for decoder
# 2D PE + flatten + permute
x = self.pos_2d(x)
x = x.flatten(2).permute(0, 2, 1).contiguous()
# Autoregressive decoding
y_in = torch.tensor([self.w2i[SOS_TOKEN]]).unsqueeze(0).long().to(x.device)
yhat = []
for _ in range(self.max_seq_len):
y_out_hat = self.decoder(tgt=y_in, memory=x, memory_len=None)
y_out_hat = y_out_hat[0, :, -1] # Last token
y_out_hat_token = y_out_hat.argmax(dim=-1).item()
y_out_hat_word = self.i2w[y_out_hat_token]
yhat.append(y_out_hat_word)
if y_out_hat_word == EOS_TOKEN:
break
y_in = torch.cat(
[y_in, torch.tensor([[y_out_hat_token]]).long().to(x.device)], dim=1
)
# Decoded ground truth
y = [self.ytest_i2w[i.item()] for i in y[0][1:]] # Remove SOS_TOKEN
# Append to later compute metrics
self.Y.append(y)
self.YHat.append(yhat)
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def on_validation_epoch_end(self, name="val", print_random_samples=False):
metrics = compute_metrics(y_true=self.Y, y_pred=self.YHat)
for k, v in metrics.items():
self.log(f"{name}_{k}", v, prog_bar=True, logger=True, on_epoch=True)
# Print random samples
if print_random_samples:
index = random.randint(0, len(self.Y) - 1)
print(f"Ground truth - {self.Y[index]}")
print(f"Prediction - {self.YHat[index]}")
# Clear predictions
self.Y.clear()
self.YHat.clear()
return metrics
def on_test_epoch_end(self):
return self.on_validation_epoch_end(name="test", print_random_samples=True)
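# Illustrative example (not part of the repository): before decoding, the encoder feature
# map is reshaped into a transformer memory sequence with flatten(2) + permute, i.e.
# [N, C, H, W] -> [N, H*W, C]. The shapes below are illustrative, not taken from the config.
import torch
_feat = torch.randn(1, 256, 8, 50)                           # [N, C, H, W]
_memory = _feat.flatten(2).permute(0, 2, 1).contiguous()     # [N, H*W, C]
assert _memory.shape == (1, 8 * 50, 256)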
# Path: my_utils/ctc_dataset.py
class CTCDataModule(LightningDataModule):
def __init__(
self,
ds_name: str,
use_voice_change_token: bool = False,
batch_size: int = 16,
num_workers: int = 20,
width_reduction: int = 2,
):
super(CTCDataModule, self).__init__()
self.ds_name = ds_name
self.use_voice_change_token = use_voice_change_token
self.batch_size = batch_size
self.num_workers = num_workers
self.width_reduction = (
width_reduction # Must be overridden with that of the model!
)
def setup(self, stage: str):
if stage == "fit":
self.train_ds = CTCDataset(
ds_name=self.ds_name,
partition_type="train",
width_reduction=self.width_reduction,
use_voice_change_token=self.use_voice_change_token,
)
self.val_ds = CTCDataset(
ds_name=self.ds_name,
partition_type="val",
width_reduction=self.width_reduction,
use_voice_change_token=self.use_voice_change_token,
)
if stage == "test" or stage == "predict":
self.test_ds = CTCDataset(
ds_name=self.ds_name,
partition_type="test",
width_reduction=self.width_reduction,
use_voice_change_token=self.use_voice_change_token,
)
def train_dataloader(self):
return DataLoader(
self.train_ds,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=ctc_batch_preparation,
) # prefetch_factor=2
def val_dataloader(self):
return DataLoader(
self.val_ds,
batch_size=1,
shuffle=False,
num_workers=self.num_workers,
) # prefetch_factor=2
def test_dataloader(self):
return DataLoader(
self.test_ds,
batch_size=1,
shuffle=False,
num_workers=self.num_workers,
) # prefetch_factor=2
def predict_dataloader(self):
print("Using test_dataloader for predictions.")
return self.test_dataloader()

def get_w2i_and_i2w(self):
try:
return self.train_ds.w2i, self.train_ds.i2w
except AttributeError:
return self.test_ds.w2i, self.test_ds.i2w
def get_max_seq_len(self):
try:
return self.train_ds.max_seq_len
except AttributeError:
return self.test_ds.max_seq_len
def get_max_audio_len(self):
try:
return self.train_ds.max_audio_len
except AttributeError:
return self.test_ds.max_audio_len
def get_frame_multiplier_factor(self):
try:
return self.train_ds.frame_multiplier_factor
except AttributeError:
return self.test_ds.frame_multiplier_factor
# Path: my_utils/ar_dataset.py
class ARDataModule(LightningDataModule):
def __init__(
self,
ds_name: str,
use_voice_change_token: bool = False,
batch_size: int = 16,
num_workers: int = 20,
):
super(ARDataModule, self).__init__()
self.ds_name = ds_name
self.use_voice_change_token = use_voice_change_token
self.batch_size = batch_size
self.num_workers = num_workers
def setup(self, stage: str):
if stage == "fit":
self.train_ds = ARDataset(
ds_name=self.ds_name,
partition_type="train",
use_voice_change_token=self.use_voice_change_token,
)
self.val_ds = ARDataset(
ds_name=self.ds_name,
partition_type="val",
use_voice_change_token=self.use_voice_change_token,
)
if stage == "test" or stage == "predict":
self.test_ds = ARDataset(
ds_name=self.ds_name,
partition_type="test",
use_voice_change_token=self.use_voice_change_token,
)
def train_dataloader(self):
return DataLoader(
self.train_ds,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
collate_fn=ar_batch_preparation,
) # prefetch_factor=2
def val_dataloader(self):
return DataLoader(
self.val_ds,
batch_size=1,
shuffle=False,
num_workers=self.num_workers,
) # prefetch_factor=2
def test_dataloader(self):
return DataLoader(
self.test_ds,
batch_size=1,
shuffle=False,
num_workers=self.num_workers,
) # prefetch_factor=2
def predict_dataloader(self):
print("Using test_dataloader for predictions.")
return self.test_dataloader()
def get_w2i_and_i2w(self):
try:
return self.train_ds.w2i, self.train_ds.i2w
except AttributeError:
return self.test_ds.w2i, self.test_ds.i2w
def get_max_seq_len(self):
try:
return self.train_ds.max_seq_len
except AttributeError:
return self.test_ds.max_seq_len
def get_max_audio_len(self):
try:
return self.train_ds.max_audio_len
except AttributeError:
return self.test_ds.max_audio_len
# Path: my_utils/seed.py
def seed_everything(seed: int, deterministic: bool = True, benchmark: bool = True):
import random, os
import numpy as np
import torch
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = deterministic
torch.backends.cudnn.benchmark = benchmark
# Path: test.py
import gc
import os
import fire
import torch
from lightning.pytorch import Trainer
from lightning.pytorch.loggers.wandb import WandbLogger
from networks.crnn.model import CTCTrainedCRNN
from networks.transformer.model import A2STransformer
from my_utils.ctc_dataset import CTCDataModule
from my_utils.ar_dataset import ARDataModule
from my_utils.seed import seed_everything
seed_everything(42, benchmark=False)
def test(
ds_name,
model_type: str = "crnn",
use_voice_change_token: bool = False,
checkpoint_path: str = "",
):
gc.collect()
torch.cuda.empty_cache()
# Check if checkpoint path is empty or does not exist
if checkpoint_path == "":
print("Checkpoint path not provided")
return
if not os.path.exists(checkpoint_path):
print(f"Checkpoint path {checkpoint_path} does not exist")
return
# Get source dataset name
src_ds_name = os.path.basename(checkpoint_path).split(".")[0]
# Experiment info
print("TEST EXPERIMENT")
print(f"\tSource dataset: {src_ds_name}")
print(f"\tTest dataset: {ds_name}")
print(f"\tModel type: {model_type}")
print(f"\tUse voice change token: {use_voice_change_token}")
print(f"\tCheckpoint path: {checkpoint_path}")
if model_type == "crnn":
# Data module
datamodule = CTCDataModule(
ds_name=ds_name,
use_voice_change_token=use_voice_change_token,
)
datamodule.setup(stage="test")
ytest_i2w = datamodule.test_ds.i2w
# Model
model = CTCTrainedCRNN.load_from_checkpoint(
checkpoint_path, ytest_i2w=ytest_i2w
)
elif model_type == "transformer":
# Data module
datamodule = ARDataModule(
ds_name=ds_name,
use_voice_change_token=use_voice_change_token,
)
datamodule.setup(stage="test")
ytest_i2w = datamodule.test_ds.i2w
# Model
model = A2STransformer.load_from_checkpoint(
checkpoint_path, ytest_i2w=ytest_i2w
)
| else: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: yacinxx/dnakey
# Path: prime_key_config.py
class PrimeKeyConfig:
def agent_prime_key(self, hash_key:str) -> str | int:
MAX_LENGTH = 56
self.hash_key = hash_key
if (self.hash_key) and (len(self.hash_key) == MAX_LENGTH) and (self.hash_key.startswith("dnakey$")):
positions_to_remove = [10, 20, 30, 40, 48]
self.hash_key = self.hash_key.replace('dnakey$', '')
is_prime = ''.join([self.hash_key[i] for i in positions_to_remove])
if is_prime == "PRIME":
valid_hash_key = ''.join([self.hash_key[i] for i in range(len(self.hash_key)) if i not in positions_to_remove])
config_has_key = f"dnakey${valid_hash_key[:32:2]}"
config_manager = list(ConfigManager(config_has_key).configuration().keys())
if config_has_key in config_manager:
toast("**:blue[The Prime key is valid!]**", icon="🧁")
time.sleep(1)
return valid_hash_key
else:
info("This Prime key not registered yet!", icon="😮")
return 1
else:
error("This is not a Prime key!")
return 1
elif self.hash_key and len(self.hash_key) != MAX_LENGTH:
error("The Prime key is not valid!")
return 1
else:
return 1
# Path: profile_config/config_manager.py
class ConfigManager:
def __init__(self, prime_key:str) -> None:
with open("profile_config/profile_config.json", "r") as f:
self.profile_data = __import__("json").loads(f.read())
self.profile_config = self.profile_data["profiles_config"]
self.prime_key = prime_key
self.create_date = datetime.datetime.now()
self.formatted_datetime = self.create_date.isoformat()
def configuration(self):
return self.profile_config
def update_created_profiles(self):
self.profile_config[self.prime_key]["created_profiles"] +=1
toast(":orange[**1 Profile has been added to your prime key**]", icon="🍨")
return self.profile_config[self.prime_key]["created_profiles"]
def get_date_time(self):
return self.profile_config[self.prime_key]["date_time"]
def update_date_time(self):
if self.profile_config[self.prime_key]["date_time"] is None:
self.profile_config[self.prime_key].update({"date_time": self.formatted_datetime})
success("**You 'Prime Key' has been activated successfully!**", icon="🍧")
snow()
def update_profile_activity(self, id_profile:int, activate_merge:bool, save_cookies:bool, formatted_datetime:str) -> None:
self.action = self.profile_config[self.prime_key]["profile_activity"]["action"]
if id_profile not in self.action:
self.action.update({id_profile:{
"active_usage": 0,
"active_merge": activate_merge,
"date_time": formatted_datetime,
"request_status": "online",
"save_cookies": save_cookies,
"version": VERSION}
})
def get_created_profiles(self):
return self.profile_config[self.prime_key]["created_profiles"]
def get_active_profiles(self):
active_profiles_ids = []
active_profiles = 0
active_profiles_list = list(self.profile_config[self.prime_key]["profile_activity"]["action"])
for i in active_profiles_list:
if self.profile_config[self.prime_key]["profile_activity"]["action"][i]["active_usage"] != 0:
active_profiles+=1
active_profiles_ids.append(f"id:{i}")
return active_profiles, active_profiles_ids if len(active_profiles_ids) != 0 else ""
def get_online_profiles(self):
all_profiles_online = []
active_profiles_list = list(self.profile_config[self.prime_key]["profile_activity"]["action"])
for i in active_profiles_list:
if self.profile_config[self.prime_key]["profile_activity"]["action"][i]["request_status"] == "online":
all_profiles_online.append("online")
else:
all_profiles_online.append("offline")
if all(profile == "online" for profile in all_profiles_online):
return "Online!"
else:
return "Not all profiles are online!"
def check_active_usage(self):
all_profiles_active_usage = []
for i in list(self.profile_config[self.prime_key]["profile_activity"]["action"]):
all_profiles_active_usage.append(self.profile_config[self.prime_key]["profile_activity"]["action"][i]["active_usage"])
if all(profile == 0 for profile in all_profiles_active_usage):
return "first_time"
def get_profile_active_usage(self, id_profile:str) -> int:
return self.profile_config[self.prime_key]["profile_activity"]["action"][id_profile]["active_usage"]
def update_profile_active_usage(self, id_profile:str) -> None:
self.profile_config[self.prime_key]["profile_activity"]["action"][id_profile]["active_usage"] +=1
def get_merge_active_usage(self):
return len(list(self.profile_config[self.prime_key]["profile_activity"]["action_merge"]))
def get_profile_action_merge(self, id_profile:str) -> list[int]:
get_merge = self.profile_config[self.prime_key]["profile_activity"]["action_merge"][id_profile]
action_merge_len = len(list(get_merge.keys()))
action_merge = sum(list(get_merge.values()))
return action_merge_len, action_merge
def update_profile_action_merge(self, id_profile:str, merge_with:str) -> None:
action_merge = self.profile_config[self.prime_key]["profile_activity"]["action_merge"]
if id_profile not in list(action_merge.keys()):
action_merge.update({id_profile:{f"({id_profile},{merge_with})": 0}})
if id_profile in list(action_merge.keys()):
if f"({id_profile},{merge_with})" in list(action_merge[id_profile].keys()):
action_merge[id_profile][f"({id_profile},{merge_with})"] +=1
else:
action_merge[id_profile].update({f"({id_profile},{merge_with})": 0})
action_merge[id_profile][f"({id_profile},{merge_with})"] +=1
def update_config(self):
with open("profile_config/profile_config.json", "w") as f:
__import__("json").dump(self.profile_data, f, indent=3)
# Path: main_builder.py
class MainBuilder:
def __init__(self, valid_hash_key:str, data:dict) -> None:
self.valid_hash_key = valid_hash_key
self.config_manager = ConfigManager(self.valid_hash_key)
self.created_profiles = self.config_manager.get_created_profiles()
self.data = data
self.profile_brain = self.data.get("profile_brain", {})
self.profile_name = self.data.get("meta_data", {}).get("profile_name", None)
self.action_status = self.data.get("meta_data", {}).get("action_status", None)
self.length = self.data.get("meta_data", {}).get("profile_length", None)
self.profile_id = self.data.get("meta_data", {}).get("profile_id", None)
self.activate_merge = self.data.get("meta_data", {}).get("activate_merge", None)
self.request_status = self.data.get("meta_data", {}).get("request_status", None)
self.save_cookies = self.data.get("meta_data", {}).get("save_cookies", None)
self.date_time = self.data.get("meta_data", {}).get("date_time", None)
self.profile_version = self.data.get("meta_data", {}).get("version", None)
self.profile_id_str = str(self.profile_id)
self.length_convert = self.length // 4
self.hash_text, self.prompt_list = [], []
if self.action_status is not None:
self.merge_with = self.data.get("meta_data", {}).get("merge_with", None)
def dna_builder(self, prompt:str, activate_random_on:bool) -> str:
self.prompt = prompt
self.numbers = [str(number) for number in string.digits]
idx, idx_add_on = 0, 4
for letter in prompt:
if letter.isupper() or letter in self.numbers:
self.hash_text.append(self.profile_brain[letter][0]
["number" if letter in self.numbers else "upper"][idx : idx + idx_add_on])
else:
self.hash_text.append(self.profile_brain[letter.upper()][0]["lower"][idx : idx + idx_add_on])
idx += idx_add_on
if activate_random_on:
self.random_result()
else:
self.result_prompt()
def random_result(self):
if self.action_status is None:
self.config_manager.update_profile_active_usage(self.profile_id_str)
self.config_manager.update_config()
else:
self.config_manager.update_profile_action_merge(self.profile_id_str, self.merge_with)
self.config_manager.update_config()
hash_length = len(self.hash_text)
condition = ["Weak", "#ffa347"] if hash_length < 4 else ["Medium", "#3cb371"] \
if hash_length < 10 else ["Strong", "#ec002b"]
r_hash = self.create_random_result()
_hash = ''.join(self.hash_text)
hash_text_len = len(_hash)
_token = f"{_hash[::4]}-{r_hash[: hash_text_len - 4]}"
st.divider()
with st.expander("Expand for random password result [See More...]"):
self.col1, self.col2 = st.columns(2)
with self.col1:
st.write("Password: (:green[Copy])")
st.code(f'"{_token}"')
st.caption(f"Activate_key: {self.prompt}")
st.caption(":red[Warning]: This is a random password (:blue[temporarily])")
self.profile_details()
# Generate and display QR code
with self.col2:
self.qr_code(_token, condition[1])
self.hash_text.clear()
def create_random_result(self):
def has_lower():
return random.choice(string.ascii_lowercase)
def has_upper():
return random.choice(string.ascii_uppercase)
def has_number():
return random.choice(string.digits)
_char = {"lower": has_lower,
"upper": has_upper,
"number": has_number}
r_hash = ""
for _ in range(self.length):
random_choice = random.choice(list(_char.keys()))
r_hash += _char[random_choice]()
return r_hash
def result_prompt(self):
if self.action_status is None:
self.config_manager.update_profile_active_usage(self.profile_id_str)
self.config_manager.update_config()
else:
self.config_manager.update_profile_action_merge(self.profile_id_str, self.merge_with)
self.config_manager.update_config()
jhash = ''.join(self.hash_text)
hash_length = len(self.hash_text)
condition = ["Weak", "#ffa347", "orange"] if hash_length < 4 else ["Medium", "#3cb371", "green"] \
if hash_length < 10 else ["Strong", "#ec002b", "red"]
# Display the generated token as code
st.divider()
with st.expander("Expand for password result [See More...]"):
self.col1, self.col2 = st.columns(2)
with self.col1:
st.write("Password: (:green[Copy])")
st.code(f'"{jhash}"')
st.caption(f"Condition: [:{condition[2]}[{condition[0]}]]")
st.caption(":green[Note]: You dont have to save this password just remember your key")
self.profile_details()
with self.col2:
# Generate and display QR code
self.qr_code(jhash, condition[1])
self.hash_text.clear()
def profile_details(self):
with st.expander("Expand to see your 'Profile' Details [See More...]"):
self.col_details1, self.col_details2 = st.columns(2)
with self.col_details1:
if self.action_status is None:
active_usage = self.config_manager.get_profile_active_usage(self.profile_id_str)
st.text("Profile MetaData!")
profile_meta_data = ['profile_id', 'profile_name', 'active_usage', 'max_length', 'profile_status', 'activate_merge', 'save_cookies', 'date_time', 'version']
profile_values = [self.profile_id, self.profile_name, active_usage, self.length_convert, self.request_status, self.activate_merge, self.save_cookies, self.date_time, self.profile_version]
self.profile_data_frame(profile_meta_data, profile_values)
else:
merge_with, action_merge = self.config_manager.get_profile_action_merge(self.profile_id_str)
st.text("Profile Merge MetaData!")
action_merge_name = f'{action_merge} times' if action_merge != 1 else f'{action_merge} time'
merge_with_name = f'{merge_with} profiles' if merge_with != 1 else f'{merge_with} profile'
profile_meta_data = ['profile_id', 'profile_name', 'action_merge', 'merge_with', 'max_length']
profile_values = [self.profile_id, self.profile_name, action_merge_name, merge_with_name, self.length_convert]
self.profile_data_frame(profile_meta_data, profile_values)
with self.col_details2:
st.text("PrimeKey MetaData!")
active_profiles, active_profiles_ids = self.config_manager.get_active_profiles()
active_profiles_merge = self.config_manager.get_merge_active_usage()
profiles_status = self.config_manager.get_online_profiles()
st.code(f"""
prime_key_id: '{self.valid_hash_key}'
created_profiles: {self.created_profiles}
active_profiles: {active_profiles} {active_profiles_ids}
active_profiles_merge: {active_profiles_merge}
profiles_status: '{profiles_status}'""")
st.info("Here is your profile and prime key metadata. You can save in by download it or copy the text just to be updated!", icon="❕")
st.warning("DnaKey not responsible if you delete your profiles or give it to someone!", icon="📛")
def profile_data_frame(self, profile_meta_data:list[str], profile_values:list[str | int | bool]) -> None:
def load_data():
return pd.DataFrame(
{
"Profile MetaData": profile_meta_data,
"Profile Values": profile_values,
}
)
# Boolean to resize the dataframe, stored as a session state variable
data_df = load_data()
data_df['Profile Values'] = data_df['Profile Values'].astype(str)
st.dataframe(data_df, use_container_width=500, hide_index=True)
def qr_code(self, data_qr:str, color:str) -> None:
# File name for the generated QR code image
self.file_name = "qr_code.png"
# Text to be encoded in the QR code
qr_data_text = f"-TOKEN: {data_qr}"
# Generate the QR code image
self.generate_qr_code(qr_data_text, self.file_name, color)
# Display the generated QR code image
st.image(image=self.file_name)
st.caption(f"You can scan this **:green[QR code]** to see your password in your phone faster!")
st.toast(":green[Your (QR) code has been created successfully!]", icon='🎉')
time.sleep(1)
# Check if the image file exists
if os.path.exists(self.file_name):
# Delete the image file
os.remove(self.file_name)
print(f"Deleted the image file: {self.file_name}")
else:
print(f"The image file does not exist: {self.file_name}")
def generate_qr_code(self, data:str, file_name:str, color:str) -> None:
# Generate a QR code with the given data, file name, and color
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L,
box_size=5, border=2)
qr.add_data(data)
qr.make(fit=True)
self.qr_img = qr.make_image(fill_color=color, back_color="#f5f5f5")
# Save the generated QR code image to a file
self.qr_img.save(file_name)
# Path: create_password.py
import streamlit as st
import json, time, re
from cryptography.fernet import Fernet
from prime_key_config import PrimeKeyConfig
from profile_config.config_manager import ConfigManager
from main_builder import MainBuilder
from create_merge import *
class CreatePassword(PrimeKeyConfig):
def create_new_password(self):
key_id = "prime-key-password"
self.hash_key = st.text_input("Enter Your Prime Key: (:red[Required])",
type="password",
help="Prime Key is your login token method so 'DnaKey' can recognize you!",
key=key_id)
self.upload_hash_key = self.agent_prime_key(self.hash_key)
if self.upload_hash_key == 1:
self.tab_name = "Create Password"
return
else:
self.tab_name = "Create Password"
self.config_has_key = f"dnakey${self.upload_hash_key[:32:2]}"
self.config_manager = ConfigManager(self.config_has_key)
self.config_manager.update_date_time()
self.config_manager.update_config()
self.first_time = self.config_manager.check_active_usage()
self.col3, self.col4 = st.columns(2)
with self.col3:
if self.first_time == "first_time":
st.info("Hey there. It's look like your first time using your profile!", icon="🏁")
with st.expander("Upload Profile [See More...]"):
self.uploaded_file = st.file_uploader(label="Upload Profile",
help="Required a dkp file only!",
accept_multiple_files=True,
type=["dkp"], key="file-01",
label_visibility="collapsed")
self.profile_data()
self.uploaded_files_merge = self.uploaded_file
self.create_password_advance_settings()
with self.col4:
if self.first_time == "first_time":
st.success("Let me help you get your first password easily!", icon="⚙")
if not self.uploaded_file:
st.caption("Hello👋, To start creating your new password you need to upload your profile!")
if (self.uploaded_file) and (self.first_time == "first_time"):
st.info("**_Active Profile For Now_**: Here you have to select the profile that you wanna use!", icon="🛠")
self.input_for_user_key()
self.decode_text_button()
def profile_data(self):
if not self.uploaded_file:
return 1
self.create_a_fernet_object()
self.verify_uploaded_file()
def create_a_fernet_object(self):
# Create a Fernet object with the secret key
secret_key = self.upload_hash_key.encode("utf-8")
self.fernet = Fernet(secret_key)
def verify_uploaded_file(self):
self.uploaded_file_unpack = []
for file in range(len(self.uploaded_file)):
encrypted_data = self.uploaded_file[file].read().decode("utf-8")
try:
decrypted_data = self.fernet.decrypt(encrypted_data)
decrypted_string = decrypted_data.decode()
self.data = json.loads(decrypted_string)
self.uploaded_file_unpack.append(self.data)
except Exception:
with self.col4:
invalid_profile_name = self.uploaded_file[file].name[:-4].replace('dnakey_', '')
st.error(f"This is not a dnakey profile! '{invalid_profile_name}'")
time.sleep(0.5)
st.info("If you don't know you can create a 'dnakey' profile in 'Create Profile window' in your left!", icon="ℹ️")
st.stop()
if len(self.uploaded_file_unpack) == 1:
st.toast("**:green[The Profile Data Is Live...]**", icon="🍰")
time.sleep(1)
else:
st.toast("**:blue[Your Profiles Data Is Live...]**", icon="🍬")
time.sleep(1)
def create_password_advance_settings(self):
if self.uploaded_file and self.upload_hash_key:
if self.first_time == "first_time":
st.info("**_Advance Settings_**: Here you have the Merge it's an advanced method. You won't need it for now!", icon="🧪")
with st.expander("Advance Settings [See More...]"):
self.new_merge_profile = CreateMergeProfile(self.uploaded_file_unpack, self.uploaded_files_merge)
self.new_merge_profile.merge_builder()
st.caption(":red[Warning]: The profiles must have the same length!")
if self.first_time == "first_time":
st.warning("**_Activate Random_**: If you activate this it gonna give you a temporary random password!", icon="⚠")
self.activate_random_on = st.toggle("Activate Random: (:green[Optional])")
| def select_file_option(self):
|
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: tamnva/hydroecolstm
# Path: hydroecolstm/utility/scaler.py
class Scaler:
def fit(self, x=None, method=None):
# concatenat all object_id
for i, object_id in zip(range(len(x)), x):
if i == 0:
cat_x = x[object_id]
else:
cat_x = torch.cat((cat_x, x[object_id]),0)
# Get either min max or mean and standard deviation
self.mins = _column_mins(cat_x)
self.maxs = _column_maxs(cat_x)
# Get means and standar deviation
self.means = _column_means(cat_x)
self.stds = _column_stds(cat_x)
scaler_a = []
scaler_b = []
for i, method_name in zip(range(len(method)), method):
if method_name == "MinMaxScaler":
scaler_a.append(self.mins[i])
scaler_b.append(self.maxs[i] - self.mins[i])
elif method_name=="Z-score":
scaler_a.append(self.means[i])
scaler_b.append(self.stds[i])
elif method_name=="None":
scaler_a.append(0.0)
scaler_b.append(1.0)
else:
print("Error: unknown scaler")
SystemExit("Program stop, please change scaler")
scaler_ab = torch.cat((torch.tensor(scaler_a, dtype=torch.float32),
torch.tensor(scaler_b, dtype=torch.float32)), 0)
self.scaler_parameter = torch.reshape(scaler_ab,
(2,len(scaler_a)))
def transform(self, x:dict[str:torch.tensor]=None) -> list:
x_scale = {}
for object_id in x:
x_scale[object_id] = torch.div(torch.sub(x[object_id],
self.scaler_parameter[0,:]),
self.scaler_parameter[1,:])
return x_scale
def inverse(self, x:list=None) -> list:
x_inverse = {}
for object_id in x:
x_inverse[object_id] = torch.add(self.scaler_parameter[0,:],
x[object_id]*self.scaler_parameter[1,:])
return x_inverse
# Path: hydroecolstm/utility/scaler.py
def get_scaler_name(config):
if "input_static_features" not in config.keys():
no_static_features = 0
else:
no_static_features = len(config["input_static_features"])
# Get name of scaler for dynamic input
scaler_name_input = config["scaler_input_dynamic_features"]*\
len(config["input_dynamic_features"])
# replicate this n times
if no_static_features > 0 and\
"scaler_input_static_features" in config:
for name in config["scaler_input_static_features"]*no_static_features:
scaler_name_input.append(name)
# scaler name target
scaler_name_target = config["scaler_target_features"]*len(config["target_features"])
return scaler_name_input, scaler_name_target
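# Illustrative example (not part of the repository): both supported scalers reduce to
# x' = (x - a) / b, with (a, b) = (min, max - min) for "MinMaxScaler" and (mean, std) for
# "Z-score"; "None" keeps (0, 1). The numbers below are illustrative only (note that
# torch.std is unbiased, so std([2, 4, 6]) == 2 here).
import torch
_x = torch.tensor([2.0, 4.0, 6.0])
_minmax = (_x - _x.min()) / (_x.max() - _x.min())
_zscore = (_x - _x.mean()) / _x.std()
assert torch.allclose(_minmax, torch.tensor([0.0, 0.5, 1.0]))
assert torch.allclose(_zscore, torch.tensor([-1.0, 0.0, 1.0]))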
# Path: hydroecolstm/data/read_data.py
def read_train_test_data(config:dict=None) -> dict:
# Read input data
dynamic_data = pd.read_csv(config['dynamic_data_file'][0],
delimiter=",", header=0)
dynamic_data["time"] = pd.to_datetime(dynamic_data["time"],
format = "%Y-%m-%d %H:%M")
# The column names must contain the following names
require_columns = ["object_id","time"]
require_columns.extend(config['input_dynamic_features'])
require_columns.extend(config['target_features'])
# Check if data header contains required names
for name in require_columns:
if name not in dynamic_data.columns:
raise NameError(f"Error: missing column '{name}' in dynamic data file \n")
# Subset of dynamic_data - only use the required columns, rows
dynamic_data = dynamic_data[require_columns]
dynamic_data.set_index("object_id", inplace=True)
dynamic_data = dynamic_data.loc[config["object_id"]]
config["train_period"] = pd.to_datetime(config["train_period"],
format = "%Y-%m-%d %H:%M")
train_data = dynamic_data[(dynamic_data["time"] >= config["train_period"][0]) &
(dynamic_data["time"] <= config["train_period"][1])]
# Column names of the output tensor
x_column_name = config['input_dynamic_features'].copy()
y_column_name = config['target_features'].copy()
# split to train data by object id
x_train = _split_by_object_id(train_data[x_column_name], config["object_id"])
y_train = _split_by_object_id(train_data[y_column_name], config["object_id"])
time_train = _time_by_object_id(train_data, config["object_id"])
config["test_period"] = pd.to_datetime(config["test_period"], format = "%Y-%m-%d")
test_data = dynamic_data[(dynamic_data["time"] >= config["test_period"][0]) &
(dynamic_data["time"] <= config["test_period"][1])]
x_test = _split_by_object_id(test_data[x_column_name], config["object_id"])
y_test = _split_by_object_id(test_data[y_column_name], config["object_id"])
time_test = _time_by_object_id(test_data, config["object_id"])
# Read static input data file
if 'input_static_features' in config:
if len(config['input_static_features']) > 0:
static_data = pd.read_csv(config['static_data_file'][0], delimiter=",",
header=0)
# The column names must contain the following names
require_columns = ["object_id"]
require_columns.extend(config['input_static_features'])
# Check if data header contains required names
for name in require_columns:
if name not in static_data.columns:
raise NameError(f"Error: missing column '{name}' in static data\n")
# Subset of dynamic_data - only use the required columns and rows
static_data = static_data[require_columns]
static_data.set_index("object_id", inplace=True)
static_data = torch.tensor(static_data.loc[config["object_id"]].values,
dtype=torch.float32)
# Update columne name
x_column_name.extend(config['input_static_features'])
else:
static_data = None
# add static data to x_train and y_train
if static_data is not None:
for i, object_id in zip(range(len(x_train)), x_train):
rep_static_data = static_data[i,].repeat(x_train[object_id].shape[0],1)
x_train[object_id] = torch.cat((x_train[object_id], rep_static_data), 1)
rep_static_data = static_data[i,].repeat(x_test[object_id].shape[0],1)
x_test[object_id] = torch.cat((x_test[object_id], rep_static_data), 1)
return {"x_train":x_train, "y_train": y_train, "time_train" : time_train,
"x_test":x_test, "y_test": y_test, "time_test": time_test,
"x_column_name": x_column_name, "y_column_name": y_column_name}
# Path: hydroecolstm/data/read_config.py
def read_config(config_file):
# Open and read the configureation file
with open(config_file, 'r') as file:
cfg = yaml.safe_load(file)
# All required keywords
key = ["object_id","input_dynamic_features", "target_features",
"train_period", "test_period", "n_epochs", "learning_rate",
"dynamic_data_file"]
# Check if required keywords are missing
for keyword in key:
if keyword not in cfg:
raise NameError(f"Error in configuration file, keyword '{keyword}' is missing")
# check if train is test
# Return output
return cfg
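# Illustrative example (not part of the repository): a minimal config dict that would pass
# the keyword check in read_config above; every value is a placeholder.
_minimal_cfg = {
    "object_id": ["basin_01"],
    "input_dynamic_features": ["precipitation", "temperature"],
    "target_features": ["streamflow"],
    "train_period": ["2000-01-01 00:00", "2005-12-31 00:00"],
    "test_period": ["2006-01-01 00:00", "2008-12-31 00:00"],
    "n_epochs": 50,
    "learning_rate": 1e-3,
    "dynamic_data_file": ["path/to/dynamic_data.csv"],
}
_required = ["object_id", "input_dynamic_features", "target_features", "train_period",
             "test_period", "n_epochs", "learning_rate", "dynamic_data_file"]
assert all(keyword in _minimal_cfg for keyword in _required)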
# Path: hydroecolstm/model/lstm_linears.py
class Lstm_Linears(nn.Module):
def __init__(self, config, **kwargs):
super(Lstm_Linears, self).__init__()
# Model structure parametery
self.input_size = self.get_input_size(config)
self.output_size = len(config["target_features"])
self.hidden_size = config["hidden_size"]
self.num_layers = config["num_layers"]
self.dropout = config["dropout"]*min(1.0, self.num_layers - 1.0)
self.linears_num_layers = config["REG"]["num_layers"]
self.linears_activation_function = config["REG"]["activation_function"]
self.linears_num_neurons = self.find_num_neurons(config=config)
# Standard LSTM from torch
self.lstm = nn.LSTM(input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
dropout=self.dropout,
**kwargs)
# Fully-connected layer connect hidden and output
self.linear = Linears(num_layers=self.linears_num_layers,
activation_function=self.linears_activation_function,
num_neurons=self.linears_num_neurons)
def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
y_predict = {}
for key in x.keys():
# get standard LSTM outputs
y_lstm, _ = self.lstm(x[key])
# get final output
y_predict[key] = self.linear(y_lstm)
# return output
return y_predict
# get input size
def get_input_size(self, config) -> int:
if "input_static_features" in config:
input_size = (len(config["input_dynamic_features"]) +
len(config["input_static_features"]))
else:
input_size = len(config["input_dynamic_features"])
return input_size
# Find the number of neurons in each linear layer, including the input layer
def find_num_neurons(self, config) -> int:
# The first number of neurons (input to the linear block) is the LSTM hidden size
num_neurons = [self.hidden_size]
if "REG" in config:
if len(config["REG"]["num_neurons"]) > 1:
for i in range(len(config["REG"]["num_neurons"])-1):
num_neurons.append(config["REG"]["num_neurons"][i])
num_neurons.append(self.output_size)
return num_neurons
# Path: hydroecolstm/model/ea_lstm.py
class Ea_Lstm_Linears(nn.Module):
def __init__(self, config):
super(Ea_Lstm_Linears, self).__init__()
self.static_size = len(config["input_static_features"])
self.dynamic_size = len(config["input_dynamic_features"])
self.num_layers = config["num_layers"]
self.hidden_size = config["hidden_size"]
self.output_size = len(config["target_features"])
self.linears_num_layers = config["REG"]["num_layers"]
self.linears_activation_function = config["REG"]["activation_function"]
self.linears_num_neurons = self.find_num_neurons(config=config)
# Model structure
self.i = nn.Sequential(nn.Linear(self.static_size, self.hidden_size), nn.Sigmoid())
self.f = MultiLinear(self.dynamic_size, self.hidden_size, self.hidden_size, nn.Sigmoid())
self.g = MultiLinear(self.dynamic_size, self.hidden_size, self.hidden_size, nn.Tanh())
self.o = MultiLinear(self.dynamic_size, self.hidden_size, self.hidden_size, nn.Sigmoid())
self.linear = Linears(num_layers=self.linears_num_layers,
activation_function=self.linears_activation_function,
num_neurons=self.linears_num_neurons)
# TODO: This forward function takes too much time, needs to be improved
def forward(self, x):
# Initial hidden, cell states
c_t = torch.randn(self.hidden_size).unsqueeze(0)
h_t = torch.randn(self.hidden_size).unsqueeze(0)
# Forward run
output = {}
for key in x.keys():
output[key] = torch.zeros(size=(x[key].shape[0],self.output_size))
for i in range(x[key].shape[0]):
i_t = self.i(x[key][i:i+1,self.dynamic_size:])
f_t = self.f(x[key][i:i+1,:self.dynamic_size], h_t)
g_t = self.g(x[key][i:i+1,:self.dynamic_size], h_t)
o_t = self.o(x[key][i:i+1,:self.dynamic_size], h_t)
c_t = f_t*c_t + i_t*g_t
h_t = o_t*torch.tanh(c_t)
output[key][i,:] = self.linear(h_t)
return output
# Find the number of neurons in each linear layer, including the input layer
def find_num_neurons(self, config) -> int:
# The first number of neurons (input to the linear block) is the LSTM hidden size
num_neurons = [self.hidden_size]
if "REG" in config:
if len(config["REG"]["num_neurons"]) > 1:
for i in range(len(config["REG"]["num_neurons"])-1):
num_neurons.append(config["REG"]["num_neurons"][i])
num_neurons.append(self.output_size)
return num_neurons
# Path: hydroecolstm/model/train.py
class Train():
def __init__(self, config, model, **kwargs):
super(Train, self).__init__()
# Training parameters
self.lr = config["learning_rate"]
self.objective_function_name = config["objective_function_name"]
self.n_epochs = config["n_epochs"]
self.nskip = config["warmup_length"]
self.loss_function = LossFunction()
self.model = model
# Train function
def __call__(self, x: Dict[str, torch.Tensor], y: Dict[str, torch.Tensor]):
# Optimization function
optim = torch.optim.Adam(self.model.parameters(), lr=self.lr)
# Train the model
for epoch in range(self.n_epochs):
# Get model output
y_predict= self.model(x)
# Reset the gradients to zero
optim.zero_grad()
# Caculate loss function
loss, loss_avg =\
self.loss_function(y_true=y, y_predict=y_predict, nskip=self.nskip,
objective_function_name=self.objective_function_name)
# Error back propagation LSTM.state_dict()
loss_avg.backward()
# Update weights and biases
optim.step()
# Print to console
print(f"Epoch [{epoch+1}/{self.n_epochs}], avg_loss = {loss_avg:.8f}")
return self.model, y_predict
# Path: examples/customLSTM_deleteme.py
import numbers
import warnings
import torch
import torch.jit as jit
import torch.nn as nn
from collections import namedtuple
from typing import List, Tuple, Dict
from torch import Tensor
from torch.nn import Parameter
from hydroecolstm.utility.scaler import Scaler, get_scaler_name
from hydroecolstm.data.read_data import read_train_test_data
from hydroecolstm.data.read_config import read_config
from hydroecolstm.model.lstm_linears import Lstm_Linears
from hydroecolstm.model.ea_lstm import Ea_Lstm_Linears
from hydroecolstm.model.train import Train
class EALSTMCell(jit.ScriptModule):
def __init__(self, dynamic_input_size, static_input_size, hidden_size):
super().__init__()
self.dynamic_input_size = dynamic_input_size
self.static_input_size = static_input_size
self.hidden_size = hidden_size
self.weight_sh = Parameter(torch.randn(hidden_size, static_input_size))
self.weight_dh = Parameter(torch.randn(3 * hidden_size, dynamic_input_size))
self.weight_hh = Parameter(torch.randn(3 * hidden_size, hidden_size))
self.bias_sh = Parameter(torch.randn(hidden_size))
self.bias_dh = Parameter(torch.randn(3 * hidden_size))
self.bias_hh = Parameter(torch.randn(3 * hidden_size))
@jit.script_method
def forward(self, dynamic_input: Tensor, static_input: Tensor,
state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
# Initial state
hx, cx = state
# Gate input
gates = (torch.mm(dynamic_input, self.weight_dh.t())
+ self.bias_dh
+ torch.mm(hx, self.weight_hh.t())
+ self.bias_hh)
forgetgate, cellgate, outgate = gates.chunk(3, 1)
ingate = torch.mm(static_input, self.weight_sh.t()) + self.bias_sh
# Gate output
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
# Update state
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
# Return state output
return hy, (hy, cy)
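# --- Editor's illustrative shape check (not part of the original file) -------
# One step of the EA-LSTM cell above: the input gate is computed from the
# static attributes only, while the forget/cell/output gates use the dynamic
# forcing plus the previous hidden state. Sizes below are arbitrary.
_cell = EALSTMCell(dynamic_input_size=5, static_input_size=3, hidden_size=8)
_state = (torch.zeros(1, 8), torch.zeros(1, 8))
_out, _state = _cell(torch.randn(1, 5), torch.randn(1, 3), _state)
print(_out.shape)  # torch.Size([1, 8])
# ------------------------------------------------------------------------------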
class EALSTMLayer(jit.ScriptModule):
def __init__(self, config):
super().__init__()
self.dynamic_input_size = len(config["input_dynamic_features"])
self.static_input_size = len(config["input_static_features"])
self.hidden_size = config["hidden_size"]
self.cell = EALSTMCell(self.dynamic_input_size, self.static_input_size,
self.hidden_size)
@jit.script_method
def forward(self, dynamic_input,
static_input,
| state:Tuple[Tensor, Tensor]): |
====REPOSITORY====
# Repo Name: camenduru/OpenLRM-hf
# Path: lrm/models/generator.py
class LRMGenerator(nn.Module):
"""
Full model of the large reconstruction model.
"""
def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,
transformer_dim: int, transformer_layers: int, transformer_heads: int,
triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):
super().__init__()
# attributes
self.encoder_feat_dim = encoder_feat_dim
self.camera_embed_dim = camera_embed_dim
# modules
self.encoder = DinoWrapper(
model_name=encoder_model_name,
freeze=encoder_freeze,
)
self.camera_embedder = CameraEmbedder(
raw_dim=12+4, embed_dim=camera_embed_dim,
)
self.transformer = TriplaneTransformer(
inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,
image_feat_dim=encoder_feat_dim,
camera_embed_dim=camera_embed_dim,
triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,
)
self.synthesizer = TriplaneSynthesizer(
triplane_dim=triplane_dim, samples_per_ray=rendering_samples_per_ray,
)
def forward_planes(self, image, camera):
# image: [N, C_img, H_img, W_img]
# camera: [N, D_cam_raw]
assert image.shape[0] == camera.shape[0], "Batch size mismatch for image and camera"
N = image.shape[0]
# encode image
image_feats = self.encoder(image)
assert image_feats.shape[-1] == self.encoder_feat_dim, \
f"Feature dimension mismatch: {image_feats.shape[-1]} vs {self.encoder_feat_dim}"
# embed camera
camera_embeddings = self.camera_embedder(camera)
assert camera_embeddings.shape[-1] == self.camera_embed_dim, \
f"Feature dimension mismatch: {camera_embeddings.shape[-1]} vs {self.camera_embed_dim}"
# transformer generating planes
planes = self.transformer(image_feats, camera_embeddings)
assert planes.shape[0] == N, "Batch size mismatch for planes"
assert planes.shape[1] == 3, "Planes should have 3 channels"
return planes
def forward(self, image, source_camera, render_cameras, render_size: int):
# image: [N, C_img, H_img, W_img]
# source_camera: [N, D_cam_raw]
# render_cameras: [N, M, D_cam_render]
# render_size: int
assert image.shape[0] == source_camera.shape[0], "Batch size mismatch for image and source_camera"
assert image.shape[0] == render_cameras.shape[0], "Batch size mismatch for image and render_cameras"
N, M = render_cameras.shape[:2]
planes = self.forward_planes(image, source_camera)
# render target views
render_results = self.synthesizer(planes, render_cameras, render_size)
assert render_results['images_rgb'].shape[0] == N, "Batch size mismatch for render_results"
assert render_results['images_rgb'].shape[1] == M, "Number of rendered views should be consistent with render_cameras"
return {
'planes': planes,
**render_results,
}
# Path: lrm/cam_utils.py
def build_camera_principle(RT: torch.Tensor, intrinsics: torch.Tensor):
"""
RT: (N, 3, 4)
intrinsics: (N, 3, 2), [[fx, fy], [cx, cy], [width, height]]
"""
fx, fy, cx, cy = get_normalized_camera_intrinsics(intrinsics)
return torch.cat([
RT.reshape(-1, 12),
fx.unsqueeze(-1), fy.unsqueeze(-1), cx.unsqueeze(-1), cy.unsqueeze(-1),
], dim=-1)
# Path: lrm/cam_utils.py
def build_camera_standard(RT: torch.Tensor, intrinsics: torch.Tensor):
"""
RT: (N, 3, 4)
intrinsics: (N, 3, 2), [[fx, fy], [cx, cy], [width, height]]
"""
E = compose_extrinsic_RT(RT)
fx, fy, cx, cy = get_normalized_camera_intrinsics(intrinsics)
I = torch.stack([
torch.stack([fx, torch.zeros_like(fx), cx], dim=-1),
torch.stack([torch.zeros_like(fy), fy, cy], dim=-1),
torch.tensor([[0, 0, 1]], dtype=torch.float32, device=RT.device).repeat(RT.shape[0], 1),
], dim=1)
return torch.cat([
E.reshape(-1, 16),
I.reshape(-1, 9),
], dim=-1)
# Path: lrm/cam_utils.py
def center_looking_at_camera_pose(camera_position: torch.Tensor, look_at: torch.Tensor = None, up_world: torch.Tensor = None):
"""
camera_position: (M, 3)
look_at: (3)
up_world: (3)
return: (M, 3, 4)
"""
# by default, looking at the origin and world up is pos-z
if look_at is None:
look_at = torch.tensor([0, 0, 0], dtype=torch.float32)
if up_world is None:
up_world = torch.tensor([0, 0, 1], dtype=torch.float32)
look_at = look_at.unsqueeze(0).repeat(camera_position.shape[0], 1)
up_world = up_world.unsqueeze(0).repeat(camera_position.shape[0], 1)
z_axis = camera_position - look_at
z_axis = z_axis / z_axis.norm(dim=-1, keepdim=True)
x_axis = torch.cross(up_world, z_axis)
x_axis = x_axis / x_axis.norm(dim=-1, keepdim=True)
y_axis = torch.cross(z_axis, x_axis)
y_axis = y_axis / y_axis.norm(dim=-1, keepdim=True)
extrinsics = torch.stack([x_axis, y_axis, z_axis, camera_position], dim=-1)
return extrinsics
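# --- Editor's standalone sketch (not the repository function) ----------------
# Same look-at construction as center_looking_at_camera_pose above, for a
# single camera: build an orthonormal frame from (position, target, up) with
# cross products and stack [x | y | z | t] into a 3x4 extrinsic. Values below
# are made up.
import torch

def _look_at_extrinsic(position, target, up):
    z = position - target
    z = z / z.norm()
    x = torch.cross(up, z, dim=-1)
    x = x / x.norm()
    y = torch.cross(z, x, dim=-1)
    y = y / y.norm()
    return torch.stack([x, y, z, position], dim=-1)  # (3, 4)

_E = _look_at_extrinsic(torch.tensor([2.0, 0.0, 0.0]),
                        torch.tensor([0.0, 0.0, 0.0]),
                        torch.tensor([0.0, 0.0, 1.0]))
print(_E.shape)  # torch.Size([3, 4])
# ------------------------------------------------------------------------------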
# Path: lrm/inferrer.py
import torch
import math
import os
import imageio
import mcubes
import trimesh
import numpy as np
import argparse
from PIL import Image
from .models.generator import LRMGenerator
from .cam_utils import build_camera_principle, build_camera_standard, center_looking_at_camera_pose
[1, 0, 0, 0],
[0, 0, -1, -dist_to_center],
[0, 1, 0, 0],
]], dtype=torch.float32)
canonical_camera_intrinsics = self._default_intrinsics().unsqueeze(0)
source_camera = build_camera_principle(canonical_camera_extrinsics, canonical_camera_intrinsics)
return source_camera.repeat(batch_size, 1)
def _default_render_cameras(self, batch_size: int = 1):
# return: (N, M, D_cam_render)
render_camera_extrinsics = self._get_surrounding_views()
render_camera_intrinsics = self._default_intrinsics().unsqueeze(0).repeat(render_camera_extrinsics.shape[0], 1, 1)
render_cameras = build_camera_standard(render_camera_extrinsics, render_camera_intrinsics)
return render_cameras.unsqueeze(0).repeat(batch_size, 1, 1)
@staticmethod
def images_to_video(images, output_path, fps, verbose=False):
# images: (T, C, H, W)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
frames = []
for i in range(images.shape[0]):
frame = (images[i].permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
assert frame.shape[0] == images.shape[2] and frame.shape[1] == images.shape[3], \
f"Frame shape mismatch: {frame.shape} vs {images.shape}"
assert frame.min() >= 0 and frame.max() <= 255, \
f"Frame value out of range: {frame.min()} ~ {frame.max()}"
frames.append(frame)
# imageio.mimwrite(output_path, np.stack(frames), fps=fps, codec='mpeg4', quality=10)
imageio.mimwrite(output_path, np.stack(frames), fps=fps, quality=10)
if verbose:
print(f"Saved video to {output_path}")
def infer_single(self, image: torch.Tensor, render_size: int, mesh_size: int, export_video: bool, export_mesh: bool):
# image: [1, C_img, H_img, W_img]
mesh_thres = 1.0
chunk_size = 2
batch_size = 1
source_camera = self._default_source_camera(batch_size).to(self.device)
render_cameras = self._default_render_cameras(batch_size).to(self.device)
with torch.no_grad():
planes = self.model.forward_planes(image, source_camera)
results = {}
if export_video:
# forward synthesizer per mini-batch
frames = []
for i in range(0, render_cameras.shape[1], chunk_size):
frames.append(
self.model.synthesizer(
planes,
render_cameras[:, i:i+chunk_size],
render_size,
)
)
# merge frames
frames = {
k: torch.cat([r[k] for r in frames], dim=1)
for k in frames[0].keys()
}
# update results
results.update({
'frames': frames,
})
if export_mesh:
grid_out = self.model.synthesizer.forward_grid(
planes=planes,
grid_size=mesh_size,
)
vtx, faces = mcubes.marching_cubes(grid_out['sigma'].squeeze(0).squeeze(-1).cpu().numpy(), mesh_thres)
vtx = vtx / (mesh_size - 1) * 2 - 1
vtx_tensor = torch.tensor(vtx, dtype=torch.float32, device=self.device).unsqueeze(0)
vtx_colors = self.model.synthesizer.forward_points(planes, vtx_tensor)['rgb'].squeeze(0).cpu().numpy() # (0, 1)
vtx_colors = (vtx_colors * 255).astype(np.uint8)
mesh = trimesh.Trimesh(vertices=vtx, faces=faces, vertex_colors=vtx_colors)
results.update({
'mesh': mesh,
})
return results
def infer(self, source_image: str, dump_path: str, source_size: int, render_size: int, mesh_size: int, export_video: bool, export_mesh: bool):
source_image_size = source_size if source_size > 0 else self.infer_kwargs['source_size']
image = torch.tensor(np.array(Image.open(source_image))).permute(2, 0, 1).unsqueeze(0) / 255.0
# if RGBA, blend to RGB
print(f"[DEBUG] check 1.")
if image.shape[1] == 4:
image = image[:, :3, ...] * image[:, 3:, ...] + (1 - image[:, 3:, ...])
print(f"[DEBUG] image.shape={image.shape} and image[0,0,0,0]={image[0,0,0,0]}")
print(f"[DEBUG] check 2.")
image = torch.nn.functional.interpolate(image, size=(source_image_size, source_image_size), mode='bicubic', align_corners=True)
image = torch.clamp(image, 0, 1)
results = self.infer_single(
image.to(self.device),
render_size=render_size if render_size > 0 else self.infer_kwargs['render_size'],
mesh_size=mesh_size,
export_video=export_video,
export_mesh=export_mesh,
)
image_name = os.path.basename(source_image)
uid = image_name.split('.')[0]
os.makedirs(dump_path, exist_ok=True)
# dump video
if 'frames' in results:
renderings = results['frames']
for k, v in renderings.items():
if k == 'images_rgb':
self.images_to_video(
v[0],
| os.path.join(dump_path, f'{uid}.mp4'), |
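# --- Editor's illustrative sketch (not part of the quoted repository) --------
# The frame-conversion step used by images_to_video above: a (T, C, H, W)
# float tensor in [0, 1] becomes a list of H x W x C uint8 frames ready for a
# video writer. Actual file writing is omitted; shapes and values are made up.
import numpy as np
import torch

_images = torch.rand(4, 3, 32, 32)  # (T, C, H, W)
_frames = [(img.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8) for img in _images]
print(len(_frames), _frames[0].shape)  # 4 (32, 32, 3)
# ------------------------------------------------------------------------------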
====REPOSITORY====
# Repo Name: garinops/chat-E-AI
# Path: ai/openai/tools/TOOL_TIME.py
class ToolTime(object):
TOOL_MODEL = {
"type": "function",
"function": {
# [必填:value可编辑],注意所有Tools中保持唯一,且和下方的静态方法函数保持命名一致。
"name": "get_time",
# [必填:value可编辑],工具功能介绍。
"description": "获取指定地理位置的时间,如果未指定,默认为北京时间",
"parameters": {
"type": "object",
# [必填:value],联网工具参数。如果有参数,参照下方,自行配置增添所需参数,如果没有参数,则使用 "properties": {}, 。
"properties": {
# [选填:key可编辑],具体所需参数。
"location": {
# [必填:value可编辑],参数类型
"type": "string",
# [必填:value可编辑],参数描述。
"description": "中文地理位置。",
},
"offset_hours": {
# [必填:value可编辑],参数类型
"type": "string",
# [必填:value可编辑],参数描述。
"description": "该位置与UTC的小时偏移量,数字形式",
},
},
# [选填],需要OpenAI必须返回的参数,则在下方数组中指定。如果不需要,则使用 "required": [], 。
"required": ["location", "offset_hours"],
},
}
}
@staticmethod
def get_time(location: str, offset_hours: str) -> ResponseBase:
response_tool = ResponseBase(answer="", source="Time")
# 获取当前时间(UTC)
current_time = datetime.utcnow()
# 计算指定偏移量的时间
offset = timedelta(hours=int(offset_hours))
target_time = current_time + offset
# 格式化时间
format_time = target_time.strftime("%Y-%m-%d, %A, %H:%M:%S")
response_tool.answer = "{}时间,{}".format(location, format_time)
return response_tool
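# --- Editor's illustrative sketch (class and function names are invented) ----
# The pattern these Tool* classes follow: a JSON-schema "function" description
# (TOOL_MODEL) that can be handed to the OpenAI tools API, plus a static method
# with the same name that actually produces the answer.
from datetime import datetime, timezone

class _ToolEcho:
    TOOL_MODEL = {
        "type": "function",
        "function": {
            "name": "echo_text",
            "description": "Echo the given text back to the user.",
            "parameters": {
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "Text to echo."},
                },
                "required": ["text"],
            },
        },
    }

    @staticmethod
    def echo_text(text: str) -> str:
        return f"{text} (echoed at {datetime.now(timezone.utc).isoformat()})"

print(_ToolEcho.echo_text("hello"))
# ------------------------------------------------------------------------------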
# Path: ai/openai/tools/WTTR_IN.py
class ToolWttrIn(object):
TOOL_MODEL = {
# 工具模型类型,必填,目前API支持 function
"type": "function",
"function": {
# 函数名称,必填,The name of the get_weather to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
"name": "get_weather",
# 函数描述,必填,A description of what the get_weather does, used by the model to choose when and how to call the get_weather.
"description": "获取给定地理位置的天气数据",
# 函数参数,必填。
"parameters": {
"type": "object",
# JSON键类型,及描述。
"properties": {
"location": {
"type": "string",
"description": "中文地理位置。",
},
},
"required": ["location"],
},
}
}
@staticmethod
@timeout_decorator.timeout(4)
def get_weather(location: str) -> ResponseBase:
language = pywttr.Language("zh-cn")
response_tool = ResponseBase(answer="", source="wttr•in")
""""疑难杂症处理"""
if location in ["南极"]:
loggerToolWttrIn.warning(f'This Entry Did Not Make a Request to the Weather Server Due to a Bug in the Weather Server, Which May Be Fix in the Future.')
response_tool.answer = "亲爱的,我无法获取该地区的天气信息,你输入的地理位置是否过于宽泛呢?"
return response_tool
if location == "北极":
location = "North Pole"
""""正经处理"""
try:
weather_wttr = pywttr.get_weather(location=location, language=language)
# print(weather_wttr)
weather = f"当前{location}{weather_wttr.current_condition[0].lang_zh_cn[0].value}," \
f"{weather_wttr.current_condition[0].temp_c}°C," \
f"风速{weather_wttr.current_condition[0].windspeed_kmph}km/h," \
f"湿度{weather_wttr.current_condition[0].humidity}%," \
f"降水{weather_wttr.current_condition[0].precip_mm}mm。"
response_tool.answer = weather
except timeout_decorator.TimeoutError:
loggerToolWttrIn.warning(f'Exception(timeout_decorator.TimeoutError) was encountered when get_weather({location})')
response_tool.answer = "亲爱的,wttr·in天气服务器可能发生了宕机,在使用过程中请节制访问。"
except requests.exceptions.ConnectionError:
loggerToolWttrIn.warning(f'Exception(requests.exceptions.ConnectionError) was encountered when get_weather({location})')
response_tool.answer = "亲爱的,我可能失去了天气服务的网络连接。"
except urllib3.exceptions.MaxRetryError:
loggerToolWttrIn.warning(f'Exception(urllib3.exceptions.MaxRetryError) was encountered when get_weather({location})')
response_tool.answer = "亲爱的,我遇到了障碍。\n\n这可能是有很多人在同时使用天气服务。"
except requests.exceptions.HTTPError:
loggerToolWttrIn.warning(f'Exception(requests.exceptions.HTTPError) was encountered when get_weather({location})')
response_tool.answer = "亲爱的,我无法获取该地区的天气信息,大概是我们的尚没有收录该地区的天气情况。\n\n当然你也可以给我提供其他语言,这可能会增进我的理解。"
except json.decoder.JSONDecodeError:
loggerToolWttrIn.warning(f'Exception(json.decoder.JSONDecodeError) was encountered when get_weather({location})')
response_tool.answer = "亲爱的,我无法获取该地区的天气信息,你输入的地理位置是否过于宽泛呢?\n\n当然你也可以给我提供其他语言,这可能会增进我的理解。"
return response_tool
# Path: ai/openai/tools/WWW_GARINASSET_COM.py
class ToolWwwGarinassetCom(object):
TOOL_MODEL = {
# 工具类型,必填,强制function
"type": "function",
"function": {
# 函数名称,必填,The name of the get_weather to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
"name": "get_indicator_overview",
# 函数描述,必填,A description of what the get_weather does, used by the model to choose when and how to call the get_weather.
"description": "获取给定国家、行政区域的宏微观经济数据,行业数据,消费品市场价格数据,例如中国GDP,汽车产量,鸡蛋价格,如果没有给定行政区域,默认为中国大陆。",
# 函数参数,必填。
"parameters": {
"type": "object",
# JSON键类型,及描述。
"properties": {
"region": {
"type": "string",
"description": "中文行政区域,例如,中国大陆、香港。",
},
"name": {
"type": "string",
"description": "中文指标名称,例如国内生产总值,GDP,鸡蛋价格。"
},
},
"required": ["region", "name"],
},
}
}
@staticmethod
def _handle_datetime(name_property_json, data_latest_time) -> str:
match name_property_json:
case "月值":
_date = pd.Timestamp(data_latest_time)
_dateTime = "{}年{}月".format(_date.year, _date.month)
return _dateTime
case "累月值":
_date = pd.Timestamp(data_latest_time)
_dateTime = "{}年前{}月".format(_date.year, _date.month)
return _dateTime
case "季值":
_date = pd.Timestamp(data_latest_time)
_dateTime = "{}年{}季度".format(_date.year, _date.quarter)
return _dateTime
case "年值":
_date = pd.Timestamp(data_latest_time)
_dateTime = "{}年".format(_date.year)
return _dateTime
case _:
_date = pd.Timestamp(data_latest_time)
_dateTime = "{}年{}月{}日".format(_date.year, _date.month, _date.day)
return _dateTime
@staticmethod
def _handle_element(element):
_name_attribute = None
_data_year_over_year_diff = None
_currency = None
_unit = None
if element['names']['name_attribute_json'] is not None:
_name_attribute = "(统计口径:{})".format(element['names']['name_attribute_json'][0])
if element['currencies'] is not None:
_currency = element['currencies']['currency_json'][0]
if element['units'] is not None:
_unit = element['units']['unit_json'][0]
if element['data_year_over_year'] is not None:
data_year_over_year = element['data_year_over_year']['data_latest_value']
data_year_over_year_fixed = element['data_year_over_year_fixed']
_data_year_over_year_diff = round(data_year_over_year - data_year_over_year_fixed, 2)
if _data_year_over_year_diff is not None and _data_year_over_year_diff > 0:
_data_year_over_year_diff = ",同比:+{}%。".format(_data_year_over_year_diff)
else:
_data_year_over_year_diff = ",同比:{}%。".format(_data_year_over_year_diff)
_data_latest_time = element['data']['data_latest_time']
_name_property_json = element['names']['name_property_json'][0]
_dateTime = ToolWwwGarinassetCom._handle_datetime(data_latest_time=_data_latest_time,
name_property_json=_name_property_json)
element_overview = "{},{}{}{}为{}{}{}{}".format(
_dateTime,
element['regions']['region_json'][0],
element['names']['name_json'][0],
_name_attribute if _name_attribute else "",
round(element['data']['data_latest_value'], 2),
_unit if _unit else "",
_currency if _currency else "",
_data_year_over_year_diff if _data_year_over_year_diff else "。",
)
return element_overview
@staticmethod
def get_indicator_overview(region: str, name: str) -> ResponseBase:
response_tool = ResponseBase(answer="", source="嘉林数据")
# API请求
query = "{} {}".format(region, name)
url = "https://api.garinasset.com/www/v1/searches/indicators"
params = {"q": query}
response_api = None
try:
response_api = requests.get(url, params=params)
# 检查响应状态码,如果不是 2xx,会引发异常
response_api.raise_for_status()
# 返回响应的 JSON 数据
response_api = response_api.json()
indicator_overview = ""
data_list = response_api['data']
if data_list:
for index, element in enumerate(data_list):
if index == 0:
indicator_overview = ToolWwwGarinassetCom._handle_element(element)
elif index == 1:
indicator_overview = indicator_overview + "\n\n" + "相关数据:\n" + "\n" + ToolWwwGarinassetCom._handle_element(
element)
elif index == 6:
break
else:
indicator_overview = indicator_overview + "\n\n" + ToolWwwGarinassetCom._handle_element(element)
response_tool.answer = indicator_overview
return response_tool
else:
response_tool.answer = "亲爱的,我无法获取该项数据信息,大概是数据商尚没有收录该数据。\n\n当然也可能是我错误理解了你的问题。"
return response_tool
except requests.exceptions.HTTPError:
_status_code = response_api.status_code
if _status_code == 422:
logger.warning(f"Exception(requests.exceptions.HTTPError{_status_code}) was encountered when get_indicator_overview({region},{name})")
response_tool.answer = "亲爱的,我无法提供相关的数据服务,你是否需要修改问题呢?"
return response_tool
elif _status_code == 401:
logger.warning(f"Exception(requests.exceptions.HTTPError[{_status_code}]) was encountered when get_indicator_overview({region},{name})")
response_tool.answer = "亲爱的,你没有嘉林数据的访问权限,暂时无法给你提供数据响应。"
return response_tool
elif _status_code >= 500:
logger.warning(f"Exception(requests.exceptions.HTTPError[{_status_code}]) was encountered when get_indicator_overview({region},{name})")
response_tool.answer = "亲爱的,宏微观经济数据库正在升级,暂时无法给你提供响应。"
return response_tool
else:
logger.warning(f"Exception(requests.exceptions.HTTPError[{_status_code}]) was encountered when get_indicator_overview({region},{name})")
response_tool.answer = "亲爱的,我遇到了未知的网络故障,这需要一定的处理时间。"
return response_tool
except requests.exceptions.ConnectionError as e:
logger.warning(f"Exception(requests.exceptions.ConnectionError was encountered when get_indicator_overview({region},{name})")
response_tool.answer = "亲爱的,我可能失去了宏微观经济数据库服务的网络连接。"
return response_tool
except requests.exceptions.RequestException:
logger.warning(f"Exception(requests.exceptions.RequestException was encountered when get_indicator_overview({region},{name})")
response_tool.answer = "亲爱的,我明白我现在的处境,程序运行发生了故障哦。"
return response_tool
# Path: ai/openai/tools/XUEQIU_COM.py
class ToolXueqiuCom(object):
TOOL_MODEL = {
# 工具模型类型,必填,目前API支持 function
"type": "function",
"function": {
# 函数名称,必填
"name": "get_stock",
# 函数描述,必填
"description": "获取给定股票代码的股票信息,如果获取不到股票代码询问市场及代码。",
# 函数参数,必填。
"parameters": {
"type": "object",
# JSON键类型,及描述。
"properties": {
"name": {
"type": "string",
"description": "股票名称",
},
"symbol": {
"type": "string",
"description": "股票代码,上海市场前缀有SH,例如:SH600104;深圳市场前有缀SZ,例如:SZ300315;香港市场代码为五位数字,没有前缀,不足五位前面补'0',例如腾讯代码是‘00700’;其他市场保持原样,例如美国市场苹果:AAPL。",
},
},
"required": ["name", "symbol"],
},
}
}
@staticmethod
@timeout_decorator.timeout(8)
def get_stock(name: str, symbol: str) -> ResponseBase:
response_tool = ResponseBase(answer="", source="xueqiu•com")
_stock = ''
try:
_quote = pysnowball.quotec(symbols=symbol)
_quoteD = _quote['data'][0]
_xueQiu = XueQiu.model_construct()
if _quoteD:
_xueQiu = XueQiu(**_quoteD)
_percent = ''
if _xueQiu.percent:
_percent = ("下跌" + str(_xueQiu.percent) + '%,') if _xueQiu.percent <0 else ("上涨+" + str(_xueQiu.percent) + '%,')
else:
_percent = ''
_stock = "{},代码{}。{}{}{}{}{}{}{}".format(
name,
_xueQiu.symbol,
("当前股价" + str(_xueQiu.current)) if _xueQiu.current else '当前股价未获得',
'(交易中),' if _xueQiu.is_trade else '(不在交易状态),',
_percent,
("成交金额" + str(_xueQiu.amount)+ '。') if _xueQiu.amount else '。',
("目前总市值" + str(_xueQiu.market_capital) + ',') if _xueQiu.market_capital else '',
("流动市值"+ str(_xueQiu.float_market_capital) + '。') if _xueQiu.float_market_capital else '',
("该股票年初至今涨跌" + str(_xueQiu.current_year_percent) + '%。') if _xueQiu.current_year_percent else '',
)
else:
raise ValueError
response_tool.answer = _stock
except ValueError:
logger.warning(f'Exception(ValueError) was encountered when get_stock({symbol})')
response_tool.answer = "亲爱的,你能告诉我股票所在市场以及股票代码吗?这样可以增进我的理解。"
except timeout_decorator.TimeoutError:
logger.warning(f'Exception(timeout_decorator.TimeoutError) was encountered when get_stock({symbol})')
response_tool.answer = "亲爱的,访问xueqiu•com服务超时,请在使用过程中保持节制。"
return response_tool
# Path: config/settings.py
OPENAI_TOOLS_CONFIG = [
{"enable": True, "Tool": ToolWwwGarinassetCom, "description": "嘉林数据-宏微观经济数据库"},
{"enable": True, "Tool": ToolWttrIn, "description": "wttr.in-天气"},
{"enable": True, "Tool": ToolTime, "description": "time-时间"},
{"enable": True, "Tool": ToolXueqiuCom, "description": "xueqiu.com-股票"},
]
# Path: models/response.py
class ResponseBase(BaseModel):
answer: str
source: str
# Path: ai/openai/tools/tools.py
from ai.openai.tools.TOOL_TIME import ToolTime
from ai.openai.tools.WTTR_IN import ToolWttrIn
from ai.openai.tools.WWW_GARINASSET_COM import ToolWwwGarinassetCom
from ai.openai.tools.XUEQIU_COM import ToolXueqiuCom
from config.settings import OPENAI_TOOLS_CONFIG
from models.response import ResponseBase
class OpenAITools:
@staticmethod
def get_tools() -> list:
tools = []
for tool_config in OPENAI_TOOLS_CONFIG:
if tool_config["enable"]:
tool_class = tool_config["Tool"]
| tools.append(tool_class.TOOL_MODEL) |
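# --- Editor's illustrative follow-up (assumed helper, not in the repository) --
# Mirrors the loop above: besides collecting TOOL_MODEL schemas for the API
# call, a name -> callable map lets a tool call returned by the model be
# dispatched by its function name.
def _build_registry(tool_classes):
    schemas, dispatch = [], {}
    for cls in tool_classes:
        schemas.append(cls.TOOL_MODEL)
        name = cls.TOOL_MODEL["function"]["name"]
        dispatch[name] = getattr(cls, name)
    return schemas, dispatch
# ------------------------------------------------------------------------------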
====REPOSITORY====
# Repo Name: ruudjuffermans/Event-Driven-Backtester
# Path: backtester/loop.py
class Loop:
def __init__(
self,
data_handler,
execution_handler,
portfolio,
strategy,
heartbeat,
):
self.heartbeat = heartbeat
self.data_handler = data_handler
self.execution_handler = execution_handler
self.portfolio = portfolio
self.strategy = strategy
self.events = queue.Queue()
self.signals = 0
self.orders = 0
self.fills = 0
self.num_strats = 1
self._set_datahandler()
self._set_portfolio()
self._set_execution_handler()
self._set_strategy()
def _set_datahandler(self):
if isinstance(self.data_handler, CSVGenerator):
self.data_handler.register(self.events)
else:
raise NotImplementedError("Data feed not implemented")
def _set_strategy(self):
self.strategy.register(self.data_handler, self.events)
def _set_portfolio(self):
self.portfolio.register(self.data_handler, self.events)
def _set_execution_handler(self):
self.execution_handler.register(self.events)
def _run_backtest(self):
"""
Executes the backtest.
"""
while True:
if self.data_handler.continue_backtest:
self.data_handler.update_bars()
else:
break
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None:
if isinstance(event, MarketEvent):
self.strategy.calculate(event)
self.portfolio.update_timeindex(event)
elif isinstance(event, SignalEvent):
self.signals += 1
self.portfolio.update_signal(event)
elif isinstance(event, OrderEvent):
self.orders += 1
self.execution_handler.execute_order(event)
elif isinstance(event, FillEvent):
self.fills += 1
self.portfolio.update_fill(event)
time.sleep(self.heartbeat)
def _output_performance(self):
"""
Outputs the strategy performance from the backtest.
"""
self.portfolio.create_equity_curve_dataframe()
print("Creating summary stats...")
stats = self.portfolio.output_summary_stats()
print("Creating equity curve...")
print(self.portfolio.equity_curve.tail(10))
pprint.pprint(stats)
print("Signals: %s" % self.signals)
print("Orders: %s" % self.orders)
print("Fills: %s" % self.fills)
def start(self):
"""
Simulates the backtest and outputs portfolio performance.
"""
self._run_backtest()
self._output_performance()
# Path: backtester/generator/csvgenerator.py
class CSVGenerator(Generator):
def __init__(self, symbol_list):
self.csv_dir = Path.cwd() / "data"
self.symbol_list = symbol_list
self.symbol_data = {}
self.latest_symbol_data = {}
self.continue_backtest = True
self._load()
def register(self, events):
self.events = events
def _load(self):
combined_index = None
for symbol in self.symbol_list:
self.symbol_data[symbol] = pd.io.parsers.read_csv(
os.path.join(self.csv_dir, "%s.csv" % symbol),
header=0,
index_col=0,
names=[
"datetime",
"open",
"high",
"low",
"close",
"adj_close",
"volume",
],
)
# Combine the index to pad forward values
if combined_index is None:
combined_index = self.symbol_data[symbol].index
else:
combined_index.union(self.symbol_data[symbol].index)
# Set the latest symbol_data to None
self.latest_symbol_data[symbol] = []
# Reindex the dataframes
for symbol in self.symbol_list:
self.symbol_data[symbol] = (
self.symbol_data[symbol]
.reindex(index=combined_index, method="pad")
.iterrows()
)
def _get_new_bar(self, symbol):
for bar in self.symbol_data[symbol]:
yield bar
def get_latest_bar(self, symbol):
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return bars_list[-1]
def get_latest_bars(self, symbol, N=1):
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return bars_list[-N:]
def get_latest_bar_datetime(self, symbol):
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return bars_list[-1][0]
def get_latest_bar_value(self, symbol, value_type):
try:
bars_list = self.latest_symbol_data[symbol]
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return getattr(bars_list[-1][1], value_type)
def get_latest_bars_values(self, symbol, value_type, N=1):
try:
bars_list = self.get_latest_bars(symbol, N)
except KeyError:
print("That symbol is not available in the historical data set.")
raise
else:
return np.array([getattr(bar[1], value_type) for bar in bars_list])
def update_bars(self):
for symbol in self.symbol_list:
try:
bar = next(self._get_new_bar(symbol))
except StopIteration:
self.continue_backtest = False
else:
if bar is not None:
self.latest_symbol_data[symbol].append(bar)
self.events.put(MarketEvent())
# Path: backtester/execution.py
class SimulatedExecutionHandler(ExecutionHandler):
def __init__(self):
pass
def execute_order(self, event):
if isinstance(event, OrderEvent):
fill_event = FillEvent(
datetime.utcnow(),
event.symbol,
"BT",
event.quantity,
event.direction,
None,
)
self.events.put(fill_event)
# Path: backtester/portfolio.py
class Portfolio:
def __init__(self, window, initial_capital=100000.0):
self.start_date = window.start
self.initial_capital = initial_capital
def register(self, bars, events):
self.bars = bars
self.events = events
self.symbol_list = self.bars.symbol_list
self.current_positions = {symbol: 0 for symbol in self.symbol_list}
self.all_positions = self.define_all_positions()
self.all_holdings = self.define_all_holdings()
self.current_holdings = self.define_current_holdings()
def define_all_positions(self):
positions = {symbol: 0 for symbol in self.symbol_list}
positions["datetime"] = self.start_date
return [positions]
def define_all_holdings(self):
holdings = {symbol: 0.0 for symbol in self.symbol_list}
holdings["datetime"] = self.start_date
holdings["cash"] = self.initial_capital
holdings["commission"] = 0.0
holdings["total"] = self.initial_capital
return [holdings]
def define_current_holdings(self):
holdings = {symbol: 0.0 for symbol in self.symbol_list}
holdings["cash"] = self.initial_capital
holdings["commission"] = 0.0
holdings["total"] = self.initial_capital
return holdings
def update_timeindex(self, event):
latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])
positions = {
symbol: self.current_positions[symbol] for symbol in self.symbol_list
}
positions["datetime"] = latest_datetime
self.all_positions.append(positions)
holdings = {symbol: 0.0 for symbol in self.symbol_list}
holdings["datetime"] = latest_datetime
holdings["cash"] = self.current_holdings["cash"]
holdings["commission"] = self.current_holdings["commission"]
holdings["total"] = self.current_holdings["cash"]
for symbol in self.symbol_list:
market_value = self.current_positions[
symbol
] * self.bars.get_latest_bar_value(symbol, "close")
holdings[symbol] = market_value
holdings["total"] += market_value
self.all_holdings.append(holdings)
def update_positions_after_fill(self, fill):
fill_dir = 0
if fill.direction == "BUY":
fill_dir = 1
if fill.direction == "SELL":
fill_dir = -1
self.current_positions[fill.symbol] += fill_dir * fill.quantity
def update_holdings_after_fill(self, fill):
fill_dir = 0
if fill.direction == "BUY":
fill_dir = 1
if fill.direction == "SELL":
fill_dir = -1
fill_cost = self.bars.get_latest_bar_value(fill.symbol, "close")
cost = fill_dir * fill_cost * fill.quantity
self.current_holdings[fill.symbol] += cost
self.current_holdings["commission"] += fill.commission
self.current_holdings["cash"] -= cost + fill.commission
self.current_holdings["total"] -= cost + fill.commission
def update_fill(self, event):
if isinstance(event, FillEvent):
self.update_positions_after_fill(event)
self.update_holdings_after_fill(event)
def generate_naive_order(self, signal):
order = None
symbol = signal.symbol
direction = signal.signal_type
strength = signal.strength
mkt_quantity = floor(100 * strength)
current_quantity = self.current_positions[symbol]
order_type = "MKT"
if direction == "LONG" and current_quantity == 0:
order = OrderEvent(symbol, order_type, mkt_quantity, "BUY")
if direction == "SHORT" and current_quantity == 0:
order = OrderEvent(symbol, order_type, mkt_quantity, "SELL")
if direction == "EXIT" and current_quantity > 0:
order = OrderEvent(symbol, order_type, abs(current_quantity), "SELL")
if direction == "EXIT" and current_quantity < 0:
order = OrderEvent(symbol, order_type, abs(current_quantity), "BUY")
return order
def update_signal(self, event):
if isinstance(event, SignalEvent):
order_event = self.generate_naive_order(event)
self.events.put(order_event)
def create_equity_curve_dataframe(self):
equity_curve = pd.DataFrame(self.all_holdings)
equity_curve.set_index("datetime", inplace=True)
equity_curve["returns"] = equity_curve["total"].pct_change()
equity_curve["equity_curve"] = (1.0 + equity_curve["returns"]).cumprod()
self.equity_curve = equity_curve
def output_summary_stats(self):
total_return = self.equity_curve["equity_curve"][-1]
returns = self.equity_curve["returns"]
pnl = self.equity_curve["equity_curve"]
sharpe_ratio = create_sharpe_ratio(returns, periods=252 * 60 * 6.5)
drawdown, max_dd, max_dd_duration = create_drawdowns(pnl)
self.equity_curve["drawdown"] = drawdown
stats = [
("Total Return", "%0.2f%%" % ((total_return - 1.0) * 100.0)),
("Sharpe Ratio", "%0.2f" % sharpe_ratio),
("Max Drawdown", "%0.2f%%" % (max_dd * 100.0)),
("Max Drawdown Duration", "%d" % max_dd_duration),
]
self.equity_curve.to_csv("equity.csv")
return stats
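# --- Editor's pandas sketch (numbers are made up, not from the repository) ----
# The bookkeeping done in create_equity_curve_dataframe / output_summary_stats
# above: per-period returns, a cumulative equity curve, and a running drawdown
# (computed here directly; the project uses its create_drawdowns helper).
import pandas as pd

_totals = pd.Series([100000.0, 100500.0, 100200.0, 101000.0], name="total")
_returns = _totals.pct_change().fillna(0.0)
_equity = (1.0 + _returns).cumprod()
_drawdown = _equity.cummax() - _equity
print(_equity.round(5).tolist())
print(float(_drawdown.max()))
# ------------------------------------------------------------------------------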
# Path: backtester/strategy/ma_crossover.py
class MACrossOverStrategy(Strategy):
def __init__(self, short_window=100, long_window=400):
self.short_window = short_window
self.long_window = long_window
def register(self, bars, events):
self.bars = bars
self.events = events
self.symbol_list = self.bars.symbol_list
self.bought = self._calculate_initial_bought()
def _calculate_initial_bought(self):
bought = {symbol: "OUT" for symbol in self.symbol_list}
return bought
def calculate(self, event):
if isinstance(event, MarketEvent):
for symbol in self.symbol_list:
bars = self.bars.get_latest_bars_values(
symbol, "close", N=self.long_window
)
bar_datetime = self.bars.get_latest_bar_datetime(symbol)
if bars is not None and bars != []:
print(bar_datetime)
# long_sma = np.mean( bars[-self.long_window:] )
# dt = datetime.datetime.utcnow()
# signal_type = ""
# strength = 1.0
# if short_sma > long_sma and self.bought[symbol] == "OUT":
# print("LONG position at: %s" % bar_datetime)
# signal_type = "LONG"
# signal = SignalEvent(symbol, dt, signal_type, strength)
# self.events.put(signal)
# self.bought[symbol] = "LONG"
# elif short_sma < long_sma and self.bought[symbol] == "LONG":
# print("SHORT position at: %s" % bar_datetime)
# signal_type = "EXIT"
# signal = SignalEvent(symbol, dt, signal_type, strength)
# self.events.put(signal)
# self.bought[symbol] = "OUT"
# Path: backtester/types.py
class Window:
def __init__(self, start, end, interval):
self.start = start
self.end = end
self.interval = interval
# Path: example.py
from datetime import datetime
from backtester.loop import Loop
from backtester.generator import CSVGenerator
from backtester.execution import SimulatedExecutionHandler
from backtester.portfolio import Portfolio
from backtester.strategy import MACrossOverStrategy
from backtester.types import Window
symbol_list = ["BIG"]
window = Window(
start=datetime(2016, 1, 1, 0, 0, 0),
end=datetime(2021, 1, 1, 0, 0, 0),
interval="1d",
)
generator = CSVGenerator(symbol_list)
portfolio = Portfolio(window, 100000.0)
strategy = MACrossOverStrategy()
execution = SimulatedExecutionHandler()
backtest = Loop(generator, execution, portfolio, strategy, 0.0)
| backtest.start() |
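# --- Editor's generic sketch (placeholder event names, not the backtester's classes)
# Loop._run_backtest above drains a queue.Queue and branches on the event type;
# the same control flow in miniature:
import queue

_events = queue.Queue()
_events.put(("MARKET", None))
_events.put(("SIGNAL", "LONG"))
while True:
    try:
        _kind, _payload = _events.get(False)
    except queue.Empty:
        break
    if _kind == "MARKET":
        print("update strategy and portfolio time index")
    elif _kind == "SIGNAL":
        print(f"turn signal {_payload!r} into an order")
# ------------------------------------------------------------------------------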
====REPOSITORY====
# Repo Name: Samuel-Effiong/Django-Dynamic-Table
# Path: django_dynamic_table/models.py
class DynamicTable(models.Model):
table_name = models.CharField(_('Table Name'), max_length=255, unique=True)
table_description = models.TextField(_('Table Description'), blank=True)
date_created = models.DateTimeField(_('Date Created'), default=timezone.now)
table_columns = models.ManyToManyField('TableColumn', blank=True)
table_rows = models.ManyToManyField('TableRow', blank=True)
class Meta:
ordering = ('-date_created', )
def __str__(self) -> str:
return f"{self.table_name}"
def __total_table_rows(self) -> int:
field = self.table_columns.first()
if field and isinstance(field, TableColumn):
return self.table_columns.all().count()
else:
# the table is empty
return 0
def __total_table_columns(self) -> int:
return self.table_columns.all().count()
def table_info(self) -> dict[str, int]:
description = {
'rows': self.__total_table_rows(),
'columns': self.__total_table_columns()
}
return description
def is_empty(self) -> bool:
table_info = self.table_info()
rows = table_info['rows']
columns = table_info['columns']
return True if columns == 0 or rows == 0 else False
def is_column(self, column_name: str) -> bool:
if not isinstance(column_name, str):
raise ValueError("column name must be a str")
try:
column = self.table_columns.get(column_name=column_name)
return True
except TableColumn.DoesNotExist:
return False
def get_supported_data_types(self) -> list[str]:
return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]
def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:
supported_data_types = self.get_supported_data_types()
if isinstance(data_type, str):
return data_type.lower().strip() in supported_data_types
elif isinstance(data_type, (list, tuple, set)):
return [_type.lower().strip() in supported_data_types for _type in data_type]
else:
raise ValueError('arg must be either a str or a sequence')
def add_column(self, column_name: str, data_type: str):
if isinstance(column_name, str) and isinstance(data_type, str):
if not self.data_type_is_supported(data_type):
raise UnSupportedDataType()
if self.is_column(column_name):
raise DuplicateColumnInTable()
table_column = TableColumn(
table=self,
column_name=column_name,
column_data_type=data_type
)
table_column.save()
self.table_columns.add(table_column)
return table_column
else:
raise DynamicTableError("argument must be str, use self.bulk_add_columns to add multiple columns")
def bulk_add_columns(self, column_names: Sequence[str], data_types: Sequence[str]):
allowed_argument_type = (list, tuple, set)
if isinstance(column_names, allowed_argument_type) and isinstance(data_types, allowed_argument_type):
if len(column_names) != len(data_types):
raise DynamicTableError(f"len({column_names}) = {len(column_names)} != len({data_types}) = {len(data_types)}")
else:
# check if list of data_types contains any unsupported data type
supported_data_type = self.data_type_is_supported(data_types)
if False in supported_data_type:
raise UnSupportedDataType(f"{data_types} data type that are supported are: {supported_data_type}")
else:
# check if the provided column names contain duplicates, raise an error if it does
unique_column_names = set(column_names)
if len(column_names) != len(unique_column_names):
raise DuplicateColumnInTable()
is_column = [self.is_column(column) for column in column_names]
if True in is_column:
raise DuplicateColumnInTable()
columns = [
TableColumn.objects.create(
table=self,
column_name=column_name,
column_data_type=data_type
)
for column_name, data_type in zip(column_names, data_types, strict=True)
# the above further exception should not be activated, but adding it there,
# if just in case, for some unknown reason it escape the other safeguard.
]
self.table_columns.add(*columns)
return columns
else:
raise DynamicTableError("argument must be a sequence. use self.add_column to add a single column")
def add_row(self, value: dict):
if not isinstance(value, dict):
raise ValueError(f"{value} is not a list or a dict")
if self.__total_table_columns() == 0:
raise TableHaveNoColumn()
row = []
table_row = TableRow.objects.create(table=self)
for table_column in self.table_columns.all():
cell_value = value.get(table_column.column_name, "")
cell = CellValue.objects.create(
value=cell_value, table=self,
table_column=table_column,
table_row=table_row
)
row.append(cell)
# add cell to column
table_column.column_cells.add(cell)
# add cell to row
table_row.row_cells.add(*row)
# add row to table
self.table_rows.add(table_row)
return table_row
def bulk_add_rows(self, values: Sequence[dict]) -> list:
if not isinstance(values, (list, tuple, set)):
raise ValueError('values must be a sequence of dict')
rows = []
for row in values:
if not isinstance(row, dict):
raise ValueError('values must be a sequence of dict')
if self.__total_table_columns() == 0:
raise TableHaveNoColumn()
rows.append(self.add_row(row))
return rows
def delete_column(self, column_name):
# Using get instead of filter: if for some reason the unique constraint
# was disabled in the table column definition, this will doubly ensure
# that the fields are unique; otherwise it will always raise an error if it
# encounters duplicate column names.
if not isinstance(column_name, str):
raise ValueError('column_name must be a str')
try:
column = self.table_columns.get(column_name=column_name)
except TableColumn.MultipleObjectsReturned:
raise DuplicateColumnInTable()
except TableColumn.DoesNotExist:
raise ColumnNotInTable()
else:
# remove column from the table
self.table_columns.remove(column)
# delete the removed column and all the cells associated with it
column.delete()
return column
def delete_row(self, row_index=None):
"""if row_index is None remove the last row"""
if not isinstance(row_index, (int, type(None))):
raise TypeError("Row index value must be an integer")
try:
if row_index is None:
row = self.table_rows.last()
else:
row = self.table_rows.get(pk=row_index)
except TableRow.DoesNotExist:
raise RowNotInTable()
else:
# remove row from the table
self.table_rows.remove(row)
# delete the removed row and all the cells associated with it
row.delete()
return row
def get_cell(self, column_name, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
if not self.is_column(column_name):
raise ColumnNotInTable()
try:
cell = CellValue.objects.get(
table=self,
table_column__column_name=column_name,
table_row_id=row_index
)
return cell
except CellValue.DoesNotExist:
raise CellDoesNotExist
def get_column_cells(self, column_name):
if not self.is_column(column_name):
raise ColumnNotInTable()
column = TableColumn.objects.get(table=self, column_name=column_name)
column_cells = column.column_cells.all()
return list(column_cells)
def get_row_cells(self, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
try:
row = TableRow.objects.get(table=self, id=row_index)
row_cells = row.row_cells.all()
except TableRow.DoesNotExist:
raise RowNotInTable()
return list(row_cells)
# Path: django_dynamic_table/models.py
class TableColumn(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
column_name = models.CharField(max_length=255, unique=True)
column_data_type = models.CharField(max_length=15, choices=__SUPPORTED_DATA_TYPE_CHOICES__)
column_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.column_name}: {self.column_data_type} -- {self.table}"
def _get_column_values(self):
return self.column_cells.all()
# Path: django_dynamic_table/models.py
class TableRow(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
row_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.table} Table: Row no. {self.id}"
def to_dict(self):
values = {
item.column.column_name: item.value
for item in self.row_cells.all()
}
return values
# Path: django_dynamic_table/models.py
class CellValue(models.Model):
"""Synonymous with the cell in a spreadsheet, it contains the value of the
table along with relevant information about its position in the table"""
value = models.TextField(blank=True)
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
table_column = models.ForeignKey(TableColumn, on_delete=models.CASCADE)
table_row = models.ForeignKey(TableRow, blank=True, on_delete=models.CASCADE)
def __str__(self):
return self.value
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.full_clean()
super(CellValue, self).save()
def clean(self):
super(CellValue, self).clean()
self.__validate_data_type__(self.value, self.table_column.column_data_type)
def __validate_data_type__(self, value, data_type):
"""
Ensures that the value is saved in the database in a format that
can easily be converted to the desired data type
"""
if data_type == 'char' or data_type == 'textfield':
self.value = str(value)
elif data_type == 'int':
if not isinstance(value, int):
try:
if value:
self.value = int(float(value))
else:
self.value = ""
except ValueError:
raise CantParseValueToDataType(f"{value} to {data_type}")
elif data_type == 'float':
if not isinstance(value, float):
try:
if value:
self.value = float(value)
else:
self.value = ""
except ValueError:
raise CantParseValueToDataType(f"{value} to {data_type}")
elif data_type == 'datetime':
if value:
# is it a str or a datetime object
if isinstance(value, str):
try:
value = self.value.strip().lower()
value = datetime.fromisoformat(value)
self.value = value.isoformat()
except ValueError:
self.value = ""
else:
try:
self.value = value.isoformat()
except Exception:
self.value = ''
else:
self.value = ""
elif data_type == 'bool':
if value:
if not isinstance(value, bool):
value = str(value).strip().title()
if value == 'True' or value == 'False':
self.value = eval(value)
else:
raise CantParseValueToDataType(f"{value} to {data_type}")
else:
self.value = ""
def get_value(self):
"""Get the value base on the data type
If the data type is of file, it will retrieve the file from where
it was uploaded, else format the value to the data type.
The value should not be accessed directly.
"""
data_type = self.table_column.column_data_type
if data_type == 'char' or data_type == 'textfield':
return self.value
elif data_type == 'int':
try:
return int(float(self.value))
except ValueError:
return self.value
elif data_type == 'float':
try:
return float(self.value)
except ValueError:
return self.value
elif data_type == 'bool':
try:
# FIXME: Put more restrictions on this
return eval(self.value)
except Exception:
return self.value
elif data_type == 'datetime':
try:
return datetime.fromisoformat(self.value)
except ValueError:
return self.value
# Path: django_dynamic_table/errors.py
class TableHaveNoRow(DynamicTableError):
pass
# Path: django_dynamic_table/errors.py
class TableHaveNoColumn(DynamicTableError):
pass
# Path: django_dynamic_table/errors.py
class ColumnNotInTable(DynamicTableError):
pass
# Path: django_dynamic_table/errors.py
class RowNotInTable(DynamicTableError):
pass
# Path: django_dynamic_table/errors.py
class DuplicateColumnInTable(DynamicTableError):
pass
# Path: django_dynamic_table/errors.py
class DynamicTableError(Exception):
pass
# Path: django_dynamic_table/errors.py
class UnSupportedDataType(TableColumnError):
pass
# Path: django_dynamic_table/errors.py
class CantParseValueToDataType(CellValueError):
pass
# Path: django_dynamic_table/errors.py
class CellDoesNotExist(CellValueError):
pass
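# --- Editor's plain-Python sketch (illustrative parser table, not the project's code)
# The idea behind CellValue.__validate_data_type__ / get_value above: map the
# declared column type to a parser and fail loudly when the raw text cannot be
# coerced.
from datetime import datetime

_PARSERS = {
    "int": lambda v: int(float(v)),
    "float": float,
    "bool": lambda v: {"True": True, "False": False}[str(v).strip().title()],
    "datetime": datetime.fromisoformat,
    "char": str,
}

def _parse_cell(value, data_type):
    try:
        return _PARSERS[data_type](value)
    except (KeyError, ValueError):
        raise ValueError(f"cannot parse {value!r} as {data_type}")

print(_parse_cell("3.0", "int"), _parse_cell("true", "bool"))  # 3 True
# ------------------------------------------------------------------------------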
# Path: django_dynamic_table/tests.py
import datetime
from typing import List
from django.test import TestCase
from django.utils import timezone
from .models import DynamicTable, TableColumn, TableRow, CellValue
from .errors import (
TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable,
RowNotInTable, DuplicateColumnInTable, DynamicTableError,
UnSupportedDataType, CantParseValueToDataType, CellDoesNotExist
)
# Create your tests here.
class DynamicTableTest(TestCase):
def setUp(self) -> None:
self.name = 'Employee Records'
self.description = "Contains company employee personal information"
self.date_created = timezone.now().date()
self.column_name = 'First Name'
self.data_type = 'char'
self.supported_data_type = ['int', 'char', 'textfield', 'float', 'bool', 'date']
self.table = DynamicTable(
table_name=self.name,
table_description=self.description
)
self.table.save()
def test_table_creation_with_no_columns_and_rows(self):
self.assertEqual(self.name, str(self.table))
self.assertEqual(self.description, self.table.table_description)
self.assertEqual(self.date_created, self.table.date_created.date())
default_value = {
'rows': 0,
'columns': 0
}
self.assertDictEqual(default_value, self.table.table_info())
# Delete columns test
self.assertRaises(ColumnNotInTable, self.table.delete_column, column_name='Name')
# Delete rows test
self.assertRaises(RowNotInTable, self.table.delete_row, row_index=1)
self.assertRaises(TypeError, self.table.delete_row, row_index='1')
self.assertTrue(self.table.is_empty())
# ensures that rows can't be added to an empty table
self.assertRaises(TableHaveNoColumn, self.table.add_row, value={})
self.assertRaises(ValueError, self.table.add_row, value='love')
self.assertRaises(ValueError, self.table.add_row, value=[1, 2, 3])
self.assertRaises(ValueError, self.table.add_row, value=(1, 2, 3))
self.assertRaises(TableHaveNoColumn, self.table.bulk_add_rows, values=[{}, {}])
self.assertRaises(ValueError, self.table.bulk_add_rows, values={})
self.assertRaises(ValueError, self.table.bulk_add_rows, values='love')
self.assertRaises(ValueError, self.table.bulk_add_rows, values=(1, 2))
self.assertRaises(ValueError, self.table.bulk_add_rows, values=[1, '2'])
def test_supported_data_types(self):
self.assertListEqual(sorted(self.supported_data_type), sorted(self.table.get_supported_data_types()))
| self.assertTrue(self.table.data_type_is_supported(' CHAR')) |
====REPOSITORY====
# Repo Name: zhcui/polar_preview
# Path: polar/utils/misc.py
def mdot(*args):
"""
Reduced matrix dot.
"""
return reduce(np.dot, args)
# Path: polar/utils/misc.py
def kdot(a, b):
"""
Matrix dot with kpoints.
"""
ka, s1_a, _ = a.shape
kb, _, s2_b = b.shape
assert ka == kb
res = np.zeros((ka, s1_a, s2_b), dtype=np.result_type(a.dtype, b.dtype))
for k in range(ka):
np.dot(a[k], b[k], out=res[k])
return res
# Path: polar/utils/misc.py
def get_spin_dim(arrays, non_spin_dim=3):
"""
Get spin dimension for a list of arrays.
"""
spin = 1
for a in arrays:
a = np.asarray(a)
if a.ndim == non_spin_dim:
continue
elif a.ndim == non_spin_dim + 1:
spin = max(spin, a.shape[0])
else:
raise ValueError
return spin
# Path: polar/utils/misc.py
def add_spin_dim(H, spin, non_spin_dim=3):
"""
Add an additional dimension to array H.
"""
H = np.asarray(H)
if H.ndim == non_spin_dim:
H = H[None]
assert H.ndim == (non_spin_dim + 1)
if H.shape[0] < spin:
H = np.asarray((H[0],) * spin)
return H
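# --- Editor's quick usage sketch for the helpers above (values are made up) ---
# mdot chains matrix products left to right; add_spin_dim broadcasts a
# spin-restricted array to the requested number of spin channels.
import numpy as np
from functools import reduce

_a, _b, _c = np.eye(2), 2.0 * np.eye(2), np.ones((2, 2))
print(reduce(np.dot, (_a, _b, _c)))    # same result as mdot(_a, _b, _c)
_h = np.zeros((3, 3))
print(np.asarray((_h,) * 2).shape)     # (2, 3, 3), what add_spin_dim(_h, 2) yields
# ------------------------------------------------------------------------------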
# Path: polar/basis/trans_1e.py
import numpy as np
import scipy.linalg as la
from polar.utils.misc import (mdot, kdot, get_spin_dim, add_spin_dim)
dm_ao_ao[s] = mdot(C_ao_mo[s], dm_mo_mo[s], C_ao_mo[s].conj().T)
return dm_ao_ao
def trans_h1_to_mo_mol(h_ao_ao, C_ao_mo):
h_ao_ao = np.asarray(h_ao_ao)
C_ao_mo = np.asarray(C_ao_mo)
return trans_rdm1_to_ao_mol(h_ao_ao, np.swapaxes(C_ao_mo.conj(), -1, -2))
trans_h1_to_lo_mol = trans_h1_to_mo_mol
def trans_rdm1_to_mo_mol(rdm1_ao_ao, C_ao_mo, ovlp):
rdm1_ao_ao = np.asarray(rdm1_ao_ao)
C_ao_mo = np.asarray(C_ao_mo)
nao, nmo = C_ao_mo.shape[-2:]
ovlp = np.asarray(ovlp)
# spin should be encoded in C_ao_mo,
# rdm1_ao_ao may be spin unrelated
if C_ao_mo.ndim < rdm1_ao_ao.ndim:
C_ao_mo = add_spin_dim(C_ao_mo, rdm1_ao_ao.shape[0], non_spin_dim=2)
if C_ao_mo.ndim == 2:
C_inv = C_ao_mo.conj().T.dot(ovlp)
rdm1_mo_mo = mdot(C_inv, rdm1_ao_ao, C_inv.conj().T)
else:
spin = C_ao_mo.shape[0]
rdm1_ao_ao = add_spin_dim(rdm1_ao_ao, spin, non_spin_dim=2)
assert(rdm1_ao_ao.ndim == C_ao_mo.ndim)
rdm1_mo_mo = np.zeros((spin, nmo, nmo), dtype=C_ao_mo.dtype)
for s in range(spin):
if ovlp.ndim == 3:
C_inv = C_ao_mo[s].conj().T.dot(ovlp[s])
else:
C_inv = C_ao_mo[s].conj().T.dot(ovlp)
rdm1_mo_mo[s] = mdot(C_inv, rdm1_ao_ao[s], C_inv.conj().T)
return rdm1_mo_mo
trans_rdm1_to_lo_mol = trans_rdm1_to_mo_mol
def trans_rdm2_to_ao_mol(rdm2_mo, C_ao_mo, aabbab=True):
r"""
Transform rdm2 to ao basis. [For molecular calculations, no kpts]
\gamma^{AO} = C C \rdm2^{MO} C^{\dagger} C^{\dagger}
NOTE assume aaaa, bbbb, aabb order
"""
rdm2_mo = np.asarray(rdm2_mo)
C_ao_mo = np.asarray(C_ao_mo)
# spin should be encoded in C_ao_mo,
# rdm2_mo may be spin unrelated
if C_ao_mo.ndim == 2 and rdm2_mo.ndim == 5:
C_ao_mo = add_spin_dim(C_ao_mo, 2, non_spin_dim=2)
if C_ao_mo.ndim == 2:
rdm2_ao = _trans_rdm2_to_ao_mol(rdm2_mo, C_ao_mo)
else:
spin = rdm2_mo.shape[0]
nao = C_ao_mo.shape[-2]
rdm2_ao = np.zeros((spin, nao, nao, nao, nao), dtype=rdm2_mo.dtype)
# ZHC NOTE assume aaaa, aabb, bbbb order
if spin == 1:
rdm2_ao[0] = _trans_rdm2_to_ao_mol(rdm2_mo[0], C_ao_mo[0])
elif spin == 3:
if aabbab:
# aaaa
rdm2_ao[0] = _trans_rdm2_to_ao_mol(rdm2_mo[0], C_ao_mo[0])
# bbbb
rdm2_ao[1] = _trans_rdm2_to_ao_mol(rdm2_mo[1], C_ao_mo[1])
# aabb
rdm2_ao[2] = _trans_rdm2_to_ao_mol(rdm2_mo[2], C_ao_mo[0], C_ao_mo[1])
else:
# aaaa
rdm2_ao[0] = _trans_rdm2_to_ao_mol(rdm2_mo[0], C_ao_mo[0])
# aabb
rdm2_ao[1] = _trans_rdm2_to_ao_mol(rdm2_mo[1], C_ao_mo[0], C_ao_mo[1])
# bbbb
rdm2_ao[2] = _trans_rdm2_to_ao_mol(rdm2_mo[2], C_ao_mo[1])
else:
raise ValueError
return rdm2_ao
def _trans_rdm2_to_ao_mol(rdm2_mo, C_a, C_b=None):
if C_b is None:
C_b = C_a
assert C_a.shape == C_b.shape
#nao, nmo = C_a.shape[-2:]
## (M1M2|M3M4) -> (A1M2|M3M4)
#rdm2_ao = np.dot(C_a, rdm2_mo.reshape(nmo,-1))
## (A1M2|M3M4) -> (A1M2|M3B4)
#rdm2_ao = np.dot(rdm2_ao.reshape(-1,nmo), C_b.conj().T)
## (A1M2|M3B4) -> (M3B4|A1M2)
#rdm2_ao = rdm2_ao.reshape((nao,nmo,nmo,nao)).transpose(2,3,0,1)
## (M3B4|A1M2) -> (B3B4|A1M2)
#rdm2_ao = np.dot(C_b, rdm2_ao.reshape(nmo,-1))
## (B3B4|A1M2) -> (B3B4|A1A2)
#rdm2_ao = np.dot(rdm2_ao.reshape(-1,nmo), C_a.conj().T)
## (B3B4|A1A2) -> (A1A2|B3B4)
#rdm2_ao = rdm2_ao.reshape([nao]*4).transpose((2,3,0,1))
rdm2_ao = np.einsum("ijkl, pi, qj, rk, sl -> pqrs", rdm2_mo,
C_a.conj(), C_a, C_b.conj(), C_b, optimize=True)
return rdm2_ao
# *****************************************************************************
# basis rotation related
# *****************************************************************************
def tile_u_matrix(u_val, u_virt=None, u_core=None):
r"""
Tile the u matrix from different subspaces.
u has shape (nkpts, nmo, nlo)
return C_mo_lo.
Args:
u_val: valence
u_virt: virtual
| u_core: core |
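# --- Editor's hedged sketch (one plausible reading; block ordering is an assumption,
# not necessarily the repository's) -------------------------------------------
# "Tiling" subspace rotations typically means placing the core / valence /
# virtual blocks on the diagonal of one larger unitary, per k-point:
import numpy as np
import scipy.linalg as la

_u_core = np.eye(2)                                        # frozen core block
_theta = 0.3
_u_val = np.array([[np.cos(_theta), np.sin(_theta)],
                   [-np.sin(_theta), np.cos(_theta)]])     # a 2x2 valence rotation
_u_virt = np.eye(1)
_u_full = np.stack([la.block_diag(_u_core, _u_val, _u_virt) for _k in range(2)])
print(_u_full.shape)  # (2, 5, 5) -> (nkpts, nmo, nlo)
# ------------------------------------------------------------------------------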
====REPOSITORY====
# Repo Name: YaoFANGUK/video-subtitle-remover
# Path: backend/tools/train/trainer_sttn.py
class Trainer:
def __init__(self, config, debug=False):
# Initialise the trainer
self.config = config # 保存配置信息
self.epoch = 0 # 当前训练所处的epoch
self.iteration = 0 # 当前训练迭代次数
if debug:
# In debug mode, save and validate more frequently
self.config['trainer']['save_freq'] = 5
self.config['trainer']['valid_freq'] = 5
self.config['trainer']['iterations'] = 5
# Set up the dataset and data loader
self.train_dataset = Dataset(config['data_loader'], split='train', debug=debug) # 创建训练集对象
self.train_sampler = None # 初始化训练集采样器为None
self.train_args = config['trainer'] # 训练过程参数
if config['distributed']:
# For distributed training, initialise the distributed sampler
self.train_sampler = DistributedSampler(
self.train_dataset,
num_replicas=config['world_size'],
rank=config['global_rank']
)
self.train_loader = DataLoader(
self.train_dataset,
batch_size=self.train_args['batch_size'] // config['world_size'],
shuffle=(self.train_sampler is None), # 如果没有采样器则进行打乱
num_workers=self.train_args['num_workers'],
sampler=self.train_sampler
)
# Set up the loss functions
self.adversarial_loss = AdversarialLoss(type=self.config['losses']['GAN_LOSS']) # 对抗性损失
self.adversarial_loss = self.adversarial_loss.to(self.config['device']) # 将损失函数转移到相应设备
self.l1_loss = nn.L1Loss() # L1损失
# Initialise the generator and discriminator models
self.netG = InpaintGenerator() # 生成网络
self.netG = self.netG.to(self.config['device']) # 转移到设备
self.netD = Discriminator(
in_channels=3, use_sigmoid=config['losses']['GAN_LOSS'] != 'hinge'
)
self.netD = self.netD.to(self.config['device']) # 判别网络
# Initialise the optimizers
self.optimG = torch.optim.Adam(
self.netG.parameters(), # 生成器参数
lr=config['trainer']['lr'], # 学习率
betas=(self.config['trainer']['beta1'], self.config['trainer']['beta2'])
)
self.optimD = torch.optim.Adam(
self.netD.parameters(), # 判别器参数
lr=config['trainer']['lr'], # 学习率
betas=(self.config['trainer']['beta1'], self.config['trainer']['beta2'])
)
self.load() # 加载模型
if config['distributed']:
# For distributed training, wrap the models with DistributedDataParallel
self.netG = DDP(
self.netG,
device_ids=[self.config['local_rank']],
output_device=self.config['local_rank'],
broadcast_buffers=True,
find_unused_parameters=False
)
self.netD = DDP(
self.netD,
device_ids=[self.config['local_rank']],
output_device=self.config['local_rank'],
broadcast_buffers=True,
find_unused_parameters=False
)
        # set up the loggers
        self.dis_writer = None # discriminator writer
        self.gen_writer = None # generator writer
        self.summary = {} # summary statistics
if self.config['global_rank'] == 0 or (not config['distributed']):
            # if not distributed, or this is the main node of distributed training
self.dis_writer = SummaryWriter(
os.path.join(config['save_dir'], 'dis')
)
self.gen_writer = SummaryWriter(
os.path.join(config['save_dir'], 'gen')
)
    # get the current learning rate
def get_lr(self):
return self.optimG.param_groups[0]['lr']
    # adjust the learning rate
def adjust_learning_rate(self):
        # compute the decayed learning rate
decay = 0.1 ** (min(self.iteration, self.config['trainer']['niter_steady']) // self.config['trainer']['niter'])
new_lr = self.config['trainer']['lr'] * decay
        # if the new learning rate differs from the current one, update it in both optimizers
if new_lr != self.get_lr():
for param_group in self.optimG.param_groups:
param_group['lr'] = new_lr
for param_group in self.optimD.param_groups:
param_group['lr'] = new_lr
    # add summary information
def add_summary(self, writer, name, val):
        # add and update the statistic, accumulating on every iteration
if name not in self.summary:
self.summary[name] = 0
self.summary[name] += val
        # log once every 100 iterations
if writer is not None and self.iteration % 100 == 0:
writer.add_scalar(name, self.summary[name] / 100, self.iteration)
self.summary[name] = 0
    # load the netG and netD models
def load(self):
        model_path = self.config['save_dir'] # directory where models are saved
        # check whether a recent model checkpoint exists
if os.path.isfile(os.path.join(model_path, 'latest.ckpt')):
            # read the number of the last epoch
latest_epoch = open(os.path.join(
model_path, 'latest.ckpt'), 'r').read().splitlines()[-1]
else:
            # if latest.ckpt does not exist, list the saved model files and take the most recent one
ckpts = [os.path.basename(i).split('.pth')[0] for i in glob.glob(
os.path.join(model_path, '*.pth'))]
            ckpts.sort() # sort the model files to find the most recent one
            latest_epoch = ckpts[-1] if len(ckpts) > 0 else None # most recent epoch value
if latest_epoch is not None:
            # build the file paths of the generator and discriminator checkpoints
gen_path = os.path.join(
model_path, 'gen_{}.pth'.format(str(latest_epoch).zfill(5)))
dis_path = os.path.join(
model_path, 'dis_{}.pth'.format(str(latest_epoch).zfill(5)))
opt_path = os.path.join(
model_path, 'opt_{}.pth'.format(str(latest_epoch).zfill(5)))
            # on the main node, print which model is being loaded
if self.config['global_rank'] == 0:
print('Loading model from {}...'.format(gen_path))
            # load the generator
data = torch.load(gen_path, map_location=self.config['device'])
self.netG.load_state_dict(data['netG'])
            # load the discriminator
data = torch.load(dis_path, map_location=self.config['device'])
self.netD.load_state_dict(data['netD'])
            # load the optimizer states
data = torch.load(opt_path, map_location=self.config['device'])
self.optimG.load_state_dict(data['optimG'])
self.optimD.load_state_dict(data['optimD'])
            # restore the current epoch and iteration count
self.epoch = data['epoch']
self.iteration = data['iteration']
else:
            # if no trained model file is found, print a warning
if self.config['global_rank'] == 0:
print('Warning: There is no trained model found. An initialized model will be used.')
    # save model parameters, called once per evaluation cycle (eval_epoch)
def save(self, it):
        # only save on the process with global rank 0, usually the main node
if self.config['global_rank'] == 0:
            # path for saving the generator state dict
gen_path = os.path.join(
self.config['save_dir'], 'gen_{}.pth'.format(str(it).zfill(5)))
            # path for saving the discriminator state dict
dis_path = os.path.join(
self.config['save_dir'], 'dis_{}.pth'.format(str(it).zfill(5)))
            # path for saving the optimizer state dicts
opt_path = os.path.join(
self.config['save_dir'], 'opt_{}.pth'.format(str(it).zfill(5)))
            # print a message indicating that the model is being saved
print('\nsaving model to {} ...'.format(gen_path))
            # if the models are wrapped by DataParallel or DDP, unwrap them to get the underlying modules
if isinstance(self.netG, torch.nn.DataParallel) or isinstance(self.netG, DDP):
netG = self.netG.module
netD = self.netD.module
else:
netG = self.netG
netD = self.netD
            # save the generator and discriminator parameters
torch.save({'netG': netG.state_dict()}, gen_path)
torch.save({'netD': netD.state_dict()}, dis_path)
            # save the current epoch, iteration count, and optimizer states
torch.save({
'epoch': self.epoch,
'iteration': self.iteration,
'optimG': self.optimG.state_dict(),
'optimD': self.optimD.state_dict()
}, opt_path)
            # write the latest iteration number to the "latest.ckpt" file
os.system('echo {} > {}'.format(str(it).zfill(5),
os.path.join(self.config['save_dir'], 'latest.ckpt')))
    # training entry point
def train(self):
        # initialize the progress bar range
pbar = range(int(self.train_args['iterations']))
        # show the progress bar only on the global rank 0 process
if self.config['global_rank'] == 0:
pbar = tqdm(pbar, initial=self.iteration, dynamic_ncols=True, smoothing=0.01)
        # start the training loop
while True:
            self.epoch += 1 # increment the epoch counter
if self.config['distributed']:
                # for distributed training, set the sampler epoch so each process sees different data
self.train_sampler.set_epoch(self.epoch)
            # train for one epoch
self._train_epoch(pbar)
            # exit the loop once the iteration count exceeds the configured limit
if self.iteration > self.train_args['iterations']:
break
        # training finished
print('\nEnd training....')
    # process inputs and compute losses for one training epoch
def _train_epoch(self, pbar):
        device = self.config['device'] # get the device
        # iterate over the data loader
for frames, masks in self.train_loader:
            # adjust the learning rate
self.adjust_learning_rate()
            # increment the iteration count
self.iteration += 1
            # move frames and masks to the device
frames, masks = frames.to(device), masks.to(device)
            b, t, c, h, w = frames.size() # get the frame and mask dimensions
            masked_frame = (frames * (1 - masks).float()) # apply the masks to the frames
            pred_img = self.netG(masked_frame, masks) # use the generator to produce the inpainted image
            # reshape frames and masks to match the network's input format
frames = frames.view(b * t, c, h, w)
masks = masks.view(b * t, 1, h, w)
            comp_img = frames * (1. - masks) + masks * pred_img # compose the final output image
            gen_loss = 0 # initialize the generator loss
            dis_loss = 0 # initialize the discriminator loss
            # discriminator adversarial loss
            real_vid_feat = self.netD(frames) # discriminator output on real frames
            fake_vid_feat = self.netD(comp_img.detach()) # discriminator output on generated frames; detach so gradients are not computed for the generator
            dis_real_loss = self.adversarial_loss(real_vid_feat, True, True) # loss on real frames
            dis_fake_loss = self.adversarial_loss(fake_vid_feat, False, True) # loss on generated frames
            dis_loss += (dis_real_loss + dis_fake_loss) / 2 # averaged discriminator loss
            # add the discriminator losses to the summary
self.add_summary(self.dis_writer, 'loss/dis_vid_fake', dis_fake_loss.item())
self.add_summary(self.dis_writer, 'loss/dis_vid_real', dis_real_loss.item())
            # optimize the discriminator
self.optimD.zero_grad()
dis_loss.backward()
self.optimD.step()
            # generator adversarial loss
gen_vid_feat = self.netD(comp_img)
            gan_loss = self.adversarial_loss(gen_vid_feat, True, False) # adversarial loss for the generator
            gan_loss = gan_loss * self.config['losses']['adversarial_weight'] # scale by the adversarial weight
            gen_loss += gan_loss # accumulate into the generator loss
            # add the generator adversarial loss to the summary
self.add_summary(self.gen_writer, 'loss/gan_loss', gan_loss.item())
            # generator L1 loss
            hole_loss = self.l1_loss(pred_img * masks, frames * masks) # loss only on the masked regions
            # normalize by the mean mask value and scale by hole_weight from the config
hole_loss = hole_loss / torch.mean(masks) * self.config['losses']['hole_weight']
            gen_loss += hole_loss # accumulate into the generator loss
            # add hole_loss to the summary
self.add_summary(self.gen_writer, 'loss/hole_loss', hole_loss.item())
            # L1 loss on the regions outside the mask
valid_loss = self.l1_loss(pred_img * (1 - masks), frames * (1 - masks))
            # normalize by the mean of the unmasked area and scale by valid_weight from the config
valid_loss = valid_loss / torch.mean(1 - masks) * self.config['losses']['valid_weight']
            gen_loss += valid_loss # accumulate into the generator loss
            # add valid_loss to the summary
self.add_summary(self.gen_writer, 'loss/valid_loss', valid_loss.item())
            # optimize the generator
self.optimG.zero_grad()
gen_loss.backward()
self.optimG.step()
            # console logging
if self.config['global_rank'] == 0:
                pbar.update(1) # update the progress bar
                pbar.set_description(( # set the progress bar description
                    f"d: {dis_loss.item():.3f}; g: {gan_loss.item():.3f};" # print the loss values
f"hole: {hole_loss.item():.3f}; valid: {valid_loss.item():.3f}")
)
            # save the model
if self.iteration % self.train_args['save_freq'] == 0:
self.save(int(self.iteration // self.train_args['save_freq']))
            # stop once the iteration limit is reached
if self.iteration > self.train_args['iterations']:
break
# Path: backend/tools/train/utils_sttn.py
def get_world_size():
"""Find OMPI world size without calling mpi functions
:rtype: int
"""
if os.environ.get('PMI_SIZE') is not None:
return int(os.environ.get('PMI_SIZE') or 1)
elif os.environ.get('OMPI_COMM_WORLD_SIZE') is not None:
return int(os.environ.get('OMPI_COMM_WORLD_SIZE') or 1)
else:
return torch.cuda.device_count()
# Path: backend/tools/train/utils_sttn.py
def get_local_rank():
"""Find OMPI local rank without calling mpi functions
:rtype: int
"""
if os.environ.get('MPI_LOCALRANKID') is not None:
return int(os.environ.get('MPI_LOCALRANKID') or 0)
elif os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') is not None:
return int(os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') or 0)
else:
return 0
# Path: backend/tools/train/utils_sttn.py
def get_global_rank():
"""Find OMPI world rank without calling mpi functions
:rtype: int
"""
if os.environ.get('PMI_RANK') is not None:
return int(os.environ.get('PMI_RANK') or 0)
elif os.environ.get('OMPI_COMM_WORLD_RANK') is not None:
return int(os.environ.get('OMPI_COMM_WORLD_RANK') or 0)
else:
return 0
# Path: backend/tools/train/utils_sttn.py
def get_master_ip():
if os.environ.get('AZ_BATCH_MASTER_NODE') is not None:
return os.environ.get('AZ_BATCH_MASTER_NODE').split(':')[0]
elif os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE') is not None:
return os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE')
else:
return "127.0.0.1"
# Path: backend/tools/train/train_sttn.py
import os
import json
import argparse
import torch
import torch.multiprocessing as mp
from shutil import copyfile
from backend.tools.train.trainer_sttn import Trainer
from backend.tools.train.utils_sttn import (
get_world_size,
get_local_rank,
get_global_rank,
get_master_ip,
)
parser = argparse.ArgumentParser(description='STTN')
parser.add_argument('-c', '--config', default='configs_sttn/youtube-vos.json', type=str)
parser.add_argument('-m', '--model', default='sttn', type=str)
parser.add_argument('-p', '--port', default='23455', type=str)
parser.add_argument('-e', '--exam', action='store_true')
args = parser.parse_args()
def main_worker(rank, config):
    # if local_rank is not in the config, set both local_rank and global_rank to the passed-in rank
if 'local_rank' not in config:
config['local_rank'] = config['global_rank'] = rank
    # if the config specifies distributed training
if config['distributed']:
        # set the CUDA device to the GPU matching the current local rank
| torch.cuda.set_device(int(config['local_rank'])) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: Genesis-Embodied-AI/RoboGen
# Path: manipulation/panda.py
class Panda(Robot):
def __init__(self, controllable_joints='right', slider=True, floating=False):
self.slider = slider
self.floating = floating
if not floating:
if not slider:
right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints
right_end_effector = 11 # Used to get the pose of the end effector
right_gripper_indices = [9, 10] # Gripper actuated joints
else:
right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
right_end_effector = 15 # Used to get the pose of the end effector
right_gripper_indices = [13, 14] # Gripper actuated joints
else:
right_arm_joint_indices = []
right_end_effector = -1
right_gripper_indices = [0, 1]
super(Panda, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)
def init(self, directory, id, np_random, fixed_base=False, use_suction=True):
self.body = p.loadURDF(os.path.join(directory, 'franka_mobile', 'panda_suction_slider_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)
for i in range(p.getNumJoints(self.body, physicsClientId=id)):
print(p.getJointInfo(self.body, i, physicsClientId=id))
link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')
print("link_name: ", link_name)
super(Panda, self).init(self.body, id, np_random)
# Path: manipulation/ur5.py
class UR5(Robot):
def __init__(self, controllable_joints='right', slider=True, floating=False):
self.slider = slider
self.floating = floating
if not floating:
if not slider:
right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints
right_end_effector = 11 # Used to get the pose of the end effector
right_gripper_indices = [9, 10] # Gripper actuated joints
else:
right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
right_end_effector = 21 # Used to get the pose of the end effector
right_gripper_indices = [21, 19] # Gripper actuated joints
else:
right_arm_joint_indices = []
right_end_effector = -1
right_gripper_indices = [0, 1]
super(UR5, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)
def init(self, directory, id, np_random, fixed_base=False, use_suction=True):
self.body = p.loadURDF(os.path.join(directory, 'ur5', 'ur5_robotiq85_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)
for i in range(p.getNumJoints(self.body, physicsClientId=id)):
print(p.getJointInfo(self.body, i, physicsClientId=id))
link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')
print("link_name: ", link_name)
all_joint_num = p.getNumJoints(self.body)
all_joint_idx = list(range(all_joint_num))
joint_idx = [j for j in all_joint_idx if self._is_not_fixed(j)]
self.right_arm_joint_indices = joint_idx
self.controllable_joint_indices = self.right_arm_joint_indices
super(UR5, self).init(self.body, id, np_random)
# Path: manipulation/sawyer.py
class Sawyer(Robot):
def __init__(self, controllable_joints='right', slider=True, floating=False):
self.slider = slider
self.floating = floating
if not floating:
if not slider:
right_arm_joint_indices = [0, 1, 2, 3, 4, 5, 6] # Controllable arm joints
right_end_effector = 11 # Used to get the pose of the end effector
right_gripper_indices = [9, 10] # Gripper actuated joints
else:
right_arm_joint_indices = [0, 1, 2, 4, 5, 6, 7, 8, 9, 10]
right_end_effector = 26 # Used to get the pose of the end effector
right_gripper_indices = [25, 23] # Gripper actuated joints
else:
right_arm_joint_indices = []
right_end_effector = -1
right_gripper_indices = [0, 1]
super(Sawyer, self).__init__(controllable_joints, right_arm_joint_indices, right_end_effector, right_gripper_indices)
def init(self, directory, id, np_random, fixed_base=False, use_suction=True):
self.body = p.loadURDF(os.path.join(directory, 'sawyer', 'sawyer_mobile.urdf'), useFixedBase=fixed_base, basePosition=[-1, -1, 0.5], flags=p.URDF_USE_SELF_COLLISION, physicsClientId=id)
for i in range(p.getNumJoints(self.body, physicsClientId=id)):
print(p.getJointInfo(self.body, i, physicsClientId=id))
link_name = p.getJointInfo(self.body, i, physicsClientId=id)[12].decode('utf-8')
print("link_name: ", link_name)
all_joint_num = p.getNumJoints(self.body)
all_joint_idx = list(range(all_joint_num))
joint_idx = [j for j in all_joint_idx if self._is_not_fixed(j)]
self.right_arm_joint_indices = joint_idx
self.controllable_joint_indices = self.right_arm_joint_indices
print("joint_idx: ", joint_idx)
super(Sawyer, self).init(self.body, id, np_random)
# Path: manipulation/utils.py
def parse_config(config, use_bard=True, obj_id=None, use_gpt_size=True, use_vhacd=True):
urdf_paths = []
urdf_sizes = []
urdf_locations = []
urdf_names = []
urdf_types = []
urdf_on_tables = []
urdf_movables = []
use_table = False
articulated_joint_angles = {}
spatial_relationships = []
distractor_config_path = None
for obj in config:
print(obj)
if "use_table" in obj.keys():
use_table = obj['use_table']
if "set_joint_angle_object_name" in obj.keys():
new_obj = copy.deepcopy(obj)
new_obj.pop('set_joint_angle_object_name')
articulated_joint_angles[obj['set_joint_angle_object_name']] = new_obj
if "spatial_relationships" in obj.keys():
spatial_relationships = obj['spatial_relationships']
if 'task_name' in obj.keys() or 'task_description' in obj.keys():
continue
if "distractor_config_path" in obj.keys():
distractor_config_path = obj['distractor_config_path']
if "type" not in obj.keys():
continue
if obj['type'] == 'mesh':
if 'uid' not in obj.keys():
continue
if obj_id is None:
uid = obj['uid'][np.random.randint(len(obj['uid']))]
else:
uid = obj['uid'][obj_id]
urdf_file_path = osp.join("objaverse_utils/data/obj", "{}".format(uid), "material.urdf")
if not os.path.exists(urdf_file_path):
down_load_single_object(name=obj['lang'], uids=[uid])
new_urdf_file_path = urdf_file_path.replace("material.urdf", "material_non_vhacd.urdf")
new_urdf_lines = []
with open(urdf_file_path, 'r') as f:
urdf_lines = f.readlines()
for line in urdf_lines:
if 'vhacd' in line:
new_line = line.replace("_vhacd", "")
new_urdf_lines.append(new_line)
else:
new_urdf_lines.append(line)
with open(new_urdf_file_path, 'w') as f:
f.writelines(new_urdf_lines)
urdf_file_path = new_urdf_file_path
print("object {} choosing uid {} urdf_path {}".format(obj['lang'], uid, urdf_file_path))
urdf_paths.append(urdf_file_path)
urdf_types.append('mesh')
urdf_movables.append(True) # all mesh objects are movable
elif obj['type'] == 'urdf':
try:
category = obj['lang']
possible_obj_path = partnet_mobility_dict[category]
except:
category = obj['name']
if category == 'Computer display':
category = 'Display'
possible_obj_path = partnet_mobility_dict[category]
if 'reward_asset_path' not in obj.keys():
obj_path = np.random.choice(possible_obj_path)
if category == 'Toaster':
obj_path = str(103486)
if category == 'Microwave':
obj_path = str(7310)
if category == "Oven":
obj_path = str(101808)
if category == 'Refrigerator':
obj_path = str(10638)
else:
obj_path = obj['reward_asset_path']
urdf_file_path = osp.join("data/dataset", obj_path, "mobility.urdf")
if use_vhacd:
new_urdf_file_path = urdf_file_path.replace("mobility.urdf", "mobility_vhacd.urdf")
if not osp.exists(new_urdf_file_path):
new_urdf_file_path = preprocess_urdf(urdf_file_path)
urdf_paths.append(new_urdf_file_path)
else:
urdf_paths.append(urdf_file_path)
urdf_types.append('urdf')
urdf_movables.append(obj.get('movable', False)) # by default, urdf objects are not movable, unless specified
urdf_sizes.append(obj['size'])
urdf_locations.append(parse_center(obj['center']))
urdf_names.append(obj['name'])
urdf_on_tables.append(obj.get('on_table', False))
return urdf_paths, urdf_sizes, urdf_locations, urdf_names, urdf_types, urdf_on_tables, use_table, \
articulated_joint_angles, spatial_relationships, distractor_config_path, urdf_movables
# Path: manipulation/utils.py
def load_env(env, load_path=None, state=None):
if load_path is not None:
with open(load_path, 'rb') as f:
state = pickle.load(f)
### set env to stored object position and orientation
for obj_name, obj_id in env.urdf_ids.items():
p.resetBasePositionAndOrientation(obj_id, state['object_base_position'][obj_name], state['object_base_orientation'][obj_name], physicsClientId=env.id)
### set env to stored object joint angles
for obj_name, obj_id in env.urdf_ids.items():
num_links = p.getNumJoints(obj_id, physicsClientId=env.id)
for link_idx in range(0, num_links):
joint_angle = state['object_joint_angle_dicts'][obj_name][link_idx]
p.resetJointState(obj_id, link_idx, joint_angle, physicsClientId=env.id)
### recover suction
env.activated = state['activated']
if state['activated']:
env.suction_obj_id = state['suction_object_id']
env.suction_contact_link = state['suction_contact_link']
env.suction_to_obj_pose = state['suction_to_obj_pose']
env.create_suction_constraint(env.suction_obj_id, env.suction_contact_link, env.suction_to_obj_pose)
if "urdf_paths" in state:
env.urdf_paths = state["urdf_paths"]
if "object_sizes" in state:
env.simulator_sizes = state["object_sizes"]
if "robot_name" in state:
env.robot_name = state["robot_name"]
if "table_path" in state and env.use_table:
env.table_path = state["table_path"]
return state
# Path: manipulation/utils.py
def download_and_parse_objavarse_obj_from_yaml_config(config_path, candidate_num=10, vhacd=True):
config = None
while config is None:
with open(config_path, 'r') as file:
config = yaml.safe_load(file)
task_name = None
task_description = None
for obj in config:
if 'task_name' in obj.keys():
task_name = obj['task_name']
task_description = obj['task_description']
break
for obj in config:
if 'type' in obj.keys() and obj['type'] == 'mesh' and 'uid' not in obj.keys():
print("{} trying to download object: {} {}".format("=" * 20, obj['lang'], "=" * 20))
success = down_load_single_object(obj["lang"], candidate_num=candidate_num, vhacd=vhacd,
task_name=task_name, task_description=task_description)
if not success:
print("failed to find suitable object to download {} quit building this task".format(obj["lang"]))
return False
obj['uid'] = text_to_uid_dict[obj["lang"]]
obj['all_uid'] = text_to_uid_dict[obj["lang"] + "_all"]
with open(config_path, 'w') as f:
yaml.dump(config, f, indent=4)
return True
# Path: manipulation/gpt_reward_api.py
def get_joint_id_from_name(simulator, object_name, joint_name):
object_id = simulator.urdf_ids[object_name]
num_joints = p.getNumJoints(object_id, physicsClientId=simulator.id)
joint_index = None
for i in range(num_joints):
joint_info = p.getJointInfo(object_id, i, physicsClientId=simulator.id)
if joint_info[1].decode("utf-8") == joint_name:
joint_index = i
break
return joint_index
# Path: manipulation/gpt_reward_api.py
def get_link_id_from_name(simulator, object_name, link_name):
object_id = simulator.urdf_ids[object_name]
num_joints = p.getNumJoints(object_id, physicsClientId=simulator.id)
joint_index = None
for i in range(num_joints):
joint_info = p.getJointInfo(object_id, i, physicsClientId=simulator.id)
if joint_info[12].decode("utf-8") == link_name:
joint_index = i
break
return joint_index
# Path: manipulation/sim.py
import numpy as np
import pybullet as p
import gym
import pickle
import yaml
import os.path as osp
from gym.utils import seeding
from gym import spaces
from collections import defaultdict
from scipy.spatial.transform import Rotation as R
from manipulation.panda import Panda
from manipulation.ur5 import UR5
from manipulation.sawyer import Sawyer
from manipulation.utils import parse_config, load_env, download_and_parse_objavarse_obj_from_yaml_config
from manipulation.gpt_reward_api import get_joint_id_from_name, get_link_id_from_name
from manipulation.table_utils import table_paths, table_scales, table_poses, table_bbox_scale_down_factors
if 'delta-axis-angle' in self.rotation_mode:
dtheta = np.linalg.norm(delta_rotation)
if dtheta > 0:
delta_rotation = delta_rotation / dtheta
dtheta = dtheta * self.max_rotation_angle / np.sqrt(3)
delta_rotation_matrix = R.from_rotvec(delta_rotation * dtheta).as_matrix()
else:
delta_rotation_matrix = np.eye(3)
current_matrix = np.array(p.getMatrixFromQuaternion(orient)).reshape(3, 3)
if self.rotation_mode == 'delta-axis-angle-local':
new_rotation = current_matrix @ delta_rotation_matrix
elif self.rotation_mode == 'delta-axis-angle-global':
new_rotation = delta_rotation_matrix @ current_matrix
orient = R.from_matrix(new_rotation).as_quat()
elif self.rotation_mode == 'delta-euler-angle':
euler_angle = delta_rotation / np.sqrt(3) * self.max_rotation_angle
delta_quaternion = p.getQuaternionFromEuler(euler_angle)
orient = delta_quaternion * orient
return orient
def activate_suction(self):
if not self.activated:
# assume the suction is attached to the right end effector
suction_id = self.suction_id
points = p.getContactPoints(bodyA=self.robot.body, linkIndexA=suction_id, physicsClientId=self.id)
if points:
# Handle contact between suction with a rigid object.
contact_object_id_link_cnts = defaultdict(int)
for point in points:
obj_id, contact_link, contact_position_on_obj = point[2], point[4], point[6]
if obj_id == self.urdf_ids['plane'] or obj_id == self.robot.body:
pass
else:
contact_object_id_link_cnts[(obj_id, contact_link)] += 1
if len(contact_object_id_link_cnts) > 0:
# find the object that has the most contact points
obj_id, contact_link = max(contact_object_id_link_cnts.items(), key=lambda x: x[1])[0]
# print("contact with object: ", obj_id, contact_link)
body_pose = p.getLinkState(self.robot.body, suction_id, physicsClientId=self.id)
if contact_link >= 0:
obj_link_pose = p.getLinkState(obj_id, contact_link, physicsClientId=self.id)
else:
obj_link_pose = p.getBasePositionAndOrientation(obj_id, physicsClientId=self.id)
world_to_body = p.invertTransform(body_pose[0], body_pose[1])
obj_to_body = p.multiplyTransforms(world_to_body[0],
world_to_body[1],
obj_link_pose[0], obj_link_pose[1])
suction_to_obj = p.invertTransform(obj_to_body[0], obj_to_body[1])
self.create_suction_constraint(obj_id, contact_link, suction_to_obj)
self.activated = True
self.suction_obj_id = obj_id
self.suction_contact_link = contact_link
self.suction_to_obj_pose = suction_to_obj
def create_suction_constraint(self, suction_obj_id, suction_contact_link, suction_to_obj_pose):
suction_id = self.suction_id
self.contact_constraint = p.createConstraint(
parentBodyUniqueId=self.robot.body,
parentLinkIndex=suction_id,
childBodyUniqueId=suction_obj_id,
childLinkIndex=suction_contact_link,
jointType=p.JOINT_FIXED,
jointAxis=(0, 0, 0),
parentFramePosition=(0, 0, 0),
parentFrameOrientation=(0, 0, 0),
childFramePosition=suction_to_obj_pose[0],
childFrameOrientation=suction_to_obj_pose[1],
physicsClientId=self.id)
p.changeConstraint(self.contact_constraint, maxForce=5000, physicsClientId=self.id)
def deactivate_suction(self):
self.activated = False
if self.contact_constraint is not None:
p.removeConstraint(self.contact_constraint, physicsClientId=self.id)
self.contact_constraint = None
def step(self, action):
self.time_step += 1
self.take_step(action)
obs = self._get_obs()
# to handle some stupid typing error in early prompts
try:
reward, success = self._compute_reward()
except:
reward, success = self.compute_reward()
self.success = success
done = self.time_step == self.horizon
info = self._get_info()
return obs, reward, done, info
def _get_info(self):
return {}
def _get_obs(self):
### For RL policy learning, observation space includes:
# 1. object positions and orientations (6 * num_objects)
# 2. object min and max bounding box (6 * num_objects)
# 3. articulated object joint angles (num_objects * num_joints)
# 4. articulated object link position and orientation (num_objects * num_joints * 6)
# 5. robot base position (xy)
# 6. robot end-effector position and orientation (6)
# 7. gripper suction activated/deactivate or gripper joint angle (if not using suction gripper) (1)
obs = np.zeros(self.base_observation_space.shape[0])
cnt = 0
for name, id in self.urdf_ids.items():
if name == 'plane' or name == 'robot':
continue
| if self.is_distractor[name]: |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: KoeAI/LLVC
# Path: minimal_rvc/models.py
class SynthesizerTrnMs256NSFSid(nn.Module):
def __init__(
self,
spec_channels,
segment_size,
inter_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
spk_embed_dim,
gin_channels,
emb_channels,
sr,
**kwargs
):
super().__init__()
if type(sr) == type("strr"):
sr = sr2sr[sr]
self.spec_channels = spec_channels
self.inter_channels = inter_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.resblock = resblock
self.resblock_kernel_sizes = resblock_kernel_sizes
self.resblock_dilation_sizes = resblock_dilation_sizes
self.upsample_rates = upsample_rates
self.upsample_initial_channel = upsample_initial_channel
self.upsample_kernel_sizes = upsample_kernel_sizes
self.segment_size = segment_size
self.gin_channels = gin_channels
self.emb_channels = emb_channels
self.sr = sr
# self.hop_length = hop_length#
self.spk_embed_dim = spk_embed_dim
self.enc_p = TextEncoder(
inter_channels,
hidden_channels,
filter_channels,
emb_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
)
self.dec = GeneratorNSF(
inter_channels,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=gin_channels,
sr=sr,
is_half=kwargs["is_half"],
)
self.enc_q = PosteriorEncoder(
spec_channels,
inter_channels,
hidden_channels,
5,
1,
16,
gin_channels=gin_channels,
)
self.flow = ResidualCouplingBlock(
inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
)
self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
print(
"gin_channels:",
gin_channels,
"self.spk_embed_dim:",
self.spk_embed_dim,
"emb_channels:",
emb_channels,
)
def remove_weight_norm(self):
self.dec.remove_weight_norm()
self.flow.remove_weight_norm()
self.enc_q.remove_weight_norm()
def forward(
self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
    ): # here ds is the speaker id, with shape [bs, 1]
# print(1,pitch.shape)#[bs,t]
        g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]; the trailing 1 is t, broadcast later
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
z_p = self.flow(z, y_mask, g=g)
z_slice, ids_slice = commons.rand_slice_segments(
z, y_lengths, self.segment_size
)
# print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
# print(-2,pitchf.shape,z_slice.shape)
o = self.dec(z_slice, pitchf, g=g)
return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
g = self.emb_g(sid).unsqueeze(-1)
m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
z = self.flow(z_p, x_mask, g=g, reverse=True)
o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
return o, x_mask, (z, z_p, m_p, logs_p)
# Path: minimal_rvc/rmvpe.py
class RMVPE:
def __init__(self, model_path, is_half, device=None):
self.resample_kernel = {}
self.resample_kernel = {}
self.is_half = is_half
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
self.device = device
self.mel_extractor = MelSpectrogram(
is_half, 128, 16000, 1024, 160, None, 30, 8000
).to(device)
if "privateuseone" in str(device):
import onnxruntime as ort
ort_session = ort.InferenceSession(
"rmvpe.onnx", providers=["DmlExecutionProvider"]
)
self.model = ort_session
else:
model = E2E(4, 1, (2, 2))
ckpt = torch.load(model_path, map_location="cpu")
model.load_state_dict(ckpt)
model.eval()
if is_half == True:
model = model.half()
self.model = model
self.model = self.model.to(device)
cents_mapping = 20 * np.arange(360) + 1997.3794084376191
self.cents_mapping = np.pad(cents_mapping, (4, 4)) # 368
def mel2hidden(self, mel):
with torch.no_grad():
n_frames = mel.shape[-1]
mel = F.pad(
mel, (0, 32 * ((n_frames - 1) // 32 + 1) - n_frames), mode="reflect"
)
if "privateuseone" in str(self.device):
onnx_input_name = self.model.get_inputs()[0].name
onnx_outputs_names = self.model.get_outputs()[0].name
hidden = self.model.run(
[onnx_outputs_names],
input_feed={onnx_input_name: mel.cpu().numpy()},
)[0]
else:
hidden = self.model(mel)
return hidden[:, :n_frames]
def decode(self, hidden, thred=0.03):
cents_pred = self.to_local_average_cents(hidden, thred=thred)
f0 = 10 * (2 ** (cents_pred / 1200))
f0[f0 == 10] = 0
# f0 = np.array([10 * (2 ** (cent_pred / 1200)) if cent_pred else 0 for cent_pred in cents_pred])
return f0
def infer_from_audio(self, audio, thred=0.03):
# torch.cuda.synchronize()
t0 = ttime()
mel = self.mel_extractor(
torch.from_numpy(audio).float().to(self.device).unsqueeze(0), center=True
)
# print(123123123,mel.device.type)
# torch.cuda.synchronize()
t1 = ttime()
hidden = self.mel2hidden(mel)
# torch.cuda.synchronize()
t2 = ttime()
# print(234234,hidden.device.type)
if "privateuseone" not in str(self.device):
hidden = hidden.squeeze(0).cpu().numpy()
else:
hidden = hidden[0]
if self.is_half == True:
hidden = hidden.astype("float32")
f0 = self.decode(hidden, thred=thred)
# torch.cuda.synchronize()
t3 = ttime()
# print("hmvpe:%s\t%s\t%s\t%s"%(t1-t0,t2-t1,t3-t2,t3-t0))
return f0
def to_local_average_cents(self, salience, thred=0.05):
# t0 = ttime()
        center = np.argmax(salience, axis=1) # (n_frames,) index
        salience = np.pad(salience, ((0, 0), (4, 4))) # (n_frames, 368)
# t1 = ttime()
center += 4
todo_salience = []
todo_cents_mapping = []
starts = center - 4
ends = center + 5
for idx in range(salience.shape[0]):
todo_salience.append(salience[:, starts[idx] : ends[idx]][idx])
todo_cents_mapping.append(self.cents_mapping[starts[idx] : ends[idx]])
# t2 = ttime()
        todo_salience = np.array(todo_salience) # (n_frames, 9)
        todo_cents_mapping = np.array(todo_cents_mapping) # (n_frames, 9)
product_sum = np.sum(todo_salience * todo_cents_mapping, 1)
        weight_sum = np.sum(todo_salience, 1) # (n_frames,)
        devided = product_sum / weight_sum # (n_frames,)
# t3 = ttime()
        maxx = np.max(salience, axis=1) # (n_frames,)
devided[maxx <= thred] = 0
# t4 = ttime()
# print("decode:%s\t%s\t%s\t%s" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
return devided
# Path: minimal_rvc/pipeline.py
import os
import traceback
import faiss
import numpy as np
import pyworld
import scipy.signal as signal
import torch
import torch.nn.functional as F
import torchcrepe
from typing import *
from fairseq.models.hubert import HubertModel
from torch import Tensor
from .models import SynthesizerTrnMs256NSFSid
from .rmvpe import RMVPE
):
feats = torch.from_numpy(audio)
if self.is_half:
feats = feats.half()
else:
feats = feats.float()
if feats.dim() == 2: # double channels
feats = feats.mean(-1)
assert feats.dim() == 1, feats.dim()
feats = feats.view(1, -1)
padding_mask = torch.BoolTensor(
feats.shape).to(self.device).fill_(False)
half_support = (
self.device.type == "cuda"
and torch.cuda.get_device_capability(self.device)[0] >= 5.3
)
is_feats_dim_768 = net_g.emb_channels == 768
if isinstance(model, tuple):
feats = model[0](
feats.squeeze(0).squeeze(0).to(self.device),
return_tensors="pt",
sampling_rate=16000,
)
if self.is_half:
feats = feats.input_values.to(self.device).half()
else:
feats = feats.input_values.to(self.device)
with torch.no_grad():
if is_feats_dim_768:
feats = model[1](feats).last_hidden_state
else:
feats = model[1](feats).extract_features
else:
inputs = {
"source": feats.half().to(self.device)
if half_support
else feats.to(self.device),
"padding_mask": padding_mask.to(self.device),
"output_layer": embedding_output_layer,
}
if not half_support:
model = model.float()
inputs["source"] = inputs["source"].float()
with torch.no_grad():
logits = model.extract_features(**inputs)
if is_feats_dim_768:
feats = logits[0]
else:
feats = model.final_proj(logits[0])
if (
isinstance(index, type(None)) == False
and isinstance(big_npy, type(None)) == False
and index_rate != 0
):
npy = feats[0].cpu().numpy()
if self.is_half:
npy = npy.astype("float32")
score, ix = index.search(npy, k=8)
weight = np.square(1 / score)
weight /= weight.sum(axis=1, keepdims=True)
npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
if self.is_half:
npy = npy.astype("float16")
feats = (
torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
+ (1 - index_rate) * feats
)
feats = F.interpolate(feats.permute(0, 2, 1),
scale_factor=2).permute(0, 2, 1)
p_len = audio.shape[0] // self.window
if feats.shape[1] < p_len:
p_len = feats.shape[1]
if pitch != None and pitchf != None:
pitch = pitch[:, :p_len]
pitchf = pitchf[:, :p_len]
p_len = torch.tensor([p_len], device=self.device).long()
with torch.no_grad():
if pitch != None and pitchf != None:
audio1 = (
(net_g.infer(feats, p_len, pitch,
pitchf, sid)[0][0, 0] * 32768)
.data.cpu()
.float()
.numpy()
.astype(np.int16)
)
else:
audio1 = (
(net_g.infer(feats, p_len, sid)[0][0, 0] * 32768)
.data.cpu()
.float()
.numpy()
.astype(np.int16)
)
del feats, p_len, padding_mask
if torch.cuda.is_available():
torch.cuda.empty_cache()
return audio1
def __call__(
self,
model: HubertModel,
embedding_output_layer: int,
net_g: SynthesizerTrnMs256NSFSid,
sid: int,
audio: np.ndarray,
transpose: int,
f0_method: str,
file_index: str,
index_rate: float,
if_f0: bool,
| f0_relative: bool, |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: aurelio-labs/semantic-router
# Path: semantic_router/encoders/base.py
class BaseEncoder(BaseModel):
name: str
score_threshold: float
type: str = Field(default="base")
class Config:
arbitrary_types_allowed = True
def __call__(self, docs: List[str]) -> List[List[float]]:
raise NotImplementedError("Subclasses must implement this method")
# Path: semantic_router/encoders/bm25.py
class BM25Encoder(BaseEncoder):
model: Optional[Any] = None
idx_mapping: Optional[Dict[int, int]] = None
type: str = "sparse"
def __init__(
self,
name: str = "bm25",
score_threshold: float = 0.82,
use_default_params: bool = True,
):
super().__init__(name=name, score_threshold=score_threshold)
try:
from pinecone_text.sparse import BM25Encoder as encoder
except ImportError:
raise ImportError(
"Please install pinecone-text to use BM25Encoder. "
"You can install it with: `pip install 'semantic-router[hybrid]'`"
)
self.model = encoder()
if use_default_params:
logger.info("Downloading and initializing default sBM25 model parameters.")
self.model = encoder.default()
self._set_idx_mapping()
def _set_idx_mapping(self):
params = self.model.get_params()
doc_freq = params["doc_freq"]
if isinstance(doc_freq, dict):
indices = doc_freq["indices"]
self.idx_mapping = {int(idx): i for i, idx in enumerate(indices)}
else:
raise TypeError("Expected a dictionary for 'doc_freq'")
def __call__(self, docs: List[str]) -> List[List[float]]:
if self.model is None or self.idx_mapping is None:
raise ValueError("Model or index mapping is not initialized.")
if len(docs) == 1:
sparse_dicts = self.model.encode_queries(docs)
elif len(docs) > 1:
sparse_dicts = self.model.encode_documents(docs)
else:
raise ValueError("No documents to encode.")
embeds = [[0.0] * len(self.idx_mapping)] * len(docs)
for i, output in enumerate(sparse_dicts):
indices = output["indices"]
values = output["values"]
for idx, val in zip(indices, values):
if idx in self.idx_mapping:
position = self.idx_mapping[idx]
embeds[i][position] = val
return embeds
def fit(self, docs: List[str]):
if self.model is None:
raise ValueError("Model is not initialized.")
self.model.fit(docs)
self._set_idx_mapping()
# Path: semantic_router/encoders/cohere.py
class CohereEncoder(BaseEncoder):
client: Optional[cohere.Client] = None
type: str = "cohere"
def __init__(
self,
name: Optional[str] = None,
cohere_api_key: Optional[str] = None,
score_threshold: float = 0.3,
):
if name is None:
name = os.getenv("COHERE_MODEL_NAME", "embed-english-v3.0")
super().__init__(name=name, score_threshold=score_threshold)
cohere_api_key = cohere_api_key or os.getenv("COHERE_API_KEY")
if cohere_api_key is None:
raise ValueError("Cohere API key cannot be 'None'.")
try:
self.client = cohere.Client(cohere_api_key)
except Exception as e:
raise ValueError(
f"Cohere API client failed to initialize. Error: {e}"
) from e
def __call__(self, docs: List[str]) -> List[List[float]]:
if self.client is None:
raise ValueError("Cohere client is not initialized.")
try:
embeds = self.client.embed(docs, input_type="search_query", model=self.name)
return embeds.embeddings
except Exception as e:
raise ValueError(f"Cohere API call failed. Error: {e}") from e
# Path: semantic_router/encoders/openai.py
class OpenAIEncoder(BaseEncoder):
client: Optional[openai.Client]
type: str = "openai"
def __init__(
self,
name: Optional[str] = None,
openai_api_key: Optional[str] = None,
score_threshold: float = 0.82,
):
if name is None:
name = os.getenv("OPENAI_MODEL_NAME", "text-embedding-ada-002")
super().__init__(name=name, score_threshold=score_threshold)
api_key = openai_api_key or os.getenv("OPENAI_API_KEY")
if api_key is None:
raise ValueError("OpenAI API key cannot be 'None'.")
try:
self.client = openai.Client(api_key=api_key)
except Exception as e:
raise ValueError(
f"OpenAI API client failed to initialize. Error: {e}"
) from e
def __call__(self, docs: List[str]) -> List[List[float]]:
if self.client is None:
raise ValueError("OpenAI client is not initialized.")
embeds = None
error_message = ""
# Exponential backoff
for j in range(3):
try:
embeds = self.client.embeddings.create(input=docs, model=self.name)
if embeds.data:
break
except OpenAIError as e:
sleep(2**j)
error_message = str(e)
logger.warning(f"Retrying in {2**j} seconds...")
except Exception as e:
logger.error(f"OpenAI API call failed. Error: {error_message}")
raise ValueError(f"OpenAI API call failed. Error: {e}") from e
if (
not embeds
or not isinstance(embeds, CreateEmbeddingResponse)
or not embeds.data
):
raise ValueError(f"No embeddings returned. Error: {error_message}")
embeddings = [embeds_obj.embedding for embeds_obj in embeds.data]
return embeddings
# Path: semantic_router/encoders/tfidf.py
class TfidfEncoder(BaseEncoder):
idf: ndarray = np.array([])
word_index: Dict = {}
def __init__(self, name: str = "tfidf", score_threshold: float = 0.82):
# TODO default score_threshold not thoroughly tested, should optimize
super().__init__(name=name, score_threshold=score_threshold)
self.word_index = {}
self.idf = np.array([])
def __call__(self, docs: list[str]) -> list[list[float]]:
if len(self.word_index) == 0 or self.idf.size == 0:
raise ValueError("Vectorizer is not initialized.")
if len(docs) == 0:
raise ValueError("No documents to encode.")
docs = [self._preprocess(doc) for doc in docs]
tf = self._compute_tf(docs)
tfidf = tf * self.idf
return tfidf.tolist()
def fit(self, routes: list[Route]):
docs = []
for route in routes:
for doc in route.utterances:
docs.append(self._preprocess(doc))
self.word_index = self._build_word_index(docs)
self.idf = self._compute_idf(docs)
def _build_word_index(self, docs: list[str]) -> dict:
words = set()
for doc in docs:
for word in doc.split():
words.add(word)
word_index = {word: i for i, word in enumerate(words)}
return word_index
def _compute_tf(self, docs: list[str]) -> np.ndarray:
if len(self.word_index) == 0:
raise ValueError("Word index is not initialized.")
tf = np.zeros((len(docs), len(self.word_index)))
for i, doc in enumerate(docs):
word_counts = Counter(doc.split())
for word, count in word_counts.items():
if word in self.word_index:
tf[i, self.word_index[word]] = count
# L2 normalization
tf = tf / norm(tf, axis=1, keepdims=True)
return tf
def _compute_idf(self, docs: list[str]) -> np.ndarray:
if len(self.word_index) == 0:
raise ValueError("Word index is not initialized.")
idf = np.zeros(len(self.word_index))
for doc in docs:
words = set(doc.split())
for word in words:
if word in self.word_index:
idf[self.word_index[word]] += 1
idf = np.log(len(docs) / (idf + 1))
return idf
def _preprocess(self, doc: str) -> str:
lowercased_doc = doc.lower()
no_punctuation_doc = lowercased_doc.translate(
str.maketrans("", "", string.punctuation)
)
return no_punctuation_doc
# Path: semantic_router/encoders/zure.py
class AzureOpenAIEncoder(BaseEncoder):
client: Optional[openai.AzureOpenAI] = None
type: str = "azure"
api_key: Optional[str] = None
deployment_name: Optional[str] = None
azure_endpoint: Optional[str] = None
api_version: Optional[str] = None
model: Optional[str] = None
def __init__(
self,
api_key: Optional[str] = None,
deployment_name: Optional[str] = None,
azure_endpoint: Optional[str] = None,
api_version: Optional[str] = None,
model: Optional[str] = None,
score_threshold: float = 0.82,
):
name = deployment_name
if name is None:
name = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME", "text-embedding-ada-002")
super().__init__(name=name, score_threshold=score_threshold)
self.api_key = api_key
self.deployment_name = deployment_name
self.azure_endpoint = azure_endpoint
self.api_version = api_version
self.model = model
if self.api_key is None:
self.api_key = os.getenv("AZURE_OPENAI_API_KEY")
if self.api_key is None:
raise ValueError("No Azure OpenAI API key provided.")
if self.deployment_name is None:
self.deployment_name = os.getenv(
"AZURE_OPENAI_DEPLOYMENT_NAME", "text-embedding-ada-002"
)
# deployment_name may still be None, but it is optional in the API
if self.azure_endpoint is None:
self.azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
if self.azure_endpoint is None:
raise ValueError("No Azure OpenAI endpoint provided.")
if self.api_version is None:
self.api_version = os.getenv("AZURE_OPENAI_API_VERSION")
if self.api_version is None:
raise ValueError("No Azure OpenAI API version provided.")
if self.model is None:
self.model = os.getenv("AZURE_OPENAI_MODEL")
if self.model is None:
raise ValueError("No Azure OpenAI model provided.")
assert (
self.api_key is not None
and self.azure_endpoint is not None
and self.api_version is not None
and self.model is not None
)
try:
self.client = openai.AzureOpenAI(
azure_deployment=str(deployment_name) if deployment_name else None,
api_key=str(api_key),
azure_endpoint=str(azure_endpoint),
api_version=str(api_version),
# _strict_response_validation=True,
)
except Exception as e:
raise ValueError(
f"OpenAI API client failed to initialize. Error: {e}"
) from e
def __call__(self, docs: List[str]) -> List[List[float]]:
if self.client is None:
raise ValueError("OpenAI client is not initialized.")
embeds = None
error_message = ""
# Exponential backoff
for j in range(3):
try:
embeds = self.client.embeddings.create(
input=docs, model=str(self.model)
)
if embeds.data:
break
except OpenAIError as e:
# print full traceback
import traceback
traceback.print_exc()
sleep(2**j)
error_message = str(e)
logger.warning(f"Retrying in {2**j} seconds...")
except Exception as e:
logger.error(f"Azure OpenAI API call failed. Error: {error_message}")
raise ValueError(f"Azure OpenAI API call failed. Error: {e}") from e
if (
not embeds
or not isinstance(embeds, CreateEmbeddingResponse)
or not embeds.data
):
raise ValueError(f"No embeddings returned. Error: {error_message}")
embeddings = [embeds_obj.embedding for embeds_obj in embeds.data]
return embeddings
# Path: semantic_router/hybrid_layer.py
class HybridRouteLayer:
index = None
sparse_index = None
categories = None
score_threshold: float
def __init__(
self,
encoder: BaseEncoder,
sparse_encoder: Optional[BM25Encoder] = None,
routes: List[Route] = [],
alpha: float = 0.3,
):
self.encoder = encoder
self.score_threshold = self.encoder.score_threshold
if sparse_encoder is None:
logger.warning("No sparse_encoder provided. Using default BM25Encoder.")
self.sparse_encoder = BM25Encoder()
else:
self.sparse_encoder = sparse_encoder
self.alpha = alpha
self.routes = routes
if isinstance(self.sparse_encoder, TfidfEncoder) and hasattr(
self.sparse_encoder, "fit"
):
self.sparse_encoder.fit(routes)
# if routes list has been passed, we initialize index now
if routes:
# initialize index now
# for route in tqdm(routes):
# self._add_route(route=route)
self._add_routes(routes)
def __call__(self, text: str) -> Optional[str]:
results = self._query(text)
top_class, top_class_scores = self._semantic_classify(results)
passed = self._pass_threshold(top_class_scores, self.score_threshold)
if passed:
return top_class
else:
return None
def add(self, route: Route):
self._add_route(route=route)
def _add_route(self, route: Route):
self.routes += [route]
self.update_dense_embeddings_index(route.utterances)
if isinstance(self.sparse_encoder, TfidfEncoder) and hasattr(
self.sparse_encoder, "fit"
):
self.sparse_encoder.fit(self.routes)
# re-build index
self.sparse_index = None
all_utterances = [
utterance for route in self.routes for utterance in route.utterances
]
self.update_sparse_embeddings_index(all_utterances)
else:
self.update_sparse_embeddings_index(route.utterances)
# create route array
if self.categories is None:
self.categories = np.array([route.name] * len(route.utterances))
else:
str_arr = np.array([route.name] * len(route.utterances))
self.categories = np.concatenate([self.categories, str_arr])
self.routes.append(route)
def _add_routes(self, routes: List[Route]):
# create embeddings for all routes
logger.info("Creating embeddings for all routes...")
all_utterances = [
utterance for route in routes for utterance in route.utterances
]
self.update_dense_embeddings_index(all_utterances)
self.update_sparse_embeddings_index(all_utterances)
# create route array
route_names = [route.name for route in routes for _ in route.utterances]
route_array = np.array(route_names)
self.categories = (
np.concatenate([self.categories, route_array])
if self.categories is not None
else route_array
)
def update_dense_embeddings_index(self, utterances: list):
dense_embeds = np.array(self.encoder(utterances))
# create utterance array (the dense index)
self.index = (
np.concatenate([self.index, dense_embeds])
if self.index is not None
else dense_embeds
)
def update_sparse_embeddings_index(self, utterances: list):
sparse_embeds = np.array(self.sparse_encoder(utterances))
# create sparse utterance array
self.sparse_index = (
np.concatenate([self.sparse_index, sparse_embeds])
if self.sparse_index is not None
else sparse_embeds
)
def _query(self, text: str, top_k: int = 5):
"""Given some text, encodes and searches the index vector space to
retrieve the top_k most similar records.
"""
# create dense query vector
xq_d = np.array(self.encoder([text]))
xq_d = np.squeeze(xq_d) # Reduce to 1d array.
# create sparse query vector
xq_s = np.array(self.sparse_encoder([text]))
xq_s = np.squeeze(xq_s)
# convex scaling
xq_d, xq_s = self._convex_scaling(xq_d, xq_s)
if self.index is not None and self.sparse_index is not None:
# calculate dense vec similarity
index_norm = norm(self.index, axis=1)
xq_d_norm = norm(xq_d.T)
sim_d = np.dot(self.index, xq_d.T) / (index_norm * xq_d_norm)
# calculate sparse vec similarity
sparse_norm = norm(self.sparse_index, axis=1)
xq_s_norm = norm(xq_s.T)
sim_s = np.dot(self.sparse_index, xq_s.T) / (sparse_norm * xq_s_norm)
total_sim = sim_d + sim_s
# get indices of top_k records
top_k = min(top_k, total_sim.shape[0])
idx = np.argpartition(total_sim, -top_k)[-top_k:]
scores = total_sim[idx]
# get the utterance categories (route names)
routes = self.categories[idx] if self.categories is not None else []
return [{"route": d, "score": s.item()} for d, s in zip(routes, scores)]
else:
logger.warning("No index found. Please add routes to the layer.")
return []
def _convex_scaling(self, dense: np.ndarray, sparse: np.ndarray):
# scale sparse and dense vecs
dense = np.array(dense) * self.alpha
sparse = np.array(sparse) * (1 - self.alpha)
return dense, sparse
def _semantic_classify(self, query_results: List[Dict]) -> Tuple[str, List[float]]:
scores_by_class: Dict[str, List[float]] = {}
for result in query_results:
score = result["score"]
route = result["route"]
if route in scores_by_class:
scores_by_class[route].append(score)
else:
scores_by_class[route] = [score]
# Calculate total score for each class
total_scores = {route: sum(scores) for route, scores in scores_by_class.items()}
top_class = max(total_scores, key=lambda x: total_scores[x], default=None)
# Return the top class and its associated scores
if top_class is not None:
return str(top_class), scores_by_class.get(top_class, [])
else:
logger.warning("No classification found for semantic classifier.")
return "", []
def _pass_threshold(self, scores: List[float], threshold: float) -> bool:
if scores:
return max(scores) > threshold
else:
return False
# Path: semantic_router/route.py
class Route(BaseModel):
name: str
utterances: List[str]
description: Optional[str] = None
function_schema: Optional[Dict[str, Any]] = None
llm: Optional[BaseLLM] = None
def __call__(self, query: str) -> RouteChoice:
if self.function_schema:
if not self.llm:
raise ValueError(
"LLM is required for dynamic routes. Please ensure the `llm` "
"attribute is set."
)
# if a function schema is provided we generate the inputs
extracted_inputs = self.llm.extract_function_inputs(
query=query, function_schema=self.function_schema
)
func_call = extracted_inputs
else:
# otherwise we just pass None for the call
func_call = None
return RouteChoice(name=self.name, function_call=func_call)
def to_dict(self) -> Dict[str, Any]:
return self.dict()
@classmethod
def from_dict(cls, data: Dict[str, Any]):
return cls(**data)
@classmethod
def from_dynamic_route(cls, llm: BaseLLM, entity: Union[BaseModel, Callable]):
"""
Generate a dynamic Route object from a function or Pydantic model using LLM
"""
schema = function_call.get_schema(item=entity)
dynamic_route = cls._generate_dynamic_route(llm=llm, function_schema=schema)
dynamic_route.function_schema = schema
return dynamic_route
@classmethod
def _parse_route_config(cls, config: str) -> str:
# Regular expression to match content inside <config></config>
config_pattern = r"<config>(.*?)</config>"
match = re.search(config_pattern, config, re.DOTALL)
if match:
config_content = match.group(1).strip() # Get the matched content
return config_content
else:
raise ValueError("No <config></config> tags found in the output.")
@classmethod
def _generate_dynamic_route(cls, llm: BaseLLM, function_schema: Dict[str, Any]):
logger.info("Generating dynamic route...")
prompt = f"""
You are tasked to generate a JSON configuration based on the provided
function schema. Please follow the template below, no other tokens allowed:
<config>
{{
"name": "<function_name>",
"utterances": [
"<example_utterance_1>",
"<example_utterance_2>",
"<example_utterance_3>",
"<example_utterance_4>",
"<example_utterance_5>"]
}}
</config>
Only include the "name" and "utterances" keys in your answer.
The "name" should match the function name and the "utterances"
should comprise a list of 5 example phrases that could be used to invoke
the function. Use real values instead of placeholders.
Input schema:
{function_schema}
"""
llm_input = [Message(role="user", content=prompt)]
output = llm(llm_input)
if not output:
raise Exception("No output generated for dynamic route")
route_config = cls._parse_route_config(config=output)
logger.info(f"Generated route config:\n{route_config}")
if is_valid(route_config):
route_config_dict = json.loads(route_config)
route_config_dict["llm"] = llm
return Route.from_dict(route_config_dict)
raise Exception("No config generated")
# Path: tests/unit/test_hybrid_layer.py
import pytest
from semantic_router.encoders import (
AzureOpenAIEncoder,
BaseEncoder,
BM25Encoder,
CohereEncoder,
OpenAIEncoder,
TfidfEncoder,
)
from semantic_router.hybrid_layer import HybridRouteLayer
from semantic_router.route import Route
def mock_encoder_call(utterances):
# Define a mapping of utterances to return values
mock_responses = {
"Hello": [0.1, 0.2, 0.3],
"Hi": [0.4, 0.5, 0.6],
"Goodbye": [0.7, 0.8, 0.9],
"Bye": [1.0, 1.1, 1.2],
"Au revoir": [1.3, 1.4, 1.5],
}
return [mock_responses.get(u, [0, 0, 0]) for u in utterances]
@pytest.fixture
def base_encoder(mocker):
mock_base_encoder = BaseEncoder(name="test-encoder", score_threshold=0.5)
mocker.patch.object(BaseEncoder, "__call__", return_value=[[0.1, 0.2, 0.3]])
return mock_base_encoder
@pytest.fixture
def cohere_encoder(mocker):
mocker.patch.object(CohereEncoder, "__call__", side_effect=mock_encoder_call)
return CohereEncoder(name="test-cohere-encoder", cohere_api_key="test_api_key")
@pytest.fixture
def openai_encoder(mocker):
mocker.patch.object(OpenAIEncoder, "__call__", side_effect=mock_encoder_call)
return OpenAIEncoder(name="test-openai-encoder", openai_api_key="test_api_key")
@pytest.fixture
def azure_encoder(mocker):
mocker.patch.object(AzureOpenAIEncoder, "__call__", side_effect=mock_encoder_call)
return AzureOpenAIEncoder(
deployment_name="test-deployment",
azure_endpoint="test_endpoint",
api_key="test_api_key",
api_version="test_version",
model="test_model",
)
def bm25_encoder(mocker):
mocker.patch.object(BM25Encoder, "__call__", side_effect=mock_encoder_call)
return BM25Encoder(name="test-bm25-encoder")
@pytest.fixture
def tfidf_encoder(mocker):
mocker.patch.object(TfidfEncoder, "__call__", side_effect=mock_encoder_call)
return TfidfEncoder(name="test-tfidf-encoder")
@pytest.fixture
def routes():
return [
Route(name="Route 1", utterances=["Hello", "Hi"]),
Route(name="Route 2", utterances=["Goodbye", "Bye", "Au revoir"]),
]
sparse_encoder = BM25Encoder(use_default_params=False)
sparse_encoder.fit(["The quick brown fox", "jumps over the lazy dog", "Hello, world!"])
class TestHybridRouteLayer:
def test_initialization(self, openai_encoder, routes):
route_layer = HybridRouteLayer(
encoder=openai_encoder, sparse_encoder=sparse_encoder, routes=routes
)
assert route_layer.index is not None and route_layer.categories is not None
assert openai_encoder.score_threshold == 0.82
assert route_layer.score_threshold == 0.82
assert len(route_layer.index) == 5
assert len(set(route_layer.categories)) == 2
def test_initialization_different_encoders(self, cohere_encoder, openai_encoder):
route_layer_cohere = HybridRouteLayer(
encoder=cohere_encoder, sparse_encoder=sparse_encoder
)
assert route_layer_cohere.score_threshold == 0.3
route_layer_openai = HybridRouteLayer(
encoder=openai_encoder, sparse_encoder=sparse_encoder
)
assert route_layer_openai.score_threshold == 0.82
def test_add_route(self, openai_encoder):
route_layer = HybridRouteLayer(
encoder=openai_encoder, sparse_encoder=sparse_encoder
)
route = Route(name="Route 3", utterances=["Yes", "No"])
route_layer._add_routes([route])
assert route_layer.index is not None and route_layer.categories is not None
| assert len(route_layer.index) == 2 |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: baaivision/JudgeLM
# Path: judgelm/conversation.py
class SeparatorStyle(IntEnum):
"""Separator styles."""
ADD_COLON_SINGLE = auto()
ADD_COLON_TWO = auto()
ADD_COLON_SPACE_SINGLE = auto()
NO_COLON_SINGLE = auto()
NO_COLON_TWO = auto()
ADD_NEW_LINE_SINGLE = auto()
LLAMA2 = auto()
CHATGLM = auto()
CHATML = auto()
CHATINTERN = auto()
DOLLY = auto()
RWKV = auto()
PHOENIX = auto()
ROBIN = auto()
# Path: judgelm/constants.py
LOGDIR = os.getenv("LOGDIR", ".")
# Path: judgelm/constants.py
WORKER_API_TIMEOUT = int(os.getenv("JUDGELM_WORKER_API_TIMEOUT", 100))
# Path: judgelm/constants.py
class ErrorCode(IntEnum):
"""
https://platform.openai.com/docs/guides/error-codes/api-errors
"""
VALIDATION_TYPE_ERROR = 40001
INVALID_AUTH_KEY = 40101
INCORRECT_AUTH_KEY = 40102
NO_PERMISSION = 40103
INVALID_MODEL = 40301
PARAM_OUT_OF_RANGE = 40302
CONTEXT_OVERFLOW = 40303
RATE_LIMIT = 42901
QUOTA_EXCEEDED = 42902
ENGINE_OVERLOADED = 42903
INTERNAL_ERROR = 50001
CUDA_OUT_OF_MEMORY = 50002
GRADIO_REQUEST_ERROR = 50003
GRADIO_STREAM_UNKNOWN_ERROR = 50004
CONTROLLER_NO_WORKER = 50005
CONTROLLER_WORKER_TIMEOUT = 50006
# Path: judgelm/constants.py
MODERATION_MSG = "YOUR INPUT VIOLATES OUR CONTENT MODERATION GUIDELINES. PLEASE FIX YOUR INPUT AND TRY AGAIN."
# Path: judgelm/constants.py
CONVERSATION_LIMIT_MSG = "YOU HAVE REACHED THE CONVERSATION LENGTH LIMIT. PLEASE CLEAR HISTORY AND START A NEW CONVERSATION."
# Path: judgelm/constants.py
SERVER_ERROR_MSG = (
"**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
)
# Path: judgelm/constants.py
INACTIVE_MSG = "THIS SESSION HAS BEEN INACTIVE FOR TOO LONG. PLEASE REFRESH THIS PAGE."
# Path: judgelm/constants.py
INPUT_CHAR_LEN_LIMIT = int(os.getenv("JUDGELM_INPUT_CHAR_LEN_LIMIT", 2560))
# Path: judgelm/constants.py
CONVERSATION_TURN_LIMIT = 50
# Path: judgelm/constants.py
SESSION_EXPIRATION_TIME = 3600
# Path: judgelm/model/model_adapter.py
def get_conversation_template(model_path: str) -> Conversation:
"""Get the default conversation template."""
adapter = get_model_adapter(model_path)
return adapter.get_default_conv_template(model_path)
# Path: judgelm/model/model_registry.py
def register_model_info(
full_names: List[str], simple_name: str, link: str, description: str
):
def get_model_info(name: str) -> ModelInfo:
# Path: judgelm/serve/api_provider.py
def anthropic_api_stream_iter(model_name, prompt, temperature, top_p, max_new_tokens):
import anthropic
c = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
# Make requests
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"top_p": top_p,
}
logger.info(f"==== request ====\n{gen_params}")
res = c.completions.create(
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_new_tokens,
temperature=temperature,
top_p=top_p,
model=model_name,
stream=True,
)
text = ""
for chunk in res:
text += chunk.completion
data = {
"text": text,
"error_code": 0,
}
yield data
# Path: judgelm/serve/api_provider.py
def openai_api_stream_iter(model_name, messages, temperature, top_p, max_new_tokens):
import openai
# Make requests
gen_params = {
"model": model_name,
"prompt": messages,
"temperature": temperature,
"top_p": top_p,
}
logger.info(f"==== request ====\n{gen_params}")
res = openai.ChatCompletion.create(
model=model_name, messages=messages, temperature=temperature, stream=True
)
text = ""
for chunk in res:
text += chunk["choices"][0]["delta"].get("content", "")
data = {
"text": text,
"error_code": 0,
}
yield data
# Path: judgelm/serve/api_provider.py
def palm_api_stream_iter(chat, message, temperature, top_p, max_new_tokens):
parameters = {
"temperature": temperature,
"top_p": top_p,
"max_output_tokens": max_new_tokens,
}
gen_params = {
"model": "palm-2",
"prompt": message,
}
gen_params.update(parameters)
logger.info(f"==== request ====\n{gen_params}")
response = chat.send_message(message, **parameters)
content = response.text
pos = 0
while pos < len(content):
# This is a fancy way to simulate token generation latency combined
# with a Poisson process.
pos += random.randint(10, 20)
time.sleep(random.expovariate(50))
data = {
"text": content[:pos],
"error_code": 0,
}
yield data
# Path: judgelm/serve/api_provider.py
def init_palm_chat(model_name):
import vertexai # pip3 install google-cloud-aiplatform
from vertexai.preview.language_models import ChatModel
project_id = os.environ["GCP_PROJECT_ID"]
location = "us-central1"
vertexai.init(project=project_id, location=location)
chat_model = ChatModel.from_pretrained(model_name)
chat = chat_model.start_chat(examples=[])
return chat
# Path: judgelm/utils.py
def extract_jsonl(file_path):
def save_jsonl(data_list, file_path):
def build_logger(logger_name, logger_filename):
def __init__(self, logger, log_level=logging.INFO):
def __getattr__(self, attr):
def write(self, buf):
def flush(self):
def disable_torch_init():
def get_gpu_memory(max_gpus=None):
def violates_moderation(text):
def clean_flant5_ckpt(ckpt_path):
def pretty_print_semaphore(semaphore):
def iter_over_async(
async_gen: AsyncGenerator, event_loop: AbstractEventLoop
) -> Generator:
async def get_next():
def detect_language(text: str) -> str:
def parse_gradio_auth_creds(filename: str):
def is_partial_stop(output: str, stop_str: str):
def _make_r_io_base(f, mode: str):
def jload(f, mode="r"):
def jlload(f, mode='r'):
def run_cmd(cmd: str):
def is_sentence_complete(output: str):
def get_context_length(config):
class StreamToLogger(object):
SEQUENCE_LENGTH_KEYS = [
"max_sequence_length",
"seq_length",
"max_position_embeddings",
"max_seq_len",
"model_max_length",
]
# Path: judgelm/serve/gradio_web_server_v2.py
import argparse
import datetime
import json
import os
import random
import time
import uuid
import gradio as gr
import requests
from collections import defaultdict
from judgelm.conversation import SeparatorStyle
from judgelm.constants import (
LOGDIR,
WORKER_API_TIMEOUT,
ErrorCode,
MODERATION_MSG,
CONVERSATION_LIMIT_MSG,
SERVER_ERROR_MSG,
INACTIVE_MSG,
INPUT_CHAR_LEN_LIMIT,
CONVERSATION_TURN_LIMIT,
SESSION_EXPIRATION_TIME,
)
from judgelm.model.model_adapter import get_conversation_template
from judgelm.model.model_registry import model_info
from judgelm.serve.api_provider import (
anthropic_api_stream_iter,
openai_api_stream_iter,
palm_api_stream_iter,
init_palm_chat,
)
from judgelm.utils import (
build_logger,
violates_moderation,
get_window_url_params_js,
parse_gradio_auth_creds,
)
repetition_penalty = 1.2
else:
repetition_penalty = 1.0
stream_iter = model_worker_stream_iter(
conv,
model_name,
worker_addr,
prompt,
temperature,
repetition_penalty,
top_p,
max_new_tokens,
)
conv.update_last_message("▌")
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 6
try:
for data in stream_iter:
if data["error_code"] == 0:
output = data["text"].strip()
if "vicuna" in model_name:
output = post_process_code(output)
conv.update_last_message(output + "▌")
yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 6
else:
output = data["text"] + f"\n\n(error_code: {data['error_code']})"
conv.update_last_message(output)
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
disable_btn,
)
return
time.sleep(0.015)
except requests.exceptions.RequestException as e:
conv.update_last_message(
f"{SERVER_ERROR_MSG}\n\n"
f"(error_code: {ErrorCode.GRADIO_REQUEST_ERROR}, {e})"
)
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
disable_btn,
)
return
except Exception as e:
conv.update_last_message(
f"{SERVER_ERROR_MSG}\n\n"
f"(error_code: {ErrorCode.GRADIO_STREAM_UNKNOWN_ERROR}, {e})"
)
yield (state, state.to_gradio_chatbot()) + (
disable_btn,
disable_btn,
disable_btn,
enable_btn,
enable_btn,
disable_btn,
)
return
# Delete "▌"
conv.update_last_message(conv.messages[-1][-1][:-1])
yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 6
finish_tstamp = time.time()
logger.info(f"{output}")
with open(get_conv_log_filename(), "a") as fout:
data = {
"tstamp": round(finish_tstamp, 4),
"type": "chat",
"model": model_name,
"gen_params": {
"temperature": temperature,
"top_p": top_p,
"max_new_tokens": max_new_tokens,
},
"start": round(start_tstamp, 4),
"finish": round(finish_tstamp, 4),
"state": state.dict(),
"ip": request.client.host,
}
fout.write(json.dumps(data) + "\n")
def add_answer(answer_num):
return_list = []
for i in range(int(answer_num)+1):
return_list.append(gr.update(visible=True))
for i in range(int(answer_num)+1, 10):
return_list.append(gr.update(visible=False))
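    # NOTE: return_list built above is unused; the tuple returned below directly drives
    # the visibility/value updates of the answer boxes, the answer counter, and a button state.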
return (gr.update(visible=True), ) * int(answer_num) + (gr.update(visible=True, value=""), ) + ("", ) * (9-int(answer_num)) + (answer_num+1, enable_btn)
def reduce_answer(answer_num):
if int(answer_num) - 1 == 2:
return (gr.update(visible=True), ) * int(answer_num-1) + (gr.update(visible=False, value=""), ) + ("", ) * (10-int(answer_num)) + (answer_num-1, disable_btn)
else:
return (gr.update(visible=True), ) * int(answer_num-1) + (gr.update(visible=False, value=""), ) + ("", ) * (10-int(answer_num)) + (answer_num-1, enable_btn)
block_css = """
h1 {
text-align: center;
}
#notice_markdown {
font-size: 104%
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
| padding-top: 6px; |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: EulerSearch/embedding_studio
# Path: embedding_studio/core/config.py
class Settings(BaseSettings):
API_V1_STR: str = "/api/v1"
SECRET_KEY: str = secrets.token_urlsafe(32)
ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []
FINETUNING_MONGO_HOST: str = os.getenv("FINETUNING_MONGO_HOST", "mongo")
FINETUNING_MONGO_PORT: int = os.getenv("FINETUNING_MONGO_PORT", 27017)
FINETUNING_MONGO_DB_NAME: str = os.getenv(
"FINETUNING_MONGO_DB_NAME", "embedding_studio"
)
FINETUNING_MONGO_USERNAME: str = os.getenv(
"FINETUNING_MONGO_USERNAME", "root"
)
FINETUNING_MONGO_PASSWORD: str = os.getenv(
"FINETUNING_MONGO_PASSWORD", "mongopassword"
)
FINETUNING_MONGO_URL: str = (
f"mongodb://{FINETUNING_MONGO_USERNAME}:{FINETUNING_MONGO_PASSWORD}@"
f"{FINETUNING_MONGO_HOST}:{FINETUNING_MONGO_PORT}"
)
CLICKSTREAM_MONGO_HOST: str = os.getenv("CLICKSTREAM_MONGO_HOST", "mongo")
CLICKSTREAM_MONGO_PORT: int = os.getenv("CLICKSTREAM_MONGO_PORT", 27017)
CLICKSTREAM_MONGO_DB_NAME: str = os.getenv(
"CLICKSTREAM_MONGO_DB_NAME", "embedding_studio"
)
CLICKSTREAM_MONGO_USERNAME: str = os.getenv(
"CLICKSTREAM_MONGO_USERNAME", "root"
)
CLICKSTREAM_MONGO_PASSWORD: str = os.getenv(
"CLICKSTREAM_MONGO_PASSWORD", "mongopassword"
)
CLICKSTREAM_MONGO_URL: str = (
f"mongodb://{CLICKSTREAM_MONGO_USERNAME}:{CLICKSTREAM_MONGO_PASSWORD}@"
f"{CLICKSTREAM_MONGO_HOST}:{CLICKSTREAM_MONGO_PORT}"
)
REDIS_HOST: str = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT: int = os.getenv("REDIS_PORT", 6379)
REDIS_PASSWORD: str = os.getenv("REDIS_PASSWORD", "redispassword")
REDIS_URL: str = f"redis://{REDIS_HOST}:{REDIS_PORT}/0"
MINIO_HOST: str = os.getenv("MINIO_HOST", "localhost")
MINIO_PORT: int = os.getenv("MINIO_PORT", 9000)
MINIO_ROOT_USER: str = os.getenv("MINIO_ROOT_USER", "root")
MINIO_ROOT_PASSWORD: str = os.getenv(
"MINIO_ROOT_PASSWORD", "miniopassword"
)
MINIO_DEFAULT_BUCKETS: str = os.getenv(
"MINIO_DEFAULT_BUCKETS", "embeddingstudio"
)
MINIO_ACCESS_KEY: str = os.getenv(
"MINIO_ACCESS_KEY", "mtGNiEvoTL6C0EXAMPLE"
)
MINIO_SECRET_KEY: str = os.getenv(
"MINIO_SECRET_KEY", "HY5JserXAaWmphNyCpQPEXAMPLEKEYEXAMPLEKEY"
)
MYSQL_HOST: str = os.getenv("MYSQL_HOST", "localhost")
MYSQL_PORT: int = os.getenv("MYSQL_PORT", 3306)
MYSQL_DATABASE: str = os.getenv("MYSQL_DATABASE", "mlflow")
MYSQL_USER: str = os.getenv("MYSQL_USER", "mlflow_user")
MYSQL_PASSWORD: str = os.getenv("MYSQL_PASSWORD", "Baxp3O5rUvpIxiD77BfZ")
MYSQL_ROOT_PASSWORD: str = os.getenv(
"MYSQL_ROOT_PASSWORD", "PrK5qmPTDsm2IYKvHVG8"
)
MLFLOW_HOST: str = os.getenv("MLFLOW_HOST", "localhost")
MLFLOW_PORT: int = os.getenv("MLFLOW_PORT", 5001)
MLFLOW_TRACKING_URI: str = f"http://{MLFLOW_HOST}:{MLFLOW_PORT}"
ES_PLUGINS_PATH: str = os.getenv("ES_PLUGINS_PATH", "plugins")
FINE_TUNING_WORKER_MAX_RETRIES: int = os.getenv(
"FINE_TUNING_WORKER_MAX_RETRIES", 3
)
FINE_TUNING_WORKER_TIME_LIMIT: int = os.getenv(
"FINE_TUNING_WORKER_TIME_LIMIT", 18000000
)
DEFAULT_MAX_ATTEMPTS: int = os.getenv("DEFAULT_MAX_ATTEMPTS", 3)
DEFAULT_WAIT_TIME_SECONDS: float = os.getenv(
"DEFAULT_WAIT_TIME_SECONDS", 3.0
)
S3_READ_CREDENTIALS_ATTEMPTS: int = os.getenv(
"S3_READ_CREDENTIALS_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
S3_READ_WAIT_TIME_SECONDS: float = os.getenv(
"S3_READ_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
S3_DOWNLOAD_DATA_ATTEMPTS: int = os.getenv(
"S3_DOWNLOAD_DATA_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS: float = os.getenv(
"S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_LOG_METRIC_ATTEMPTS: int = os.getenv(
"MLFLOW_LOG_METRIC_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_LOG_PARAM_ATTEMPTS: int = os.getenv(
"MLFLOW_LOG_PARAM_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_LOG_MODEL_ATTEMPTS: int = os.getenv(
"MLFLOW_LOG_MODEL_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_LOAD_MODEL_ATTEMPTS: int = os.getenv(
"MLFLOW_LOAD_MODEL_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_DELETE_MODEL_ATTEMPTS: int = os.getenv(
"MLFLOW_DELETE_MODEL_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_SEARCH_RUNS_ATTEMPTS: int = os.getenv(
"MLFLOW_SEARCH_RUNS_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_END_RUN_ATTEMPTS: int = os.getenv(
"MLFLOW_END_RUN_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_END_RUN_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_END_RUN_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_GET_RUN_ATTEMPTS: int = os.getenv(
"MLFLOW_GET_RUN_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_GET_RUN_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_GET_RUN_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS: int = os.getenv(
"MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS",
DEFAULT_WAIT_TIME_SECONDS,
)
MLFLOW_DELETE_EXPERIMENT_ATTEMPTS: int = os.getenv(
"MLFLOW_DELETE_EXPERIMENT_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_CREATE_EXPERIMENT_ATTEMPTS: int = os.getenv(
"MLFLOW_CREATE_EXPERIMENT_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
MLFLOW_GET_EXPERIMENT_ATTEMPTS: int = os.getenv(
"MLFLOW_GET_EXPERIMENT_ATTEMPTS", DEFAULT_MAX_ATTEMPTS
)
MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(
"MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS", DEFAULT_WAIT_TIME_SECONDS
)
CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC: int = os.getenv(
"CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC", 12 * 60 * 60
)
CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC: int = os.getenv(
"CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC", 5 * 60
)
# Path: embedding_studio/embeddings/models/interface.py
class EmbeddingsModelInterface(pl.LightningModule):
def __init__(self, same_query_and_items: bool = False):
"""In search we have two entities, which could be multi domain: query and search result (item).
This is the interface we used in fine-tuning procedure.
:param same_query_and_items: are query and items models acutally the same model (default: False)
"""
super(EmbeddingsModelInterface, self).__init__()
self.same_query_and_items = same_query_and_items
@abstractmethod
def get_query_model_params(self) -> Iterator[Parameter]:
pass
@abstractmethod
def get_items_model_params(self) -> Iterator[Parameter]:
pass
@abstractmethod
def fix_query_model(self, num_fixed_layers: int):
"""One of fine-tuning hyperparams is num of fixed layers at a query model
:param num_fixed_layers: how many layers to fix
"""
@abstractmethod
def unfix_query_model(self):
"""Unfix all layers of a query model."""
@abstractmethod
def fix_item_model(self, num_fixed_layers: int):
"""One of fine-tuning hyperparams is num of fixed layers at an item model
:param num_fixed_layers: how many layers to fix
"""
@abstractmethod
def unfix_item_model(self):
"""Unfix all layers of an item model."""
@abstractmethod
def forward_query(self, query: Any) -> FloatTensor:
pass
@abstractmethod
def forward_items(self, items: List[Any]) -> FloatTensor:
pass
# Path: embedding_studio/utils/mlflow_utils.py
def get_experiment_id_by_name(experiment_name: str) -> str:
"""
Given an experiment name, this function returns the experiment ID.
"""
experiment = mlflow.get_experiment_by_name(experiment_name)
return experiment.experiment_id if experiment else None
# Path: embedding_studio/utils/mlflow_utils.py
def get_run_id_by_name(experiment_id: str, run_name: str) -> str:
"""
Given an experiment ID and run name, this function returns the run ID
associated with that run name within the specified experiment.
"""
runs = mlflow.search_runs(experiment_ids=[experiment_id])
if runs.shape[0] == 0:
return None
# Filter runs by the given run name
matching_runs = runs[runs.get("tags.mlflow.runName") == run_name]
# Return run_id if found, else return None
return matching_runs.iloc[0]["run_id"] if not matching_runs.empty else None
# Path: embedding_studio/workers/fine_tuning/experiments/finetuning_iteration.py
EXPERIMENT_PREFIX = "iteration"
# Path: embedding_studio/workers/fine_tuning/experiments/finetuning_iteration.py
class FineTuningIteration(BaseModel):
"""Fine tuning iteration.
:param batch_id: session batch id
:param plugin_name: name of tuned embedding (default: "")
"""
batch_id: str = ""
plugin_name: str = ""
class Config:
arbitrary_types_allowed = True
def __str__(self) -> str:
return (
f"{EXPERIMENT_PREFIX} / {self.plugin_name} / " + f"{self.batch_id}"
)
# Path: embedding_studio/workers/fine_tuning/experiments/finetuning_params.py
class FineTuningParams(BaseModel):
"""Params of fine-tuning procedure
:param num_fixed_layers: number of fixed embeddings layers
:param query_lr: learning rate of query model optimizer
:param items_lr: learning rate of items model optimizer
:param query_weight_decay: weight decay of query model optimizer
:param items_weight_decay: weight decay of items model optimizer
:param margin: margin from MarginRankingLoss
:param not_irrelevant_only: use only not irrelevant sessions
:param negative_downsampling: ratio of negative samples to be used
:param min_abs_difference_threshold: filter out soft pairs abs(neg_dist - pos_dist) < small value (default: 0.0)
:param max_abs_difference_threshold: filter out hard pairs abs(neg_dist - pos_dist) > huge value (default: 1.0)
:param examples_order: order of passing examples to a trainer (default: None)
"""
num_fixed_layers: int
query_lr: float
items_lr: float
query_weight_decay: float
items_weight_decay: float
margin: float
not_irrelevant_only: bool
negative_downsampling: float
min_abs_difference_threshold: float = 0.0
max_abs_difference_threshold: float = 1.0
examples_order: List[ExamplesType] = [ExamplesType.all_examples]
class Config:
arbitrary_types_allowed = True
@validator("examples_order", pre=True, always=True)
def validate_examples_order(cls, value):
if isinstance(value, str):
value = list(map(int, value.split(",")))
elif isinstance(value, tuple):
value = list(value)
return [ExamplesType(v) for v in value]
@validator("items_lr", "query_lr", pre=True, always=True)
def validate_positive_float(cls, value):
if not (isinstance(value, float) and value > 0):
raise ValueError(f"{value} must be a positive float")
return value
@validator(
"items_weight_decay", "query_weight_decay", pre=True, always=True
)
def validate_non_negative_float(cls, value):
if not (isinstance(value, float) and value >= 0):
raise ValueError(f"{value} must be a non-negative float")
return value
@validator("margin", pre=True, always=True)
def validate_non_negative_float_margin(cls, value):
if not (isinstance(value, float) and value >= 0):
raise ValueError(f"{value} must be a non-negative float")
return value
@validator("num_fixed_layers", pre=True, always=True)
def validate_non_negative_int(cls, value):
if not (isinstance(value, int) and value >= 0):
raise ValueError(f"{value} must be a non-negative integer")
return value
@root_validator(skip_on_failure=True)
def validate_example_order(cls, values):
examples_order = values.get("examples_order")
if examples_order:
if isinstance(examples_order, str):
examples_order = list(map(int, examples_order.split(",")))
elif isinstance(examples_order, tuple):
examples_order = list(examples_order)
values["examples_order"] = [
ExamplesType(v) for v in examples_order
]
return values
@property
def id(self) -> str:
# Convert the value to bytes (assuming it's a string)
value_bytes: bytes = str(self).encode("utf-8")
# Create a hash object
hash_object = hashlib.sha256()
# Update the hash object with the value
hash_object.update(value_bytes)
# Get the hexadecimal representation of the hash
unique_id: str = hash_object.hexdigest()
return unique_id
def __str__(self) -> str:
vals: List[str] = []
for key, value in sorted(dict(self).items()):
value = (
",".join(map(str, value)) if isinstance(value, list) else value
)
vals.append(f"{key}: {value}")
return " / ".join(vals)
# Path: embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py
class MetricsAccumulator:
def __init__(
self,
name: str,
calc_mean: bool = False,
calc_sliding: bool = False,
calc_min: bool = False,
calc_max: bool = False,
window_size: int = 10,
):
"""Accumulator of metric values + calculator of aggregations like mean, max, min, sliding_mean.
        :param name: metric name (metrics with other names will be ignored)
:param calc_mean: should accumulator calculate mean value (default: False)
:param calc_sliding: should accumulator calculate sliding mean value (default: False)
:param calc_min: should accumulator calculate min value (default: False)
:param calc_max: should accumulator calculate max value (default: False)
:param window_size: size of sliding window (default: 10)
"""
if not isinstance(name, str) or len(name) == 0:
raise ValueError("MetricsAccumulator's name should not be empty")
self._name = name
if not isinstance(calc_mean, bool):
raise ValueError("calc_mean value should be bool")
self._calc_mean = calc_mean
if not isinstance(calc_sliding, bool):
raise ValueError("calc_sliding value should be bool")
self._calc_sliding = calc_sliding
if not isinstance(calc_min, bool):
raise ValueError("calc_min value should be bool")
self._calc_min = calc_min
if not isinstance(calc_max, bool):
raise ValueError("calc_max value should be bool")
self._calc_max = calc_max
if not isinstance(window_size, int) or window_size <= 1:
raise ValueError(
"window_size value should be integer with value more than 1"
)
self._window_size = window_size
self._values = []
@property
def name(self) -> str:
return self._name
def clear(self):
"""Clear accumulator"""
self._values = []
def accumulate(self, value: MetricValue) -> List[Tuple[str, float]]:
"""Add metric value to an accumulator.
:param value: metric to be accumulated
:return: aggregations
"""
if self.name == value.name:
self._values.append(value.value)
return self.aggregate()
return []
def aggregate(self) -> List[Tuple[str, float]]:
"""Aggregate accumulated metrics
:return: metric aggregations (last, mean, sliding, min, max)
"""
aggregations: List[Tuple[str, float]] = []
if len(self._values) > 0:
aggregations.append((self.name, self._values[-1]))
if self._calc_mean:
aggregations.append(
(f"mean_{self.name}", float(np.mean(self._values)))
)
if self._calc_sliding:
slide_value = float(
np.mean(self._values)
if len(self._values) < self._window_size
else np.mean(self._values[-self._window_size :])
)
aggregations.append((f"sliding_{self.name}", slide_value))
if self._calc_min:
aggregations.append((f"min_{self.name}", np.min(self._values)))
if self._calc_max:
aggregations.append((f"max_{self.name}", np.max(self._values)))
return aggregations
# Path: embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py
class MetricValue:
def __init__(self, name: str, value: float):
if not isinstance(name, str) or len(name) == 0:
raise ValueError("MetricValue's name should not be empty")
self._name = name
if not isinstance(value, float):
raise ValueError("MetricValue's value should not be numeric")
self._value = value
@property
def name(self) -> str:
return self._name
@property
def value(self) -> float:
return self._value
def add_prefix(self, prefix: str):
self._name = f"{prefix}_{self._name}"
return self
# Path: embedding_studio/workers/fine_tuning/utils/config.py
class RetryConfig(BaseModel):
default_params: RetryParams = RetryParams()
_specific_retries: Dict[str, RetryParams] = dict()
def __getitem__(self, item: str) -> RetryParams:
return self._specific_retries.get(item, self.default_params)
def __setitem__(self, key: str, value: RetryParams):
self._specific_retries[key] = value
# Path: embedding_studio/workers/fine_tuning/utils/config.py
class RetryParams(BaseModel):
max_attempts: int = 3
wait_time_seconds: int = 3
# Path: embedding_studio/workers/fine_tuning/utils/exceptions.py
class MaxAttemptsReachedException(Exception):
def __init__(self, attempts: int):
super(MaxAttemptsReachedException, self).__init__(
f"Reached maximum number of attempts: {attempts}"
)
# Path: embedding_studio/workers/fine_tuning/utils/retry.py
def retry_method(name: str = None):
def decorator(func):
"""Decorator to run provided class method with attempts"""
def wrapper(self, *args, **kwargs):
func_name = name if name else func.__name__
retry_params = self.retry_config[func_name]
if (
retry_params.max_attempts is None
or retry_params.max_attempts <= 1
):
return func(self, *args, **kwargs)
attempts = 0
exception = None
while attempts < retry_params.max_attempts:
try:
result = func(self, *args, **kwargs)
# If the function succeeds, return the result
return result
except RequestException as e:
if (
hasattr(e, "response")
and e.response is not None
and 500 <= e.response.status_code < 600
):
logger.error(
f"Server Error (5xx): {e.response.status_code}"
)
# Handle server error appropriately, e.g., retry, log, or raise a custom exception
exception = e
else:
logger.exception(f"Request Exception: {e}")
raise e
except Timeout as e:
logger.error(f"Timeout: {e}")
exception = e
except ConnectionError as e:
logger.error(f"Connection error: {e}")
exception = e
except Exception as e: # Handle other request exceptions
if (
hasattr(self, "attempt_exception_types")
and type(e) in self.attempt_exception_types
) or (
hasattr(self, "is_retryable_error")
and self.is_retryable_error(e)
):
logger.error(
f"Catch exception with type {type(e).__name__} that leads to new attempt"
)
exception = e
else:
raise
if exception is not None:
logger.info(
f"Attempt {attempts + 1} failed with error: {exception}"
)
attempts += 1
time.sleep(retry_params.wait_time_seconds)
raise MaxAttemptsReachedException(
retry_params.max_attempts
) from exception
return wrapper
return decorator
# Path: embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py
import logging
import os
import subprocess
import urllib.parse
import mlflow
import numpy as np
import pandas as pd
from socket import setdefaulttimeout
from typing import Dict, List, Optional, Tuple
from mlflow.entities import Experiment
from mlflow.exceptions import MlflowException, RestException
from embedding_studio.core.config import settings
from embedding_studio.embeddings.models.interface import (
EmbeddingsModelInterface,
)
from embedding_studio.utils.mlflow_utils import (
get_experiment_id_by_name,
get_run_id_by_name,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_iteration import (
EXPERIMENT_PREFIX,
FineTuningIteration,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_params import (
FineTuningParams,
)
from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
MetricsAccumulator,
MetricValue,
)
from embedding_studio.workers.fine_tuning.utils.config import (
RetryConfig,
RetryParams,
)
from embedding_studio.workers.fine_tuning.utils.exceptions import (
MaxAttemptsReachedException,
)
from embedding_studio.workers.fine_tuning.utils.retry import retry_method
INITIAL_EXPERIMENT_NAME: str = f"{EXPERIMENT_PREFIX} / initial"
INITIAL_RUN_NAME: str = "initial_model"
DEFAULT_TIMEOUT: int = 120000
MODEL_ARTIFACT_PATH = "model/data/model.pth"
# MLflow uploads models using urllib3; if the model is heavy enough, the default timeout is not sufficient.
# That's why we increase it here. TODO: check from time to time whether this issue is resolved by MLflow.
setdefaulttimeout(DEFAULT_TIMEOUT)
logger = logging.getLogger(__name__)
def _get_base_requirements():
try:
logger.info("Generate requirements with poetry")
# Run the poetry export command
result = subprocess.run(
[
"poetry",
"export",
f"--directory={os.path.dirname(__file__)}",
"--with",
"ml",
"-f",
"requirements.txt",
"--without-hashes",
],
capture_output=True,
text=True,
check=True,
)
# Get the requirements from the standard output
requirements = result.stdout.strip().split("\n")
return requirements
except subprocess.CalledProcessError as e:
| print(f"Error running poetry export: {e}") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: innnky/ar-vits
# Path: module/commons.py
def init_weights(m, mean=0.0, std=0.01):
def get_padding(kernel_size, dilation=1):
def convert_pad_shape(pad_shape):
def intersperse(lst, item):
def kl_divergence(m_p, logs_p, m_q, logs_q):
def rand_gumbel(shape):
def rand_gumbel_like(x):
def slice_segments(x, ids_str, segment_size=4):
def rand_slice_segments(x, x_lengths=None, segment_size=4):
def get_timing_signal_1d(
length, channels, min_timescale=1.0, max_timescale=1.0e4):
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
def subsequent_mask(length):
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
def convert_pad_shape(pad_shape):
def shift_1d(x):
def sequence_mask(length, max_length=None):
def generate_path(duration, mask):
def clip_grad_value_(parameters, clip_value, norm_type=2):
def squeeze(x, x_mask=None, n_sqz=2):
def unsqueeze(x, x_mask=None, n_sqz=2):
# Path: module/modules.py
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
class ConvReluNorm(nn.Module):
class DDSConv(nn.Module):
class WN(torch.nn.Module):
class ResBlock1(torch.nn.Module):
class ResBlock2(torch.nn.Module):
class Log(nn.Module):
class Flip(nn.Module):
class ElementwiseAffine(nn.Module):
class ResidualCouplingLayer(nn.Module):
class ConvFlow(nn.Module):
class LinearNorm(nn.Module):
class Mish(nn.Module):
class Conv1dGLU(nn.Module):
class ConvNorm(nn.Module):
class MultiHeadAttention(nn.Module):
class ScaledDotProductAttention(nn.Module):
class MelStyleEncoder(nn.Module):
class MelStyleEncoderVAE(nn.Module):
class ActNorm(nn.Module):
class InvConvNear(nn.Module):
def __init__(self, channels, eps=1e-5):
def forward(self, x):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
def forward(self, x, x_mask):
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
def forward(self, x, x_mask, g=None):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
def forward(self, x, x_mask, g=None, **kwargs):
def remove_weight_norm(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
def forward(self, x, x_mask=None):
def remove_weight_norm(self):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
def forward(self, x, x_mask=None):
def remove_weight_norm(self):
def forward(self, x, x_mask, reverse=False, **kwargs):
def forward(self, x, *args, reverse=False, **kwargs):
def __init__(self, channels):
def forward(self, x, x_mask, reverse=False, **kwargs):
def __init__(self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False):
def forward(self, x, x_mask, g=None, reverse=False):
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
def forward(self, x, x_mask, g=None, reverse=False):
def __init__(self,
in_channels,
out_channels,
bias=True,
spectral_norm=False,
):
def forward(self, input):
def __init__(self):
def forward(self, x):
def __init__(self, in_channels, out_channels, kernel_size, dropout):
def forward(self, x):
def __init__(self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
spectral_norm=False,
):
def forward(self, input):
def __init__(self, n_head, d_model, d_k, d_v, dropout=0., spectral_norm=False):
def forward(self, x, mask=None):
def __init__(self, temperature, dropout):
def forward(self, q, k, v, mask=None):
def __init__(self, n_mel_channels=80,
style_hidden=128,
style_vector_dim=256,
style_kernel_size=5,
style_head=2,
dropout=0.1):
def temporal_avg_pool(self, x, mask=None):
def forward(self, x, mask=None):
def __init__(self, spec_channels, z_latent_dim, emb_dim):
def reparameterize(self, mu, logvar):
def forward(self, inputs, mask=None):
def infer(self, inputs=None, random_sample=False, manual_latent=None):
def __init__(self, channels, ddi=False, **kwargs):
def forward(self, x, x_mask=None, g=None, reverse=False, **kwargs):
def store_inverse(self):
def set_ddi(self, ddi):
def initialize(self, x, x_mask):
def __init__(self, channels, n_split=4, no_jacobian=False, **kwargs):
def forward(self, x, x_mask=None, g=None, reverse=False, **kwargs):
def store_inverse(self):
# Path: module/attentions.py
class Encoder(nn.Module):
class Decoder(nn.Module):
class MultiHeadAttention(nn.Module):
class FFN(nn.Module):
class Depthwise_Separable_Conv1D(nn.Module):
class Depthwise_Separable_TransposeConv1D(nn.Module):
class FFT(nn.Module):
class TransformerCouplingLayer(nn.Module):
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4,isflow=False, **kwargs):
def forward(self, x, x_mask, g=None):
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
def forward(self, x, x_mask, h, h_mask):
def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
def forward(self, x, c, attn_mask=None):
def attention(self, query, key, value, mask=None):
def _matmul_with_relative_values(self, x, y):
def _matmul_with_relative_keys(self, x, y):
def _get_relative_embeddings(self, relative_embeddings, length):
def _relative_position_to_absolute_position(self, x):
def _absolute_position_to_relative_position(self, x):
def _attention_bias_proximal(self, length):
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
def forward(self, x, x_mask):
def _causal_padding(self, x):
def _same_padding(self, x):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
bias=True,
padding_mode='zeros', # TODO: refine this type
device=None,
dtype=None
):
def forward(self, input):
def weight_norm(self):
def remove_weight_norm(self):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
bias=True,
dilation=1,
padding_mode='zeros', # TODO: refine this type
device=None,
dtype=None
):
def forward(self, input):
def weight_norm(self):
def remove_weight_norm(self):
def weight_norm_modules(module, name='weight', dim=0):
def remove_weight_norm_modules(module, name='weight'):
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0.,
proximal_bias=False, proximal_init=True, isflow = False, **kwargs):
def forward(self, x, x_mask, g = None):
def __init__(self,
channels,
hidden_channels,
kernel_size,
n_layers,
n_heads,
p_dropout=0,
filter_channels=0,
mean_only=False,
wn_sharing_parameter=None,
gin_channels = 0
):
def forward(self, x, x_mask, g=None, reverse=False):
# Path: module/commons.py
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
# Path: module/commons.py
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
# Path: module/mrte_model.py
class MRTE(nn.Module):
def __init__(self,
content_enc_channels=192,
hidden_size=512,
out_channels=192,
kernel_size=5,
n_heads=4,
ge_layer = 2
):
super(MRTE, self).__init__()
self.cross_attention = MultiHeadAttention(hidden_size,hidden_size,n_heads)
self.c_pre = nn.Conv1d(content_enc_channels,hidden_size, 1)
self.text_pre = nn.Conv1d(content_enc_channels,hidden_size, 1)
self.c_post = nn.Conv1d(hidden_size,out_channels, 1)
def forward(self, ssl_enc, ssl_mask, text, text_mask, ge, test=None):
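        # `test` optionally selects an ablation path: 0 keeps the full cross-attention,
        # 1 skips the text cross-attention entirely, 2 zeroes the content features before attending.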
attn_mask = text_mask.unsqueeze(2) * ssl_mask.unsqueeze(-1)
ssl_enc = self.c_pre(ssl_enc * ssl_mask)
text_enc = self.text_pre(text * text_mask)
if test != None:
if test == 0:
x = self.cross_attention(ssl_enc * ssl_mask, text_enc * text_mask, attn_mask) + ssl_enc + ge
elif test == 1:
x = ssl_enc + ge
elif test ==2:
x = self.cross_attention(ssl_enc*0 * ssl_mask, text_enc * text_mask, attn_mask) + ge
else:
raise ValueError("test should be 0,1,2")
else:
x = self.cross_attention(ssl_enc * ssl_mask, text_enc * text_mask, attn_mask) + ssl_enc + ge
x = self.c_post(x * ssl_mask)
return x
# Path: module/quantize.py
class ResidualVectorQuantizer(nn.Module):
"""Residual Vector Quantizer.
Args:
dimension (int): Dimension of the codebooks.
n_q (int): Number of residual vector quantizers used.
bins (int): Codebook size.
decay (float): Decay for exponential moving average over the codebooks.
kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
kmeans_iters (int): Number of iterations used for kmeans initialization.
threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
that have an exponential moving average cluster size less than the specified threshold with
randomly selected vector from the current batch.
"""
def __init__(
self,
dimension: int = 256,
n_q: int = 8,
bins: int = 1024,
decay: float = 0.99,
kmeans_init: bool = True,
kmeans_iters: int = 50,
threshold_ema_dead_code: int = 2,
):
super().__init__()
self.n_q = n_q
self.dimension = dimension
self.bins = bins
self.decay = decay
self.kmeans_init = kmeans_init
self.kmeans_iters = kmeans_iters
self.threshold_ema_dead_code = threshold_ema_dead_code
self.vq = ResidualVectorQuantization(
dim=self.dimension,
codebook_size=self.bins,
num_quantizers=self.n_q,
decay=self.decay,
kmeans_init=self.kmeans_init,
kmeans_iters=self.kmeans_iters,
threshold_ema_dead_code=self.threshold_ema_dead_code,
)
def forward(self, x: torch.Tensor, n_q: tp.Optional[int] = None, layers: tp.Optional[list] = None) -> QuantizedResult:
"""Residual vector quantization on the given input tensor.
Args:
x (torch.Tensor): Input tensor.
            n_q (int): Number of quantizers used to quantize. Default: All quantizers.
            layers (list): Layers that need to return quantized output. Default: None.
Returns:
QuantizedResult:
The quantized (or approximately quantized) representation with
                the associated number of quantizers and the quantized layers required to return.
"""
n_q = n_q if n_q else self.n_q
if layers and max(layers) >= n_q:
            raise ValueError(f'Last layer index in layers: A {max(layers)}. Number of quantizers in RVQ: B {self.n_q}. A must be less than B.')
quantized, codes, commit_loss, quantized_list = self.vq(x, n_q=n_q, layers=layers)
return quantized, codes, torch.mean(commit_loss), quantized_list
def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None, st: tp.Optional[int] = None) -> torch.Tensor:
"""Encode a given input tensor with the specified sample rate at the given bandwidth.
        The RVQ encode method sets the appropriate number of quantizers to use
and returns indices for each quantizer.
Args:
x (torch.Tensor): Input tensor.
            n_q (int): Number of quantizers used to quantize. Default: All quantizers.
st (int): Start to encode input from which layers. Default: 0.
"""
n_q = n_q if n_q else self.n_q
st = st or 0
codes = self.vq.encode(x, n_q=n_q, st=st)
return codes
def decode(self, codes: torch.Tensor, st: int = 0) -> torch.Tensor:
"""Decode the given codes to the quantized representation.
Args:
codes (torch.Tensor): Input indices for each quantizer.
st (int): Start to decode input codes from which layers. Default: 0.
"""
quantized = self.vq.decode(codes, st=st)
return quantized
# Path: text/symbols.py
# Path: module/models.py
import copy
import math
import torch
from torch import nn
from torch.nn import functional as F
from module import commons
from module import modules
from module import attentions
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from module.commons import init_weights, get_padding
from module.mrte_model import MRTE
from module.quantize import ResidualVectorQuantizer
from text import symbols
from torch.cuda.amp import autocast
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.norm = modules.LayerNorm(out_channels)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
out = self.proj(x) * x_mask
out = self.norm(out)
return out
class Generator(torch.nn.Module):
def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates,
upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)),
k, u, padding=(k - u) // 2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
self.ups.apply(init_weights)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
def forward(self, x, g=None):
x = self.conv_pre(x)
if g is not None:
x = x + self.cond(g)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, modules.LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
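            # average the outputs of the parallel residual blocks for this upsampling stage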
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
self.use_spectral_norm = use_spectral_norm
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, modules.LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
| class DiscriminatorS(torch.nn.Module): |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nv-tlabs/vid2player3d
# Path: vid2player/utils/konia_transform.py
@torch.jit.script
def quaternion_to_angle_axis(
quaternion: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ
) -> torch.Tensor:
"""Convert quaternion vector to angle axis of rotation.
The quaternion should be in (x, y, z, w) or (w, x, y, z) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
quaternion: tensor with quaternions.
order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.
Return:
tensor with angle axis of rotation.
Shape:
- Input: :math:`(*, 4)` where `*` means, any number of dimensions
- Output: :math:`(*, 3)`
Example:
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3
"""
if not quaternion.shape[-1] == 4:
raise ValueError(f"Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}")
if not torch.jit.is_scripting():
if order.name not in QuaternionCoeffOrder.__members__.keys():
raise ValueError(f"order must be one of {QuaternionCoeffOrder.__members__.keys()}")
if order == QuaternionCoeffOrder.XYZW:
warnings.warn(
"`XYZW` quaternion coefficient order is deprecated and"
" will be removed after > 0.6. "
"Please use `QuaternionCoeffOrder.WXYZ` instead."
)
# unpack input and compute conversion
q1: torch.Tensor = torch.tensor([])
q2: torch.Tensor = torch.tensor([])
q3: torch.Tensor = torch.tensor([])
cos_theta: torch.Tensor = torch.tensor([])
if order == QuaternionCoeffOrder.XYZW:
q1 = quaternion[..., 0]
q2 = quaternion[..., 1]
q3 = quaternion[..., 2]
cos_theta = quaternion[..., 3]
else:
cos_theta = quaternion[..., 0]
q1 = quaternion[..., 1]
q2 = quaternion[..., 2]
q3 = quaternion[..., 3]
sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
sin_theta: torch.Tensor = torch.sqrt((sin_squared_theta).clamp_min(eps))
two_theta: torch.Tensor = 2.0 * torch.where(
cos_theta < 0.0, torch_safe_atan2(-sin_theta, -cos_theta), torch_safe_atan2(sin_theta, cos_theta)
)
k_pos: torch.Tensor = safe_zero_division(two_theta, sin_theta, eps)
k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
angle_axis[..., 0] += q1 * k
angle_axis[..., 1] += q2 * k
angle_axis[..., 2] += q3 * k
return angle_axis
# Path: vid2player/utils/konia_transform.py
@torch.jit.script
def angle_axis_to_quaternion(
angle_axis: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ
) -> torch.Tensor:
r"""Convert an angle axis to a quaternion.
The quaternion vector has components in (x, y, z, w) or (w, x, y, z) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
angle_axis: tensor with angle axis.
order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.
Return:
tensor with quaternion.
Shape:
- Input: :math:`(*, 3)` where `*` means, any number of dimensions
- Output: :math:`(*, 4)`
Example:
>>> angle_axis = torch.rand(2, 3) # Nx3
>>> quaternion = angle_axis_to_quaternion(angle_axis, order=QuaternionCoeffOrder.WXYZ) # Nx4
"""
if not angle_axis.shape[-1] == 3:
raise ValueError(f"Input must be a tensor of shape Nx3 or 3. Got {angle_axis.shape}")
if not torch.jit.is_scripting():
if order.name not in QuaternionCoeffOrder.__members__.keys():
raise ValueError(f"order must be one of {QuaternionCoeffOrder.__members__.keys()}")
if order == QuaternionCoeffOrder.XYZW:
warnings.warn(
"`XYZW` quaternion coefficient order is deprecated and"
" will be removed after > 0.6. "
"Please use `QuaternionCoeffOrder.WXYZ` instead."
)
# unpack input and compute conversion
a0: torch.Tensor = angle_axis[..., 0:1]
a1: torch.Tensor = angle_axis[..., 1:2]
a2: torch.Tensor = angle_axis[..., 2:3]
theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
theta: torch.Tensor = torch.sqrt((theta_squared).clamp_min(eps))
half_theta: torch.Tensor = theta * 0.5
mask: torch.Tensor = theta_squared > 0.0
ones: torch.Tensor = torch.ones_like(half_theta)
k_neg: torch.Tensor = 0.5 * ones
k_pos: torch.Tensor = safe_zero_division(torch.sin(half_theta), theta, eps)
k: torch.Tensor = torch.where(mask, k_pos, k_neg)
w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
quaternion: torch.Tensor = torch.zeros(
size=angle_axis.shape[:-1] + (4,), dtype=angle_axis.dtype, device=angle_axis.device
)
if order == QuaternionCoeffOrder.XYZW:
quaternion[..., 0:1] = a0 * k
quaternion[..., 1:2] = a1 * k
quaternion[..., 2:3] = a2 * k
quaternion[..., 3:4] = w
else:
quaternion[..., 1:2] = a0 * k
quaternion[..., 2:3] = a1 * k
quaternion[..., 3:4] = a2 * k
quaternion[..., 0:1] = w
return quaternion
# Path: vid2player/utils/konia_transform.py
@torch.jit.script
def quaternion_to_rotation_matrix(
quaternion: torch.Tensor, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ
) -> torch.Tensor:
r"""Converts a quaternion to a rotation matrix.
The quaternion should be in (x, y, z, w) or (w, x, y, z) format.
Args:
quaternion: a tensor containing a quaternion to be converted.
The tensor can be of shape :math:`(*, 4)`.
order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.
Return:
the rotation matrix of shape :math:`(*, 3, 3)`.
Example:
>>> quaternion = torch.tensor((0., 0., 0., 1.))
>>> quaternion_to_rotation_matrix(quaternion, order=QuaternionCoeffOrder.WXYZ)
tensor([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(quaternion)}")
if not quaternion.shape[-1] == 4:
raise ValueError(f"Input must be a tensor of shape (*, 4). Got {quaternion.shape}")
if not torch.jit.is_scripting():
if order.name not in QuaternionCoeffOrder.__members__.keys():
raise ValueError(f"order must be one of {QuaternionCoeffOrder.__members__.keys()}")
if order == QuaternionCoeffOrder.XYZW:
warnings.warn(
"`XYZW` quaternion coefficient order is deprecated and"
" will be removed after > 0.6. "
"Please use `QuaternionCoeffOrder.WXYZ` instead."
)
# normalize the input quaternion
quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)
# unpack the normalized quaternion components
if order == QuaternionCoeffOrder.XYZW:
x, y, z, w = quaternion_norm[..., 0], quaternion_norm[..., 1], quaternion_norm[..., 2], quaternion_norm[..., 3]
else:
w, x, y, z = quaternion_norm[..., 0], quaternion_norm[..., 1], quaternion_norm[..., 2], quaternion_norm[..., 3]
# compute the actual conversion
tx: torch.Tensor = 2.0 * x
ty: torch.Tensor = 2.0 * y
tz: torch.Tensor = 2.0 * z
twx: torch.Tensor = tx * w
twy: torch.Tensor = ty * w
twz: torch.Tensor = tz * w
txx: torch.Tensor = tx * x
txy: torch.Tensor = ty * x
txz: torch.Tensor = tz * x
tyy: torch.Tensor = ty * y
tyz: torch.Tensor = tz * y
tzz: torch.Tensor = tz * z
one: torch.Tensor = torch.tensor(1.0)
matrix: torch.Tensor = torch.stack(
(
one - (tyy + tzz),
txy - twz,
txz + twy,
txy + twz,
one - (txx + tzz),
tyz - twx,
txz - twy,
tyz + twx,
one - (txx + tyy),
),
dim=-1,
).view(quaternion.shape[:-1] + (3, 3))
# if len(quaternion.shape) == 1:
# matrix = torch.squeeze(matrix, dim=0)
return matrix
# Path: vid2player/utils/konia_transform.py
@torch.jit.script
def rotation_matrix_to_quaternion(
rotation_matrix: torch.Tensor, eps: float = 1.0e-6, order: QuaternionCoeffOrder = QuaternionCoeffOrder.WXYZ
) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to 4d quaternion vector.
The quaternion vector has components in (w, x, y, z) or (x, y, z, w) format.
.. note::
The (x, y, z, w) order is going to be deprecated in favor of efficiency.
Args:
rotation_matrix: the rotation matrix to convert.
eps: small value to avoid zero division.
order: quaternion coefficient order. Note: 'xyzw' will be deprecated in favor of 'wxyz'.
Return:
the rotation in quaternion.
Shape:
- Input: :math:`(*, 3, 3)`
- Output: :math:`(*, 4)`
Example:
>>> input = torch.rand(4, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_quaternion(input, eps=torch.finfo(input.dtype).eps,
... order=QuaternionCoeffOrder.WXYZ) # Nx4
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(rotation_matrix)}")
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}")
if not torch.jit.is_scripting():
if order.name not in QuaternionCoeffOrder.__members__.keys():
raise ValueError(f"order must be one of {QuaternionCoeffOrder.__members__.keys()}")
if order == QuaternionCoeffOrder.XYZW:
warnings.warn(
"`XYZW` quaternion coefficient order is deprecated and"
" will be removed after > 0.6. "
"Please use `QuaternionCoeffOrder.WXYZ` instead."
)
m00, m01, m02 = rotation_matrix[..., 0, 0], rotation_matrix[..., 0, 1], rotation_matrix[..., 0, 2]
m10, m11, m12 = rotation_matrix[..., 1, 0], rotation_matrix[..., 1, 1], rotation_matrix[..., 1, 2]
m20, m21, m22 = rotation_matrix[..., 2, 0], rotation_matrix[..., 2, 1], rotation_matrix[..., 2, 2]
trace: torch.Tensor = m00 + m11 + m22
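    # Four candidate quaternions are computed below, one per branch of the classic
    # trace / largest-diagonal-element method; the numerically stable one is selected
    # at the end via the torch.where cascade.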
sq = torch.sqrt((trace + 1.0).clamp_min(eps)) * 2.0 # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_zero_division(m21 - m12, sq)
qy = safe_zero_division(m02 - m20, sq)
qz = safe_zero_division(m10 - m01, sq)
if order == QuaternionCoeffOrder.XYZW:
trace_positive_cond = torch.stack((qx, qy, qz, qw), dim=-1)
    else:
        trace_positive_cond = torch.stack((qw, qx, qy, qz), dim=-1)
sq = torch.sqrt((1.0 + m00 - m11 - m22).clamp_min(eps)) * 2.0 # sq = 4 * qx.
qw = safe_zero_division(m21 - m12, sq)
qx = 0.25 * sq
qy = safe_zero_division(m01 + m10, sq)
qz = safe_zero_division(m02 + m20, sq)
    if order == QuaternionCoeffOrder.XYZW:
        cond_1 = torch.stack((qx, qy, qz, qw), dim=-1)
    else:
        cond_1 = torch.stack((qw, qx, qy, qz), dim=-1)
sq = torch.sqrt((1.0 + m11 - m00 - m22).clamp_min(eps)) * 2.0 # sq = 4 * qy.
qw = safe_zero_division(m02 - m20, sq)
qx = safe_zero_division(m01 + m10, sq)
qy = 0.25 * sq
qz = safe_zero_division(m12 + m21, sq)
    if order == QuaternionCoeffOrder.XYZW:
        cond_2 = torch.stack((qx, qy, qz, qw), dim=-1)
    else:
        cond_2 = torch.stack((qw, qx, qy, qz), dim=-1)
sq = torch.sqrt((1.0 + m22 - m00 - m11).clamp_min(eps)) * 2.0 # sq = 4 * qz.
qw = safe_zero_division(m10 - m01, sq)
qx = safe_zero_division(m02 + m20, sq)
qy = safe_zero_division(m12 + m21, sq)
qz = 0.25 * sq
    if order == QuaternionCoeffOrder.XYZW:
        cond_3 = torch.stack((qx, qy, qz, qw), dim=-1)
    else:
        cond_3 = torch.stack((qw, qx, qy, qz), dim=-1)
where_2 = torch.where((m11 > m22).unsqueeze(-1), cond_2, cond_3)
where_1 = torch.where(((m00 > m11) & (m00 > m22)).unsqueeze(-1), cond_1, where_2)
quaternion: torch.Tensor = torch.where((trace > 0.0).unsqueeze(-1), trace_positive_cond, where_1)
return quaternion
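# A minimal round-trip sketch, assuming both converters above are in scope: the
# identity rotation comes back as the unit quaternion in WXYZ order.
_demo_q = rotation_matrix_to_quaternion(torch.eye(3).unsqueeze(0), order=QuaternionCoeffOrder.WXYZ)
assert torch.allclose(_demo_q, torch.tensor([[1.0, 0.0, 0.0, 0.0]]))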
# Path: vid2player/utils/konia_transform.py
@torch.jit.script
def rotation_matrix_to_angle_axis(rotation_matrix: torch.Tensor) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix: rotation matrix.
Returns:
Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(rotation_matrix)}")
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(f"Input size must be a (*, 3, 3) tensor. Got {rotation_matrix.shape}")
quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix, order=QuaternionCoeffOrder.WXYZ)
return quaternion_to_angle_axis(quaternion, order=QuaternionCoeffOrder.WXYZ)
# Path: vid2player/utils/konia_transform.py
@torch.jit.script
def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix.
Args:
angle_axis: tensor of 3d vector of axis-angle rotations.
Returns:
tensor of 3x3 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 3, 3)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = angle_axis_to_rotation_matrix(input) # Nx3x3
"""
if not isinstance(angle_axis, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(angle_axis)))
if not angle_axis.shape[-1] == 3:
raise ValueError("Input size must be a (*, 3) tensor. Got {}".format(angle_axis.shape))
orig_shape = angle_axis.shape
angle_axis = angle_axis.reshape(-1, 3)
# stolen from ceres/rotation.h
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
# compute rotation matrices
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
# create mask to handle both cases
eps = 1e-6
mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
mask_pos = (mask).type_as(theta2)
mask_neg = (mask == torch.tensor(False)).type_as(theta2) # noqa
# create output pose matrix
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)
# fill output matrix with masked values
rotation_matrix[..., :3, :3] = mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
rotation_matrix = rotation_matrix.view(orig_shape[:-1] + (3, 3))
return rotation_matrix # Nx3x3
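# A small sanity sketch, assuming the helper above is in scope: a zero axis-angle
# vector takes the Taylor branch and yields (approximately) the identity rotation.
assert torch.allclose(angle_axis_to_rotation_matrix(torch.zeros(1, 3)),
                      torch.eye(3).unsqueeze(0), atol=1e-6)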
# Path: vid2player/utils/torch_transform.py
import numpy as np
import torch
from .konia_transform import quaternion_to_angle_axis, angle_axis_to_quaternion, quaternion_to_rotation_matrix, rotation_matrix_to_quaternion, rotation_matrix_to_angle_axis, angle_axis_to_rotation_matrix
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
| qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: vLAR-group/RayDF
# Path: config.py
def config_parser():
parser = configargparse.ArgumentParser()
parser.add_argument('--config', is_config_file=True,
help='config file path')
parser.add_argument("--eval_only", action='store_true',
help='only evaluation with pretrained model')
# parameterization options
parser.add_argument("--radius", type=float, default=1.5,
help='radius of sphere for distance field')
# training options
parser.add_argument("--N_rand", type=int, default=8192,
help='batch size')
parser.add_argument("--N_iters", type=int, default=100000,
help='number of epochs')
parser.add_argument("--lrate", type=float, default=1e-4,
help='learning rate')
# classifier options
parser.add_argument("--dist_thres", type=float, default=1e-2,
help='threshold to determine if the query point is occluded for the sampled view')
parser.add_argument("--vis_thres", type=float, default=0.5,
help='threshold for binary classification')
parser.add_argument("--netdepth_cls", type=int, default=8,
                        help='layers in visibility classifier')
parser.add_argument("--netwidth_cls", type=int, default=512,
help='channels per layer')
parser.add_argument("--ext_layer_cls", type=int, default=1,
help='number of layers to extract individual features')
parser.add_argument("--pos_weight", type=float, default=1.,
help='positive weight for cross-entropy loss')
# multiview optimization options
parser.add_argument("--N_views", type=int, default=20,
help='the number of reference views per ray')
parser.add_argument("--w_rgb", type=float, default=1.,
help='weight of rgb loss')
parser.add_argument("--ckpt_path_cls", type=str, default=None,
help='checkpoint path of classifier to reload')
# ray-surface distance network
parser.add_argument("--netdepth", type=int, default=13,
help='layers in network')
parser.add_argument("--netwidth", type=int, default=1024,
help='channels per layer')
parser.add_argument("--rgb_layer", type=int, default=0,
help='if true, network predicts radiance')
parser.add_argument("--denoise", action='store_true',
help='if true, compute gradients to remove outliers')
parser.add_argument("--grad_normal", action='store_true',
help='if true, use gradients to compute surface normal')
parser.add_argument("--grad_clip", type=float, default=-1,
help='maximum clip value for grad norm')
parser.add_argument("--outlier_thres", type=float, default=10.,
help='threshold to select outliers for minimizing the surface gradient')
# dataset options
parser.add_argument("--datadir", type=str, default='./datasets',
help='input data directory')
parser.add_argument("--dataset", type=str, required=True,
help='the name of dataset for train/eval')
parser.add_argument("--scene", type=str, required=True,
help='the name of scene for train/eval')
parser.add_argument("--trainskip", type=int, default=1,
                        help='will load 1/N images from the train set')
parser.add_argument("--testskip", type=int, default=8,
help='will load 1/N images from test/val sets')
parser.add_argument("--voxel_sz", type=float, default=0.005,
help='size of voxel for tsdf integration')
parser.add_argument("--cd_sample", type=int, default=30000,
help='the number of sampling points to compute chamfer-distance')
parser.add_argument("--continuous", action='store_true',
help='output continuous distance maps')
# logging/saving options
parser.add_argument("--logdir", type=str, default='./logs',
help='where to store ckpts and logs')
parser.add_argument("--expname", type=str, default='',
help='experiment name')
parser.add_argument("--i_print", type=int, default=100,
                        help='frequency of console printout and metric logging')
parser.add_argument("--i_img", type=int, default=5000,
help='frequency of image logging')
parser.add_argument("--i_weights", type=int, default=10000,
help='frequency of weight ckpt saving')
return parser
# Path: utils/log.py
EPS = 1e-8
def to_distmap(x, m=None, white_bkgd=True, min=None, max=None):
def to_normalmap(x, m=None, white_bkgd=True):
def to_colormap(x):
def save_config(args):
# Path: utils/math.py
def convert_d(d, scene_info, out='dist'):
H, W, focal = scene_info['H'], scene_info['W'], scene_info['focal']
i, j = np.meshgrid(np.arange(W, dtype=np.float32),
np.arange(H, dtype=np.float32), indexing='xy')
L = np.sqrt(np.power(j - H / 2., 2) + np.power(i - W / 2., 2) + focal ** 2)
fl = focal / L
if out == 'dist':
return d / fl
elif out == 'dep':
return d * fl
else:
raise NotImplementedError
# Path: utils/dataloader.py
class Dataloader:
def __init__(self, args, device):
self.args = args
self.device = device
self.N_rand = args.N_rand
i_split, self.all_dists, self.all_images, masks, self.cam_poses, self.scene_info = \
dataloder_func[args.dataset](args.datadir, args.trainskip, args.testskip)
# restore scene info
self.scene_info['sphere_radius'] = args.radius
self.i_train, self.i_test = i_split
print('TRAIN views are', self.i_train)
print('TEST views are', self.i_test)
# compute rays
all_rays = []
for i, pose in enumerate(self.cam_poses):
            rays_o, rays_d = get_rays_np(self.scene_info, pose)  # each of shape (H, W, 3)
ray = np.concatenate([rays_o, rays_d], -1)
all_rays.append(ray)
all_rays = np.stack(all_rays, axis=0)
self.rays, self.dists, self.masks, self.imgs = {}, {}, {}, {}
for mode, split in zip(['train', 'test'], [self.i_train, self.i_test]):
self.rays[mode] = np.reshape(all_rays[split], [-1, 6])
self.dists[mode] = np.reshape(self.all_dists[split], [-1, 1])
self.masks[mode] = np.reshape(masks[split], [-1, 1])
if args.rgb_layer > 0:
self.imgs[mode] = np.reshape(self.all_images[split], [-1, 3])
# extract foreground rays for train/eval
self.rays[mode+'_fg'] = self.rays[mode][self.masks[mode][:, 0]==1]
self.dists[mode+'_fg'] = self.dists[mode][self.masks[mode][:, 0]==1]
self.masks[mode+'_fg'] = self.masks[mode][self.masks[mode][:, 0]==1]
if args.rgb_layer > 0:
self.imgs[mode+'_fg'] = self.imgs[mode][self.masks[mode][:, 0]==1]
def __call__(self, inds, mode):
batch_rays = torch.Tensor(self.rays[mode][inds]).to(self.device)
dists = torch.Tensor(self.dists[mode][inds]).to(self.device)
masks = torch.Tensor(self.masks[mode][inds]).to(self.device)
targ_dict = {'dist': dists, 'mask': masks}
if self.args.rgb_layer > 0:
images = torch.Tensor(self.imgs[mode][inds]).to(self.device)
targ_dict['image'] = images
return batch_rays, targ_dict
# Path: utils/ray.py
def get_ray_param(ray_fn, rays):
samples, hit_info = ray_fn(rays)
return samples, hit_info['t0'].detach(), hit_info['ray_dir']
# Path: net_multiview/network.py
def create_net(args, scene_info, device):
ray_fn, input_ch = get_rayparam_func(scene_info)
# initialise classifier and load ckpt
model_cls = DualVisClassifier(D=args.netdepth_cls, W=args.netwidth_cls,
input_ch=input_ch, ext_layer=args.ext_layer_cls).to(device)
if not args.eval_only:
print('Reloading vis classifier from', args.ckpt_path_cls)
cls_ckpt = torch.load(args.ckpt_path_cls)
model_cls.load_state_dict(cls_ckpt['network_fn'])
# initialise distance network for multiview optimization
model = RaySurfDNet(D=args.netdepth, W=args.netwidth, input_ch=input_ch, rgb_layer=args.rgb_layer).to(device)
optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lrate, betas=(0.9, 0.999), capturable=True)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.N_iters, eta_min=args.lrate*0.01)
############# Load checkpoints #############
ckpts = [os.path.join(args.logdir, args.expname, f) for f in sorted(os.listdir(
os.path.join(args.logdir, args.expname))) if 'tar' in f]
print('Found ckpts', ckpts)
start = 0
if len(ckpts) > 0:
ckpt_path = ckpts[-1]
print('Loading ckpt from:', ckpt_path)
ckpt = torch.load(ckpt_path)
start = ckpt['global_step']
model.load_state_dict(ckpt['network_fn'])
optimizer.load_state_dict(ckpt['optimizer'])
optimizer.param_groups[0]['capturable'] = True
scheduler.load_state_dict(ckpt['scheduler'])
scheduler.last_epoch = ckpt['global_step']
return ray_fn, start, model, model_cls, optimizer, scheduler
# Path: net_multiview/sampler.py
def get_multiview_rays(args, query_rays, query_gts):
# define the query surface points
wcoords = query_rays[..., :3] + query_gts['dist'] * query_rays[..., 3:]
# sample points on a unit sphere to construct vectors
x = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.
y = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.
z = 2. * torch.rand([wcoords.shape[0], args.N_views]) - 1.
mv_dirs = torch.stack([x, y, z], dim=-1).to(wcoords.device)
mv_dirs = mv_dirs / (torch.linalg.norm(mv_dirs, dim=-1, keepdim=True) + EPS)
rays_d = -mv_dirs
# generate new rays
dist = args.radius * 2.
rays_o = wcoords[:, None] - dist * rays_d
mv_rays = torch.concat([rays_o, rays_d], dim=-1) # (B, N_views, 6)
target_dict = {'dist': torch.ones_like(rays_d[..., :1]) * dist}
if args.rgb_layer > 0:
target_dict['image'] = torch.tile(query_gts['image'][:, None], (1, args.N_views, 1))
mv_rays_flat = mv_rays.reshape(-1, 6)
for k in target_dict:
target_dict[k] = target_dict[k].reshape(-1, target_dict[k].shape[-1])
return mv_rays_flat, target_dict
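# Shape note for the sampler above (symbols as in the function): for B query rays and
# args.N_views sampled directions, mv_rays_flat has shape (B * N_views, 6) and every
# entry of target_dict['dist'] equals 2 * args.radius by construction.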
# Path: utils/math.py
def get_surface_gradient(t, raydirs):
dt = gradient(t, raydirs)
return torch.norm(dt, dim=-1, keepdim=True)
# Path: utils/math.py
def get_surface_normal(t, raydirs):
dt = gradient(t, raydirs)
dtdtheta, dtdphi = dt[..., :1], dt[..., 1:]
sin_theta, cos_theta = torch.sin(raydirs[..., :1]), torch.cos(raydirs[..., :1])
sin_phi, cos_phi = torch.sin(raydirs[..., 1:]), torch.cos(raydirs[..., 1:])
dtheta = torch.cat([(dtdtheta * sin_theta + t * cos_theta) * cos_phi,
(dtdtheta * sin_theta + t * cos_theta) * sin_phi,
dtdtheta * cos_theta - t * sin_theta], dim=-1)
dphi = torch.cat([(dtdphi * cos_phi - t * sin_phi) * sin_theta,
(dtdphi * sin_phi + t * cos_phi) * sin_theta,
dtdphi * cos_theta], dim=-1)
normal = torch.cross(dphi, dtheta)
normal = normal / (torch.linalg.norm(normal+EPS, dim=-1, keepdim=True)+EPS)
return normal
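# Note on the two helpers above: `gradient(t, raydirs)` is not shown here but is
# presumably defined alongside them; it differentiates the predicted distance t with
# respect to the two spherical ray angles, so raydirs only carries (theta, phi).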
# Path: run_mv.py
import os
import torch
import numpy as np
import imageio
import trimesh
import open3d as o3d
import wandb
from tqdm import trange
from config import config_parser
from open3d import pipelines
from wandb import AlertLevel
from utils import log
from utils.math import convert_d
from utils.dataloader import Dataloader
from utils.ray import get_ray_param
from net_multiview.network import create_net
from net_multiview.sampler import get_multiview_rays
from utils.math import get_surface_gradient, get_surface_normal
from torchmetrics.functional import peak_signal_noise_ratio as PSNR
from torchmetrics.functional import structural_similarity_index_measure as SSIM
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from chamfer_distance import ChamferDistance
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
np.random.seed(0)
LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='alex').to(device)
CD = ChamferDistance().to(device)
def train(args):
# Load dataset
dataloader = Dataloader(args, device)
# Create rayparam function and network
ray_fn, global_step, model, model_cls, optimizer, scheduler = create_net(args, dataloader.scene_info, device)
# Create experiment logger
wandb.init(project="RayDF-RaySurfDNet")
wandb.run.name = args.expname
wandb.watch(model, log="all")
start = global_step
train_num = len(dataloader.dists['train_fg'])
inds = np.random.permutation(train_num)
step_batch = train_num // args.N_rand
for i in trange(start, args.N_iters):
optimizer.zero_grad()
j = i % step_batch
ep = i // step_batch
# re-random train indices after one epoch
if j == 0 and i != start:
inds = np.random.permutation(train_num)
# =================== Query Rays ========================
# Random rays from all images
train_i = inds[j * args.N_rand: (j+1) * args.N_rand]
# load query rays
batch_rays, target_dict = dataloader(inds=train_i, mode='train_fg')
batch_inputs, d0, _ = get_ray_param(ray_fn, batch_rays)
# normalize query gt distance and query surface point
target_dict['dist_norm'] = (target_dict['dist'] - d0) / (args.radius * 2.)
batch_pts = batch_rays[..., :3] + target_dict['dist'] * batch_rays[..., 3:]
for c in range(batch_pts.shape[-1]):
batch_pts[..., c] -= dataloader.scene_info['sphere_center'][c]
target_dict['pts_norm'] = batch_pts / dataloader.scene_info['sphere_radius']
# ================= Multiview Rays =====================
# Sample multiview rays and get their ray parameters
mv_rays, mv_targets = get_multiview_rays(args, query_rays=batch_rays, query_gts=target_dict)
mv_inputs, mv_d0, _ = get_ray_param(ray_fn, mv_rays)
mv_targets['dist_norm'] = (mv_targets['dist'] - mv_d0) / (args.radius * 2.)
# Compute visibility
with torch.no_grad():
cls_inputs = [torch.tile(batch_inputs[:, None], (1, args.N_views, 1)).reshape(-1, batch_inputs.shape[-1]),
mv_inputs,
torch.tile(target_dict['pts_norm'][:, None], (1, args.N_views, 1)).reshape(-1, 3)]
vis = model_cls(cls_inputs)
mv_targets['vis_score'] = torch.sigmoid(vis).reshape(args.N_rand, args.N_views)
reweigh = 0.5
mv_targets['vis_score'] = mv_targets['vis_score'] ** reweigh / (mv_targets['vis_score'] ** reweigh +
(1. - mv_targets['vis_score']) ** reweigh)
# Multiview forward
mv_batch_inputs = torch.cat([batch_inputs, mv_inputs], dim=0)
mv_outputs = model(mv_batch_inputs)
# ================= Optimization =====================
loss_d_query = torch.abs(mv_outputs['dist'][:args.N_rand] - target_dict['dist_norm'])[:, 0]
| loss_d_mv = torch.abs(mv_outputs['dist'][args.N_rand:] - mv_targets['dist_norm']).reshape(args.N_rand, args.N_views) |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: snap-stanford/relbench
# Path: relbench/data/task.py
class TaskType(Enum):
r"""The type of the task.
Attributes:
REGRESSION: Regression task.
MULTICLASS_CLASSIFICATION: Multi-class classification task.
BINARY_CLASSIFICATION: Binary classification task.
"""
REGRESSION = "regression"
BINARY_CLASSIFICATION = "binary_classification"
# Path: relbench/datasets/fake.py
class FakeDataset(Dataset):
name = "rel-fake"
def __init__(
self, num_products: int = 30, num_customers: int = 100, num_reviews: int = 500
):
db = self.make_db(num_products, num_customers, num_reviews)
db.reindex_pkeys_and_fkeys()
val_timestamp = db.min_timestamp + 0.8 * (db.max_timestamp - db.min_timestamp)
test_timestamp = db.min_timestamp + 0.9 * (db.max_timestamp - db.min_timestamp)
super().__init__(
db=db,
val_timestamp=val_timestamp,
test_timestamp=test_timestamp,
task_cls_list=[ChurnTask, LTVTask],
)
def make_db(self, num_products, num_customers, num_reviews) -> Database:
product_df = pd.DataFrame(
{
"product_id": [f"product_id_{i}" for i in range(num_products)],
"category": [None, [], ["toy", "health"]] * (num_products // 3),
"title": [_generate_random_string(5, 15) for _ in range(num_products)],
"price": np.random.rand(num_products) * 10,
}
)
customer_df = pd.DataFrame(
{
"customer_id": [f"customer_id_{i}" for i in range(num_customers)],
"age": np.random.randint(10, 50, size=(num_customers,)),
"gender": ["male", "female"] * (num_customers // 2),
}
)
# Add some dangling foreign keys:
review_df = pd.DataFrame(
{
"customer_id": [
f"customer_id_{random.randint(0, num_customers+5)}"
for _ in range(num_reviews)
],
"product_id": [
f"product_id_{random.randint(0, num_products-1)}"
for _ in range(num_reviews)
],
"review_time": pd.to_datetime(10 * np.arange(num_reviews), unit="D"),
"rating": np.random.randint(1, 6, size=(num_reviews,)),
}
)
return Database(
table_dict={
"product": Table(
df=product_df,
fkey_col_to_pkey_table={},
pkey_col="product_id",
),
"customer": Table(
df=customer_df,
fkey_col_to_pkey_table={},
pkey_col="customer_id",
),
"review": Table(
df=review_df,
fkey_col_to_pkey_table={
"customer_id": "customer",
"product_id": "product",
},
time_col="review_time",
),
}
)
# Path: relbench/external/graph.py
def get_stype_proposal(db: Database) -> Dict[str, Dict[str, Any]]:
r"""Propose stype for columns of a set of tables in the given database.
Args:
        db (Database): The database object containing a set of tables.
Returns:
Dict[str, Dict[str, Any]]: A dictionary mapping table name into
:obj:`col_to_stype` (mapping column names into inferred stypes).
"""
inferred_col_to_stype_dict = {}
for table_name, table in db.table_dict.items():
inferred_col_to_stype = infer_df_stype(table.df)
# Remove pkey, fkey columns since they will not be used as input
# feature.
if table.pkey_col is not None:
inferred_col_to_stype.pop(table.pkey_col)
for fkey in table.fkey_col_to_pkey_table.keys():
inferred_col_to_stype.pop(fkey)
inferred_col_to_stype_dict[table_name] = inferred_col_to_stype
return inferred_col_to_stype_dict
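# A hedged usage sketch with the FakeDataset defined above (mirroring the call in the
# test file further below): one stype proposal is returned per table.
_demo_db = FakeDataset().db
_demo_proposal = get_stype_proposal(_demo_db)
assert set(_demo_proposal) == set(_demo_db.table_dict)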
# Path: relbench/external/graph.py
def get_train_table_input(
table: Table,
task: Task,
) -> TrainTableInput:
nodes = torch.from_numpy(table.df[task.entity_col].astype(int).values)
time: Optional[Tensor] = None
if table.time_col is not None:
time = to_unix_time(table.df[table.time_col])
target: Optional[Tensor] = None
transform: Optional[AttachTargetTransform] = None
if task.target_col in table.df:
target_type = float
if task.task_type == "multiclass_classification":
target_type = int
target = torch.from_numpy(table.df[task.target_col].values.astype(target_type))
transform = AttachTargetTransform(task.entity_table, target)
return TrainTableInput(
nodes=(task.entity_table, nodes),
time=time,
target=target,
transform=transform,
)
# Path: relbench/external/graph.py
def make_pkey_fkey_graph(
db: Database,
col_to_stype_dict: Dict[str, Dict[str, stype]],
text_embedder_cfg: Optional[TextEmbedderConfig] = None,
cache_dir: Optional[str] = None,
) -> HeteroData:
r"""Given a :class:`Database` object, construct a heterogeneous graph with
primary-foreign key relationships, together with the column stats of each
table.
Args:
db (Database): A database object containing a set of tables.
col_to_stype_dict (Dict[str, Dict[str, stype]]): Column to stype for
each table.
cache_dir (str, optional): A directory for storing materialized tensor
frames. If specified, we will either cache the file or use the
            cached file. If not specified, we will not use a cached file and
            re-process everything from scratch without saving the cache.
Returns:
HeteroData: The heterogeneous :class:`PyG` object with
:class:`TensorFrame` feature.
"""
data = HeteroData()
if cache_dir is not None:
os.makedirs(cache_dir, exist_ok=True)
for table_name, table in db.table_dict.items():
# Materialize the tables into tensor frames:
df = table.df
# Ensure that pkey is consecutive.
if table.pkey_col is not None:
assert (df[table.pkey_col].values == np.arange(len(df))).all()
col_to_stype = col_to_stype_dict[table_name]
if len(col_to_stype) == 0: # Add constant feature in case df is empty:
col_to_stype = {"__const__": stype.numerical}
df = pd.DataFrame({"__const__": np.ones(len(table.df))})
path = (
None if cache_dir is None else os.path.join(cache_dir, f"{table_name}.pt")
)
dataset = Dataset(
df=df,
col_to_stype=col_to_stype,
col_to_text_embedder_cfg=text_embedder_cfg,
).materialize(path=path)
data[table_name].tf = dataset.tensor_frame
data[table_name].col_stats = dataset.col_stats
# Add time attribute:
if table.time_col is not None:
data[table_name].time = to_unix_time(table.df[table.time_col])
# Add edges:
for fkey_name, pkey_table_name in table.fkey_col_to_pkey_table.items():
pkey_index = df[fkey_name]
# Filter out dangling foreign keys
mask = ~pkey_index.isna()
fkey_index = torch.arange(len(pkey_index))
# Filter dangling foreign keys:
pkey_index = torch.from_numpy(pkey_index[mask].astype(int).values)
fkey_index = fkey_index[torch.from_numpy(mask.values)]
# Ensure no dangling fkeys
assert (pkey_index < len(db.table_dict[pkey_table_name])).all()
# fkey -> pkey edges
edge_index = torch.stack([fkey_index, pkey_index], dim=0)
edge_type = (table_name, f"f2p_{fkey_name}", pkey_table_name)
data[edge_type].edge_index = sort_edge_index(edge_index)
# pkey -> fkey edges
edge_index = torch.stack([pkey_index, fkey_index], dim=0)
edge_type = (pkey_table_name, f"p2f_{fkey_name}", table_name)
data[edge_type].edge_index = sort_edge_index(edge_index)
data.validate()
return data
# Path: relbench/external/nn.py
class HeteroEncoder(torch.nn.Module):
r"""HeteroEncoder based on PyTorch Frame.
Args:
channels (int): The output channels for each node type.
node_to_col_names_dict (Dict[NodeType, Dict[torch_frame.stype, List[str]]]):
A dictionary mapping from node type to column names dictionary
compatible to PyTorch Frame.
torch_frame_model_cls: Model class for PyTorch Frame. The class object
takes :class:`TensorFrame` object as input and outputs
:obj:`channels`-dimensional embeddings. Default to
:class:`torch_frame.nn.ResNet`.
torch_frame_model_kwargs (Dict[str, Any]): Keyword arguments for
:class:`torch_frame_model_cls` class. Default keyword argument is
set specific for :class:`torch_frame.nn.ResNet`. Expect it to
be changed for different :class:`torch_frame_model_cls`.
default_stype_encoder_cls_kwargs (Dict[torch_frame.stype, Any]):
A dictionary mapping from :obj:`torch_frame.stype` object into a
tuple specifying :class:`torch_frame.nn.StypeEncoder` class and its
keyword arguments :obj:`kwargs`.
"""
def __init__(
self,
channels: int,
node_to_col_names_dict: Dict[NodeType, Dict[torch_frame.stype, List[str]]],
node_to_col_stats: Dict[NodeType, Dict[str, Dict[StatType, Any]]],
torch_frame_model_cls=ResNet,
torch_frame_model_kwargs: Dict[str, Any] = {
"channels": 128,
"num_layers": 4,
},
default_stype_encoder_cls_kwargs: Dict[torch_frame.stype, Any] = {
torch_frame.categorical: (torch_frame.nn.EmbeddingEncoder, {}),
torch_frame.numerical: (torch_frame.nn.LinearEncoder, {}),
torch_frame.multicategorical: (
torch_frame.nn.MultiCategoricalEmbeddingEncoder,
{},
),
torch_frame.embedding: (torch_frame.nn.LinearEmbeddingEncoder, {}),
torch_frame.timestamp: (torch_frame.nn.TimestampEncoder, {}),
},
):
super().__init__()
self.encoders = torch.nn.ModuleDict()
for node_type in node_to_col_names_dict.keys():
stype_encoder_dict = {
stype: default_stype_encoder_cls_kwargs[stype][0](
**default_stype_encoder_cls_kwargs[stype][1]
)
for stype in node_to_col_names_dict[node_type].keys()
}
torch_frame_model = torch_frame_model_cls(
**torch_frame_model_kwargs,
out_channels=channels,
col_stats=node_to_col_stats[node_type],
col_names_dict=node_to_col_names_dict[node_type],
stype_encoder_dict=stype_encoder_dict,
)
self.encoders[node_type] = torch_frame_model
def reset_parameters(self):
for encoder in self.encoders.values():
encoder.reset_parameters()
def forward(
self,
tf_dict: Dict[NodeType, torch_frame.TensorFrame],
) -> Dict[NodeType, Tensor]:
x_dict = {
node_type: self.encoders[node_type](tf) for node_type, tf in tf_dict.items()
}
return x_dict
# Path: relbench/external/nn.py
class HeteroGraphSAGE(torch.nn.Module):
def __init__(
self,
node_types: List[NodeType],
edge_types: List[EdgeType],
channels: int,
aggr: str = "mean",
num_layers: int = 2,
):
super().__init__()
self.convs = torch.nn.ModuleList()
for _ in range(num_layers):
conv = HeteroConv(
{
edge_type: SAGEConv((channels, channels), channels, aggr=aggr)
for edge_type in edge_types
},
aggr="sum",
)
self.convs.append(conv)
self.norms = torch.nn.ModuleList()
for _ in range(num_layers):
norm_dict = torch.nn.ModuleDict()
for node_type in node_types:
norm_dict[node_type] = LayerNorm(channels, mode="node")
self.norms.append(norm_dict)
def reset_parameters(self):
for conv in self.convs:
conv.reset_parameters()
for norm_dict in self.norms:
for norm in norm_dict.values():
norm.reset_parameters()
def forward(
self,
x_dict: Dict[NodeType, Tensor],
edge_index_dict: Dict[NodeType, Tensor],
num_sampled_nodes_dict: Optional[Dict[NodeType, List[int]]] = None,
num_sampled_edges_dict: Optional[Dict[EdgeType, List[int]]] = None,
) -> Dict[NodeType, Tensor]:
for i, (conv, norm_dict) in enumerate(zip(self.convs, self.norms)):
# Trim graph and features to only hold required data per layer:
if num_sampled_nodes_dict is not None:
assert num_sampled_edges_dict is not None
x_dict, edge_index_dict, _ = trim_to_layer(
layer=i,
num_sampled_nodes_per_hop=num_sampled_nodes_dict,
num_sampled_edges_per_hop=num_sampled_edges_dict,
x=x_dict,
edge_index=edge_index_dict,
)
x_dict = conv(x_dict, edge_index_dict)
x_dict = {key: norm_dict[key](x) for key, x in x_dict.items()}
x_dict = {key: x.relu() for key, x in x_dict.items()}
return x_dict
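# A minimal smoke-test sketch (hypothetical node/edge types and sizes): only the
# destination node type of the single relation appears in the output dict.
_demo_gnn = HeteroGraphSAGE(
    node_types=['customer', 'review'],
    edge_types=[('customer', 'p2f_customer_id', 'review')],
    channels=16, num_layers=1,
)
_demo_out = _demo_gnn(
    {'customer': torch.randn(4, 16), 'review': torch.randn(6, 16)},
    {('customer', 'p2f_customer_id', 'review'): torch.tensor([[0, 1], [2, 3]])},
)
assert list(_demo_out) == ['review'] and _demo_out['review'].shape == (6, 16)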
# Path: test/external/test_nn.py
from typing import Dict
from torch_frame.config.text_embedder import TextEmbedderConfig
from torch_frame.testing.text_embedder import HashTextEmbedder
from torch_geometric.loader import NeighborLoader
from torch_geometric.nn import MLP
from relbench.data.task import TaskType
from relbench.datasets import FakeDataset
from relbench.external.graph import (
get_stype_proposal,
get_train_table_input,
make_pkey_fkey_graph,
)
from relbench.external.nn import HeteroEncoder, HeteroGraphSAGE
import torch
import torch.nn.functional as F
def test_train_fake_product_dataset(tmp_path):
dataset = FakeDataset()
data = make_pkey_fkey_graph(
dataset.db,
get_stype_proposal(dataset.db),
text_embedder_cfg=TextEmbedderConfig(
text_embedder=HashTextEmbedder(8), batch_size=None
),
cache_dir=tmp_path,
)
node_to_col_names_dict = { # TODO Expose as method in `HeteroData`.
| node_type: data[node_type].tf.col_names_dict for node_type in data.node_types |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: masked-spacetime-hashing/msth
# Path: nerfstudio/data/utils/nerfstudio_collate.py
def nerfstudio_collate(
batch, extra_mappings: Union[Dict[type, Callable], None] = None
): # pylint: disable=too-many-return-statements
r"""
This is the default pytorch collate function, but with support for nerfstudio types. All documentation
below is copied straight over from pytorch's default_collate function, python version 3.8.13,
pytorch version '1.12.1+cu113'. Custom nerfstudio types are accounted for at the end, and extra
mappings can be passed in to handle custom types. These mappings are from types: callable (types
being like int or float or the return value of type(3.), etc). The only code before we parse for custom types that
was changed from default pytorch was the addition of the extra_mappings argument, a find and replace operation
from default_collate to nerfstudio_collate, and the addition of the nerfstudio_collate_err_msg_format variable.
Function that takes in a batch of data and puts the elements within the batch
into a tensor with an additional outer dimension - batch size. The exact output type can be
a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a
Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.
This is used as the default function for collation when
`batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.
Here is the general input type (based on the type of the element within the batch) to output type mapping:
* :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
* NumPy Arrays -> :class:`torch.Tensor`
* `float` -> :class:`torch.Tensor`
* `int` -> :class:`torch.Tensor`
* `str` -> `str` (unchanged)
* `bytes` -> `bytes` (unchanged)
* `Mapping[K, V_i]` -> `Mapping[K, nerfstudio_collate([V_1, V_2, ...])]`
* `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[nerfstudio_collate([V1_1, V1_2, ...]),
nerfstudio_collate([V2_1, V2_2, ...]), ...]`
* `Sequence[V1_i, V2_i, ...]` -> `Sequence[nerfstudio_collate([V1_1, V1_2, ...]),
nerfstudio_collate([V2_1, V2_2, ...]), ...]`
Args:
batch: a single batch to be collated
Examples:
>>> # Example with a batch of `int`s:
>>> nerfstudio_collate([0, 1, 2, 3])
tensor([0, 1, 2, 3])
>>> # Example with a batch of `str`s:
>>> nerfstudio_collate(['a', 'b', 'c'])
['a', 'b', 'c']
>>> # Example with `Map` inside the batch:
>>> nerfstudio_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
{'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}
>>> # Example with `NamedTuple` inside the batch:
>>> Point = namedtuple('Point', ['x', 'y'])
>>> nerfstudio_collate([Point(0, 0), Point(1, 1)])
Point(x=tensor([0, 1]), y=tensor([0, 1]))
>>> # Example with `Tuple` inside the batch:
>>> nerfstudio_collate([(0, 1), (2, 3)])
[tensor([0, 2]), tensor([1, 3])]
>>> # Example with `List` inside the batch:
>>> nerfstudio_collate([[0, 1], [2, 3]])
[tensor([0, 2]), tensor([1, 3])]
"""
if extra_mappings is None:
extra_mappings = {}
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor): # pylint: disable=no-else-return
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel, device=elem.device) # pylint: disable=protected-access
out = elem.new(storage).resize_(len(batch), *list(elem.size()))
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == "numpy" and elem_type.__name__ != "str_" and elem_type.__name__ != "string_":
# pylint: disable=no-else-return, consider-using-in
if elem_type.__name__ == "ndarray" or elem_type.__name__ == "memmap":
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem.dtype))
return nerfstudio_collate([torch.as_tensor(b) for b in batch], extra_mappings=extra_mappings)
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
try:
return elem_type(
{key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}
)
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, "_fields"): # namedtuple
return elem_type(*(nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError("each element in list of batch should be of equal size")
transposed = list(zip(*batch)) # It may be accessed twice, so we use a list.
if isinstance(elem, tuple):
return [
nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed
] # Backwards compatibility.
else:
try:
return elem_type([nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed])
except TypeError:
# The sequence type may not support `__init__(iterable)` (e.g., `range`).
return [nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed]
# NerfStudio types supported below
elif isinstance(elem, Cameras):
# If a camera, just concatenate along the batch dimension. In the future, this may change to stacking
assert all((isinstance(cam, Cameras) for cam in batch))
assert all((cam.distortion_params is None for cam in batch)) or all(
(cam.distortion_params is not None for cam in batch)
), "All cameras must have distortion parameters or none of them should have distortion parameters.\
Generalized batching will be supported in the future."
# If no batch dimension exists, then we need to stack everything and create a batch dimension on 0th dim
if elem.shape == ():
op = torch.stack
# If batch dimension exists, then we need to concatenate along the 0th dimension
else:
op = torch.cat
return Cameras(
op([cameras.camera_to_worlds for cameras in batch], dim=0),
op([cameras.fx for cameras in batch], dim=0),
op([cameras.fy for cameras in batch], dim=0),
op([cameras.cx for cameras in batch], dim=0),
op([cameras.cy for cameras in batch], dim=0),
height=op([cameras.height for cameras in batch], dim=0),
width=op([cameras.width for cameras in batch], dim=0),
distortion_params=op(
[
cameras.distortion_params
if cameras.distortion_params is not None
else torch.zeros_like(cameras.distortion_params)
for cameras in batch
],
dim=0,
),
camera_type=op([cameras.camera_type for cameras in batch], dim=0),
times=torch.stack(
[cameras.times if cameras.times is not None else -torch.ones_like(cameras.times) for cameras in batch],
dim=0,
),
)
for type_key in extra_mappings:
if isinstance(elem, type_key):
return extra_mappings[type_key](batch)
raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem_type))
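# A small usage sketch mirroring the docstring examples above (hypothetical keys):
# dicts are collated per key, tensors are stacked, and ints become a 1-D tensor.
_demo_batch = [{'rgb': torch.zeros(3), 'idx': 0}, {'rgb': torch.ones(3), 'idx': 1}]
_demo_collated = nerfstudio_collate(_demo_batch)
assert _demo_collated['rgb'].shape == (2, 3) and _demo_collated['idx'].tolist() == [0, 1]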
# Path: nerfstudio/utils/misc.py
def get_dict_to_torch(stuff: Any, device: Union[torch.device, str] = "cpu", exclude: Optional[List[str]] = None):
"""Set everything in the dict to the specified torch device.
Args:
stuff: things to convert to torch
device: machine to put the "stuff" on
exclude: list of keys to skip over transferring to device
"""
if isinstance(stuff, dict):
for k, v in stuff.items():
if exclude and k in exclude:
stuff[k] = v
else:
stuff[k] = get_dict_to_torch(v, device)
return stuff
if isinstance(stuff, torch.Tensor):
return stuff.to(device)
return stuff
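# A hedged usage sketch (hypothetical dict): keys listed in `exclude` are left
# untouched while tensors are moved to the requested device.
_demo_stuff = {'image': torch.zeros(2, 2), 'name': 'cam0'}
_demo_moved = get_dict_to_torch(_demo_stuff, device='cpu', exclude=['name'])
assert _demo_moved['name'] == 'cam0' and _demo_moved['image'].device.type == 'cpu'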
# Path: MSTH/dataparser.py
class VideoDataParserOutputs:
data_dir: Path
video_filenames: List[Path]
start_frame: int
num_frames: int
"""Dataparser outputs for the which will be used by the DataManager
for creating RayBundle and RayGT objects."""
"""Filenames for the images."""
cameras: Cameras
"""Camera object storing collection of camera information in dataset."""
alpha_color: Optional[TensorType[3]] = None
"""Color of dataset background."""
scene_box: SceneBox = SceneBox()
"""Scene box of dataset. Used to bound the scene or provide the scene scale depending on model."""
mask_filenames: Optional[List[Path]] = None
"""Filenames for any masks that are required"""
metadata: Dict[str, Any] = to_immutable_dict({})
"""Dictionary of any metadata that be required for the given experiment.
Will be processed by the InputDataset to create any additional tensors that may be required.
"""
dataparser_transform: TensorType[3, 4] = torch.eye(4)[:3, :]
"""Transform applied by the dataparser."""
dataparser_scale: float = 1.0
"""Scale applied by the dataparser."""
def as_dict(self) -> dict:
"""Returns the dataclass as a dictionary."""
return vars(self)
def save_dataparser_transform(self, path: Path):
"""Save dataparser transform to json file. Some dataparsers will apply a transform to the poses,
this method allows the transform to be saved so that it can be used in other applications.
Args:
path: path to save transform to
"""
data = {
"transform": self.dataparser_transform.tolist(),
"scale": float(self.dataparser_scale),
}
if not path.parent.exists():
path.parent.mkdir(parents=True)
with open(path, "w", encoding="UTF-8") as file:
json.dump(data, file, indent=4)
# Path: MSTH/utils.py
class Timer:
recorder = defaultdict(list)
def __init__(self, des="", verbose=True, record=False) -> None:
self.des = des
self.verbose = verbose
self.record = record
def __enter__(self):
return self
self.start = time.time()
self.start_cuda = torch.cuda.Event(enable_timing=True)
self.end_cuda = torch.cuda.Event(enable_timing=True)
self.start_cuda.record()
return self
def __exit__(self, *args):
return
self.end = time.time()
self.end_cuda.record()
self.interval = self.end - self.start
if self.verbose:
torch.cuda.synchronize()
print(f"[cudasync]{self.des} consuming {self.start_cuda.elapsed_time(self.end_cuda)/1000.:.8f}")
print(f"{self.des} consuming {self.interval:.8f}")
if self.record:
Timer.recorder[self.des].append(self.interval)
@staticmethod
def show_recorder():
pprint(Timer.recorder)
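# Usage sketch: both __enter__ and __exit__ above return immediately, so the context
# manager is currently a no-op and nothing is ever appended to the recorder.
with Timer("load frames", record=True):
    _ = sum(i * i for i in range(10))
Timer.show_recorder()  # prints an empty recorder while timing stays disabled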
# Path: MSTH/ibrnet/feature_extractor.py
class ResUNet(nn.Module):
def __init__(self,
encoder='resnet34',
coarse_out_ch=32,
fine_out_ch=32,
norm_layer=None,
coarse_only=False
):
super(ResUNet, self).__init__()
assert encoder in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'], "Incorrect encoder type"
if encoder in ['resnet18', 'resnet34']:
filters = [64, 128, 256, 512]
else:
filters = [256, 512, 1024, 2048]
self.coarse_only = coarse_only
if self.coarse_only:
fine_out_ch = 0
self.coarse_out_ch = coarse_out_ch
self.fine_out_ch = fine_out_ch
out_ch = coarse_out_ch + fine_out_ch
# original
layers = [3, 4, 6, 3]
if norm_layer is None:
# norm_layer = nn.BatchNorm2d
norm_layer = nn.InstanceNorm2d
self._norm_layer = norm_layer
self.dilation = 1
block = BasicBlock
replace_stride_with_dilation = [False, False, False]
self.inplanes = 64
self.groups = 1
self.base_width = 64
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False, padding_mode='reflect')
self.bn1 = norm_layer(self.inplanes, track_running_stats=False, affine=True)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
# decoder
self.upconv3 = upconv(filters[2], 128, 3, 2)
self.iconv3 = conv(filters[1] + 128, 128, 3, 1)
self.upconv2 = upconv(128, 64, 3, 2)
self.iconv2 = conv(filters[0] + 64, out_ch, 3, 1)
# fine-level conv
self.out_conv = nn.Conv2d(out_ch, out_ch, 1, 1)
self.upsample = nn.Upsample(scale_factor=4)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion, track_running_stats=False, affine=True),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def skipconnect(self, x1, x2):
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return x
def forward(self, x):
self.upsample = nn.Upsample(size=x.shape[1:3])
x = x.moveaxis(-1, 1)
x = self.relu(self.bn1(self.conv1(x)))
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x = self.upconv3(x3)
x = self.skipconnect(x2, x)
x = self.iconv3(x)
x = self.upconv2(x)
x = self.skipconnect(x1, x)
x = self.iconv2(x)
x_out = self.out_conv(x)
if self.coarse_only:
x_coarse = x_out
x_fine = None
else:
x_coarse = x_out[:, :self.coarse_out_ch, :]
x_fine = x_out[:, -self.fine_out_ch:, :]
x_coarse = self.upsample(x_coarse)
x_coarse = x_coarse.moveaxis(1, -1)
if x_fine is not None:
x_fine = self.upsample(x_fine)
x_fine = x_fine.moveaxis(1, -1)
return x_coarse, x_fine
@classmethod
def load_from_pretrained(cls, ckpt_path, map_location="cpu"):
res_u_net = cls()
res_u_net.load_state_dict(torch.load(ckpt_path, map_location=map_location)["feature_net"])
return res_u_net
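# A hedged shape sketch (hypothetical input size): the extractor takes channel-last
# images and returns coarse/fine feature maps at the input resolution.
_demo_net = ResUNet(coarse_out_ch=32, fine_out_ch=32)
_demo_coarse, _demo_fine = _demo_net(torch.zeros(1, 64, 64, 3))
assert _demo_coarse.shape == (1, 64, 64, 32) and _demo_fine.shape == (1, 64, 64, 32)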
# Path: MSTH/dataset.py
import gc
import concurrent
import concurrent.futures
import os
import cv2
import numpy as np
import numpy.typing as npt
import torch
from copy import deepcopy
from time import time
from typing import Dict, Union
from tqdm import tqdm, trange
from scipy.ndimage import gaussian_filter
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from torchtyping import TensorType
from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate
from nerfstudio.utils.misc import get_dict_to_torch
from MSTH.dataparser import VideoDataParserOutputs
from MSTH.utils import Timer
from MSTH.ibrnet.feature_extractor import ResUNet
from pathlib import Path
from rich.console import Console
CONSOLE = Console(width=120)
def get_mask_single_image(mask):
threshold = 0.05
mask = gaussian_filter(mask, sigma=5)
return torch.where(torch.from_numpy(mask) > threshold, 1.0, 0.0)
def extend_mask(mask, radius=5):
mask = gaussian_filter(mask, sigma=5, radius=[radius, radius])
mask[np.where(mask > 0.0)] = 1.0
# print("shit")
return torch.from_numpy(mask)
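# A hedged usage sketch (hypothetical mask): a single moving pixel is dilated into a
# small blob of ones.
_demo_mask = np.zeros((8, 8), dtype=np.float32)
_demo_mask[4, 4] = 1.0
assert extend_mask(_demo_mask, radius=2).sum() > 1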
class VideoDataset(Dataset):
def __init__(
self,
dataparser_outputs: VideoDataParserOutputs,
scale_factor: float = 1.0,
mask_extend_radius: int = 5,
next_n_frames: int = 1,
) -> None:
super().__init__()
self._dataparser_outputs = dataparser_outputs
self.scale_factor = scale_factor
self.scene_box = deepcopy(dataparser_outputs.scene_box)
self.metadata = deepcopy(dataparser_outputs.metadata)
self.cameras = deepcopy(dataparser_outputs.cameras)
self.cameras.rescale_output_resolution(scaling_factor=scale_factor)
self.vcs = []
# TODO: maybe add h and w to dataparseroutputs ?
self.h = self.cameras.height[0][0].item()
assert isinstance(self.h, int), "support only all the inputs share same size"
self.w = self.cameras.width[0][0].item()
self._prepare_video_captures()
self.next_n_frames = next_n_frames
if next_n_frames > 1:
| self.load_first_n_frames() |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: sehyunkwon/ICTC
# Path: step1/llava/model/llava_arch.py
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
self.config.use_mm_proj = True
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
if not hasattr(self, 'mm_projector'):
self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
# Path: step1/llava/model/llava_arch.py
class LlavaMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
# multimodal LLM, but the current sample is not multimodal
cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)
cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
cur_labels = cur_labels[image_token_start+2:]
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_input_ids = cur_input_ids[image_token_start+2:]
else:
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end:
num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = self.get_input_embeddings().weight.data
output_embeddings = self.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
if model_args.tune_mm_mlp_adapter:
for p in self.get_input_embeddings().parameters():
p.requires_grad = True
for p in self.get_output_embeddings().parameters():
p.requires_grad = False
if model_args.pretrain_mm_mlp_adapter:
mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
assert num_new_tokens == 2
if input_embeddings.shape == embed_tokens_weight.shape:
input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
elif embed_tokens_weight.shape[0] == num_new_tokens:
input_embeddings[-num_new_tokens:] = embed_tokens_weight
else:
raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.")
elif model_args.mm_use_im_patch_token:
if model_args.tune_mm_mlp_adapter:
for p in self.get_input_embeddings().parameters():
p.requires_grad = False
for p in self.get_output_embeddings().parameters():
p.requires_grad = False
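# Hedged sketch of the embedding-initialization pattern in initialize_vision_tokenizer above,
# written against plain Hugging Face transformers. The model name "gpt2" and the token strings
# are assumptions for illustration only, not the repository's configuration.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

num_new_tokens = tokenizer.add_tokens(["<im_start>", "<im_end>"], special_tokens=True)
model.resize_token_embeddings(len(tokenizer))

if num_new_tokens > 0:
    # new rows start as the mean of all pre-existing embedding rows
    emb = model.get_input_embeddings().weight.data
    emb[-num_new_tokens:] = emb[:-num_new_tokens].mean(dim=0, keepdim=True)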
# Path: step1/llava/model/language_model/llava_llama.py
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
import torch
import torch.nn as nn
# Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaConfig(LlamaConfig):
model_type = "llava"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
config_class = LlavaConfig
def __init__(self, config: LlamaConfig):
super(LlavaLlamaModel, self).__init__(config)
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
config_class = LlavaConfig
def __init__(self, config):
super(LlamaForCausalLM, self).__init__(config)
self.model = LlavaLlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_model(self):
return self.model
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
images: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
shift_logits = shift_logits.view(-1, self.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model/pipeline parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
if not return_dict:
| output = (logits,) + outputs[1:] |
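# Self-contained illustration of the shift-by-one cross-entropy computed in forward() above:
# logits at position t are scored against the token at position t+1. Shapes are toy values.
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 11
logits = torch.randn(2, 5, vocab_size)            # (batch, seq_len, vocab)
labels = torch.randint(0, vocab_size, (2, 5))     # (batch, seq_len)

shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab_size)
shift_labels = labels[..., 1:].contiguous().view(-1)
loss = CrossEntropyLoss()(shift_logits, shift_labels)
print(loss.item())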
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: phineas-pta/comfy-trt-test
# Path: comfy_trt/exporter.py
def export_onnx(onnx_path: str, modelobj: UNetModel, profile: ProfileSettings, opset: int = 17, disable_optimizations: bool = False):
logging.info("Exporting to ONNX...")
inputs = modelobj.get_sample_input(profile.bs_opt * 2, profile.h_opt // 8, profile.w_opt // 8, profile.t_opt)
if not os.path.exists(onnx_path):
_export_onnx(
modelobj.unet,
inputs,
onnx_path,
opset,
modelobj.get_input_names(),
modelobj.get_output_names(),
modelobj.get_dynamic_axes(),
modelobj.optimize if not disable_optimizations else None,
)
# Path: comfy_trt/exporter.py
def export_trt(trt_path: str, onnx_path: str, timing_cache: str, profile: dict, use_fp16: bool) -> int:
engine = Engine(trt_path)
s = time.time()
ret = engine.build(onnx_path, use_fp16, enable_refit=True, timing_cache=timing_cache, input_profile=[profile])
e = time.time()
logging.info(f"Time taken to build: {e-s}s")
return ret
# Path: comfy_trt/model_helper.py
class UNetModel:
def __init__(self, unet, version: str, unet_dim: int, embedding_dim: int, text_minlen: int = 77) -> None:
super().__init__()
self.unet = unet
self.is_xl = version in ["SDXL", "SDXLRefiner", "SSD1B", "Segmind_Vega"]
self.text_minlen = text_minlen
self.embedding_dim = embedding_dim
match version:
case "SDXL" | "SSD1B" | "Segmind_Vega":
self.num_xl_classes = 2816
case "SDXLRefiner":
self.num_xl_classes = 2560
case _:
self.num_xl_classes = 0
self.emb_chn = 1280
self.in_channels = unet_dim
self.dyn_axes = {
"sample": {0: "2B", 2: "H", 3: "W"},
"encoder_hidden_states": {0: "2B", 1: "77N"},
"timesteps": {0: "2B"},
"latent": {0: "2B", 2: "H", 3: "W"},
"y": {0: "2B"},
}
def get_input_names(self) -> list[str]:
names = ["sample", "timesteps", "encoder_hidden_states"]
if self.is_xl:
names.append("y")
return names
def get_output_names(self) -> list[str]:
return ["latent"]
def get_dynamic_axes(self) -> dict:
io_names = self.get_input_names() + self.get_output_names()
return {name: self.dyn_axes[name] for name in io_names}
def get_sample_input(
self,
batch_size: int,
latent_height: int,
latent_width: int,
text_len: int,
device: str = "cuda",
dtype: torch.dtype = torch.float32,
) -> tuple[torch.Tensor]:
return (
torch.randn(batch_size, self.in_channels, latent_height, latent_width, dtype=dtype, device=device),
torch.randn(batch_size, dtype=dtype, device=device),
torch.randn(batch_size, text_len, self.embedding_dim, dtype=dtype, device=device),
torch.randn(batch_size, self.num_xl_classes, dtype=dtype, device=device) if self.is_xl else None,
)
def get_input_profile(self, profile: ProfileSettings) -> dict:
min_batch, opt_batch, max_batch = profile.get_batch_dim()
(
min_latent_height, latent_height, max_latent_height,
min_latent_width, latent_width, max_latent_width,
) = profile.get_latent_dim()
shape_dict = {
"sample": [
(min_batch, self.in_channels, min_latent_height, min_latent_width),
(opt_batch, self.in_channels, latent_height, latent_width),
(max_batch, self.in_channels, max_latent_height, max_latent_width),
],
"timesteps": [(min_batch,), (opt_batch,), (max_batch,)],
"encoder_hidden_states": [
(min_batch, profile.t_min, self.embedding_dim),
(opt_batch, profile.t_opt, self.embedding_dim),
(max_batch, profile.t_max, self.embedding_dim),
],
}
if self.is_xl:
shape_dict["y"] = [
(min_batch, self.num_xl_classes),
(opt_batch, self.num_xl_classes),
(max_batch, self.num_xl_classes),
]
return shape_dict
# Helper utility for weights map
def export_weights_map(self, onnx_opt_path: str, weights_map_path: str):
onnx_opt_dir = onnx_opt_path
state_dict = self.unet.state_dict()
onnx_opt_model = onnx.load(onnx_opt_path)
# Create initializer data hashes
def init_hash_map(onnx_opt_model):
initializer_hash_mapping = {}
for initializer in onnx_opt_model.graph.initializer:
initializer_data = onnx.numpy_helper.to_array(initializer, base_dir=onnx_opt_dir).astype(np.float16)
initializer_hash = hash(initializer_data.data.tobytes())
initializer_hash_mapping[initializer.name] = (initializer_hash, initializer_data.shape)
return initializer_hash_mapping
initializer_hash_mapping = init_hash_map(onnx_opt_model)
weights_name_mapping, weights_shape_mapping = {}, {}
# set to keep track of initializers already added to the name_mapping dict
initializers_mapped = set()
for wt_name, wt in state_dict.items():
# get weight hash
wt = wt.cpu().detach().numpy().astype(np.float16)
wt_hash = hash(wt.data.tobytes())
wt_t_hash = hash(np.transpose(wt).data.tobytes())
for initializer_name, (initializer_hash, initializer_shape) in initializer_hash_mapping.items():
# Due to constant folding, some weights are transposed during export
# To account for the transpose op, we compare the initializer hash to the hash for the weight and its transpose
if wt_hash == initializer_hash or wt_t_hash == initializer_hash:
# The assert below ensures there is a 1:1 mapping between PyTorch and ONNX weight names.
# It can be removed in cases where 1:many mapping is found and name_mapping[wt_name] = list()
assert initializer_name not in initializers_mapped
weights_name_mapping[wt_name] = initializer_name
initializers_mapped.add(initializer_name)
is_transpose = wt_hash != initializer_hash
weights_shape_mapping[wt_name] = (initializer_shape, is_transpose)
# Sanity check: Were any weights not matched
if wt_name not in weights_name_mapping:
print(f"[I] PyTorch weight {wt_name} not matched with any ONNX initializer")
print(f"[I] UNet: {len(weights_name_mapping.keys())} PyTorch weights were matched with ONNX initializers")
assert weights_name_mapping.keys() == weights_shape_mapping.keys()
with open(weights_map_path, "w") as fp:
json.dump([weights_name_mapping, weights_shape_mapping], fp, indent="\t")
@staticmethod
def optimize(name, onnx_graph, verbose=False):
opt = Optimizer(onnx_graph, verbose=verbose)
opt.info(f"{name}: original")
opt.cleanup()
opt.info(f"{name}: cleanup")
opt.fold_constants()
opt.info(f"{name}: fold constants")
opt.infer_shapes()
opt.info(f"{name}: shape inference")
onnx_opt_graph = opt.cleanup(return_onnx=True)
opt.info(f"{name}: finished")
return onnx_opt_graph
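# Hypothetical sketch (not the repository's exporter) of how the names returned by
# get_input_names()/get_output_names()/get_dynamic_axes() map onto a plain torch.onnx.export
# call. TinyNet and the file name "tiny.onnx" are assumptions for illustration.
import torch

class TinyNet(torch.nn.Module):
    def forward(self, sample):
        return sample * 2.0

torch.onnx.export(
    TinyNet(),
    (torch.randn(2, 4, 8, 8),),
    "tiny.onnx",
    input_names=["sample"],
    output_names=["latent"],
    dynamic_axes={"sample": {0: "2B", 2: "H", 3: "W"}, "latent": {0: "2B", 2: "H", 3: "W"}},
    opset_version=17,
)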
# Path: comfy_trt/model_manager.py
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
ONNX_MODEL_DIR = os.path.join(BASE_PATH, "Unet-onnx")
TRT_MODEL_DIR = os.path.join(BASE_PATH, "Unet-trt")
MODEL_FILE = os.path.join(TRT_MODEL_DIR, "model.json")
def get_cc() -> tuple[int]:
def __init__(self, model_file: str = MODEL_FILE):
def get_onnx_path(model_name: str) -> tuple[str]:
def get_trt_path(self, model_name: str, profile: dict, static_shape: bool) -> tuple[str]:
def get_weights_map_path(self, model_name: str):
def update(self) -> None:
def add_entry(
self,
model_name: str,
profile: dict,
static_shapes: bool,
fp32: bool,
baseline_model: str,
prediction_type: str,
inpaint: bool,
refit: bool,
unet_hidden_dim: int,
lora: bool
) -> None:
def add_lora_entry(
self,
base_model: str,
lora_name: str,
trt_lora_path: str,
fp32: bool,
baseline_model: str,
prediction_type: str,
inpaint: bool,
unet_hidden_dim: int
) -> None:
def write_json(self) -> None:
def read_json(self, encode_config: bool = True) -> dict:
def available_models(self) -> dict:
def available_loras(self):
def get_timing_cache(self) -> str:
def get_valid_models_from_dict(self, base_model: str, feed_dict: dict) -> tuple[list[bool], list[float]]:
def get_valid_models(self, base_model: str, width: int, height: int, batch_size: int, max_embedding: int) -> tuple[list[bool], list[float]]:
class ModelManager:
# Path: comfy_trt/datastructures.py
class ProfileSettings:
bs_min: int # batch size
bs_opt: int
bs_max: int
h_min: int # height
h_opt: int
h_max: int
w_min: int # width
w_opt: int
w_max: int
t_min: int # token count
t_opt: int
t_max: int
static_shapes: bool = False
def __str__(self) -> str:
return "\n\t- ".join([
"[I] size & shape parameters:",
f"batch size: min={self.bs_min}, opt={self.bs_opt}, max={self.bs_max}",
f"height: min={self.h_min}, opt={self.h_opt}, max={self.h_max}",
f"width: min={self.w_min}, opt={self.w_opt}, max={self.w_max}",
f"token count: min={self.t_min}, opt={self.t_opt}, max={self.t_max}",
])
def out(self) -> tuple[int]:
return (
self.bs_min, self.bs_opt, self.bs_max,
self.h_min, self.h_opt, self.h_max,
self.w_min, self.w_opt, self.w_max,
self.t_min, self.t_opt, self.t_max,
)
def token_to_dim(self) -> None:
self.t_min = (self.t_min // 75) * 77
self.t_opt = (self.t_opt // 75) * 77
self.t_max = (self.t_max // 75) * 77
if self.static_shapes:
self.t_min = self.t_max = self.t_opt
self.bs_min = self.bs_max = self.bs_opt
self.h_min = self.h_max = self.h_opt
self.w_min = self.w_max = self.w_opt
self.static_shapes = True
def get_latent_dim(self) -> tuple[int]:
return (
self.h_min // 8, self.h_opt // 8, self.h_max // 8,
self.w_min // 8, self.w_opt // 8, self.w_max // 8,
)
def get_batch_dim(self) -> tuple[int]:
is_static_batch = self.bs_min == self.bs_max == self.bs_opt
if self.t_max <= 77:
return (self.bs_min * 2, self.bs_opt * 2, self.bs_max * 2)
elif self.t_max > 77 and is_static_batch:
return (self.bs_opt, self.bs_opt, self.bs_opt)
elif self.t_max > 77 and not is_static_batch:
if self.t_opt > 77:
return (self.bs_min, self.bs_opt, self.bs_max * 2)
else:
return (self.bs_min, self.bs_opt * 2, self.bs_max * 2)
else:
raise Exception("Uncovered case in get_batch_dim")
# Path: convert_unet.py
import argparse
import sys
import os.path
import gc
import torch
from comfy_trt.exporter import export_onnx, export_trt
from comfy_trt.model_helper import UNetModel
from comfy_trt.model_manager import modelmanager, cc_major
from comfy_trt.datastructures import ProfileSettings
from comfy.utils import load_torch_file, calculate_parameters
from comfy.supported_models import models as LIST_MODELS
from comfy.model_detection import detect_unet_config
from comfy.model_management import unet_dtype as get_unet_dtype
# -*- coding: utf-8 -*-
# modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/ui_trt.py
# CHANGE: remove lora, make script as CLI command
# STATUS: ok i guess
sys.path.append(os.path.join("..", ".."))
def parseArgs():
parser = argparse.ArgumentParser(description="test: convert Stable Diffusion checkpoint to TensorRT engine")
parser.add_argument("--ckpt_path", required=True)
parser.add_argument("--output_name", help=".onnx & .trt file name, default to ckpt file name")
parser.add_argument("--batch_min", type=int, default=1, help="default 1")
parser.add_argument("--batch_opt", type=int, default=1, help="default 1")
parser.add_argument("--batch_max", type=int, default=1, help="limit 16")
parser.add_argument("--height_min", type=int, help="default 768 if sdxl else 512, limit 256")
parser.add_argument("--height_opt", type=int, help="default 1024 if sdxl else 512")
parser.add_argument("--height_max", type=int, help="default 1024 if sdxl else 768, limit 4096")
parser.add_argument("--width_min", type=int, help="default 768 if sdxl else 512, limit 256")
parser.add_argument("--width_opt", type=int, help="default 768 if sdxl else 512")
parser.add_argument("--width_max", type=int, help="default 1024 if sdxl else 768, limit 4096")
parser.add_argument("--token_count_min", type=int, default=75, help="default 75, cannot go lower")
parser.add_argument("--token_count_opt", type=int, default=75, help="default 75")
parser.add_argument("--token_count_max", type=int, default=150, help="default 150, limit 750")
| parser.add_argument("--force_export", action="store_true") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hydrogram/hydrogram
# Path: hydrogram/utils.py
async def ainput(prompt: str = "", *, hide: bool = False):
def get_input_media_from_file_id(
file_id: str, expected_file_type: FileType = None, ttl_seconds: Optional[int] = None
) -> Union["raw.types.InputMediaPhoto", "raw.types.InputMediaDocument"]:
async def parse_messages(
client, messages: "raw.types.messages.Messages", replies: int = 1
) -> list["types.Message"]:
def parse_deleted_messages(client, update) -> list["types.Message"]:
def pack_inline_message_id(msg_id: "raw.base.InputBotInlineMessageID"):
def unpack_inline_message_id(inline_message_id: str) -> "raw.base.InputBotInlineMessageID":
def get_raw_peer_id(peer: raw.base.Peer) -> Optional[int]:
def get_peer_id(peer: raw.base.Peer) -> int:
def get_peer_type(peer_id: int) -> str:
def get_channel_id(peer_id: int) -> int:
def btoi(b: bytes) -> int:
def itob(i: int) -> bytes:
def sha256(data: bytes) -> bytes:
def xor(a: bytes, b: bytes) -> bytes:
def compute_password_hash(
algo: raw.types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str,
) -> bytes:
def compute_password_check(
r: raw.types.account.Password, password: str
) -> raw.types.InputCheckPasswordSRP:
async def parse_text_entities(
client: "hydrogram.Client",
text: str,
parse_mode: enums.ParseMode,
entities: list["types.MessageEntity"],
) -> dict[str, Union[str, list[raw.base.MessageEntity]]]:
def zero_datetime() -> datetime:
def timestamp_to_datetime(ts: Optional[int]) -> Optional[datetime]:
def datetime_to_timestamp(dt: Optional[datetime]) -> Optional[int]:
def get_reply_head_fm(
message_thread_id: int, reply_to_message_id: int
) -> raw.types.InputReplyToMessage:
MIN_CHANNEL_ID = -1002147483647
MAX_CHANNEL_ID = -1000000000000
MIN_CHAT_ID = -2147483647
MAX_USER_ID_OLD = 2147483647
MAX_USER_ID = 999999999999
B = btoi(B_bytes)
A = pow(g, a, p)
S = pow(g_b, a_ux, p)
# Path: hydrogram/handlers/callback_query_handler.py
class CallbackQueryHandler(Handler):
"""The CallbackQuery handler class. Used to handle callback queries coming from inline buttons.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_callback_query` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new CallbackQuery arrives. It takes *(client, callback_query)*
as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of callback queries to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the message handler.
callback_query (:obj:`~hydrogram.types.CallbackQuery`):
The received callback query.
"""
def __init__(self, callback: Callable, filters=None):
self.original_callback = callback
super().__init__(self.resolve_future_or_callback, filters)
def compose_data_identifier(self, query: CallbackQuery) -> Identifier:
"""
Composes an Identifier object from a CallbackQuery object.
Parameters:
query (:obj:`~hydrogram.types.CallbackQuery`):
The CallbackQuery object to compose of.
Returns:
:obj:`~hydrogram.types.Identifier`: An Identifier object.
"""
from_user = query.from_user
from_user_id = from_user.id if from_user else None
from_user_username = from_user.username if from_user else None
chat_id = None
message_id = None
if query.message:
message_id = getattr(query.message, "id", getattr(query.message, "message_id", None))
if query.message.chat:
chat_id = [query.message.chat.id, query.message.chat.username]
return Identifier(
message_id=message_id,
chat_id=chat_id,
from_user_id=[from_user_id, from_user_username],
inline_message_id=query.inline_message_id,
)
async def check_if_has_matching_listener(
self, client: "hydrogram.Client", query: CallbackQuery
) -> tuple[bool, Optional[Listener]]:
"""
Checks if the CallbackQuery object has a matching listener.
Parameters:
client (:obj:`~hydrogram.Client`):
The Client object to check with.
query (:obj:`~hydrogram.types.CallbackQuery`):
The CallbackQuery object to check with.
Returns:
A tuple: whether a matching listener exists and its filters match the CallbackQuery, and the matching listener itself (or None if there is none).
"""
data = self.compose_data_identifier(query)
listener = client.get_listener_matching_with_data(data, ListenerTypes.CALLBACK_QUERY)
listener_does_match = False
if listener:
filters = listener.filters
if callable(filters):
if iscoroutinefunction(filters.__call__):
listener_does_match = await filters(client, query)
else:
listener_does_match = await client.loop.run_in_executor(
None, filters, client, query
)
else:
listener_does_match = True
return listener_does_match, listener
async def check(self, client: "hydrogram.Client", query: CallbackQuery) -> bool:
"""
Checks if the CallbackQuery object has a matching listener or handler.
Parameters:
client (:obj:`~hydrogram.Client`):
The Client object to check with.
query (:obj:`~hydrogram.types.CallbackQuery`):
The CallbackQuery object to check with.
Returns:
``bool``: A boolean indicating whether the CallbackQuery object has a matching listener or the handler filter matches.
"""
listener_does_match, listener = await self.check_if_has_matching_listener(client, query)
if callable(self.filters):
if iscoroutinefunction(self.filters.__call__):
handler_does_match = await self.filters(client, query)
else:
handler_does_match = await client.loop.run_in_executor(
None, self.filters, client, query
)
else:
handler_does_match = True
data = self.compose_data_identifier(query)
if PyromodConfig.unallowed_click_alert:
# matches with the current query but from any user
permissive_identifier = Identifier(
chat_id=data.chat_id,
message_id=data.message_id,
inline_message_id=data.inline_message_id,
from_user_id=None,
)
matches = permissive_identifier.matches(data)
if (
listener
and (matches and not listener_does_match)
and listener.unallowed_click_alert
):
alert = (
listener.unallowed_click_alert
if isinstance(listener.unallowed_click_alert, str)
else PyromodConfig.unallowed_click_alert_text
)
await query.answer(alert)
return False
# let handler get the chance to handle if listener
# exists but its filters don't match
return listener_does_match or handler_does_match
async def resolve_future_or_callback(
self, client: "hydrogram.Client", query: CallbackQuery, *args
) -> None:
"""
Resolves the future or calls the callback of the listener. Will call the original handler if no listener.
Parameters:
client (:obj:`~hydrogram.Client`):
The Client object to resolve or call with.
query (:obj:`~hydrogram.types.CallbackQuery`):
The CallbackQuery object to resolve or call with.
args:
The arguments to call the callback with.
Returns:
``None``
"""
listener_does_match, listener = await self.check_if_has_matching_listener(client, query)
if listener and listener_does_match:
client.remove_listener(listener)
if listener.future and not listener.future.done():
listener.future.set_result(query)
raise hydrogram.StopPropagation
if listener.callback:
if iscoroutinefunction(listener.callback):
await listener.callback(client, query, *args)
else:
listener.callback(client, query, *args)
raise hydrogram.StopPropagation
raise ValueError("Listener must have either a future or a callback")
await self.original_callback(client, query, *args)
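# Hedged usage sketch based only on the docstring above: a CallbackQueryHandler is registered on a
# hydrogram Client via add_handler. The session name is a placeholder; real API credentials are
# assumed to exist elsewhere, and the client is not started here.
import hydrogram
from hydrogram.handlers import CallbackQueryHandler

app = hydrogram.Client("my_session")

async def on_button(client, callback_query):
    await callback_query.answer("Button pressed")

app.add_handler(CallbackQueryHandler(on_button))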
# Path: hydrogram/handlers/chat_join_request_handler.py
class ChatJoinRequestHandler(Handler):
"""The ChatJoinRequest handler class. Used to handle join chat requests.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`.
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_chat_join_request` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new ChatJoinRequest event arrives. It takes
*(client, chat_join_request)* as positional arguments (look at the section below for a detailed
description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of updates to be passed in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the handler.
chat_join_request (:obj:`~hydrogram.types.ChatJoinRequest`):
The received chat join request.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/handlers/chat_member_updated_handler.py
class ChatMemberUpdatedHandler(Handler):
"""The ChatMemberUpdated handler class. Used to handle changes in the status of a chat member.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`.
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_chat_member_updated` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new ChatMemberUpdated event arrives. It takes
*(client, chat_member_updated)* as positional arguments (look at the section below for a detailed
description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of updates to be passed in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the handler.
chat_member_updated (:obj:`~hydrogram.types.ChatMemberUpdated`):
The received chat member update.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/handlers/chosen_inline_result_handler.py
class ChosenInlineResultHandler(Handler):
"""The ChosenInlineResultHandler handler class. Used to handle chosen inline results coming from inline queries.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_chosen_inline_result` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new chosen inline result arrives.
It takes *(client, chosen_inline_result)* as positional arguments (look at the section below for a
detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of chosen inline results to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the message handler.
chosen_inline_result (:obj:`~hydrogram.types.ChosenInlineResult`):
The received chosen inline result.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/handlers/deleted_messages_handler.py
class DeletedMessagesHandler(Handler):
"""The deleted messages handler class. Used to handle deleted messages coming from any chat
(private, group, channel). It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_deleted_messages` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when one or more messages have been deleted.
It takes *(client, messages)* as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of messages to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the message handler.
messages (List of :obj:`~hydrogram.types.Message`):
The deleted messages, as list.
"""
def __init__(self, callback: Callable, filters: Filter = None):
super().__init__(callback, filters)
async def check(self, client: "hydrogram.Client", messages: list[Message]):
# Every message should be checked, if at least one matches the filter True is returned
# otherwise, or if the list is empty, False is returned
for message in messages:
if await super().check(client, message):
return True
return False
# Path: hydrogram/handlers/edited_message_handler.py
class EditedMessageHandler(Handler):
"""The EditedMessage handler class. Used to handle edited messages.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_edited_message` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new edited message arrives. It takes *(client, message)*
as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of messages to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the message handler.
edited_message (:obj:`~hydrogram.types.Message`):
The received edited message.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/handlers/inline_query_handler.py
class InlineQueryHandler(Handler):
"""The InlineQuery handler class. Used to handle inline queries.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_inline_query` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new InlineQuery arrives. It takes *(client, inline_query)*
as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of inline queries to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the inline query handler.
inline_query (:obj:`~hydrogram.types.InlineQuery`):
The received inline query.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/handlers/message_handler.py
class MessageHandler(Handler):
"""The Message handler class. Used to handle new messages.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_message` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new Message arrives. It takes *(client, message)*
as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of messages to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the message handler.
message (:obj:`~hydrogram.types.Message`):
The received message.
"""
def __init__(self, callback: Callable, filters=None):
self.original_callback = callback
super().__init__(self.resolve_future_or_callback, filters)
async def check_if_has_matching_listener(
self, client: "hydrogram.Client", message: Message
) -> tuple[bool, Optional[Listener]]:
"""
Checks if the message has a matching listener.
Parameters:
client (:obj:`~hydrogram.Client`):
The Client object to check with.
message (:obj:`~hydrogram.types.Message`):
The Message object to check with.
Returns:
``tuple``: A tuple of two elements: the first is whether a matching listener exists and its filters match the message, the second is the matching listener itself (or None if there is none).
"""
from_user = message.from_user
from_user_id = from_user.id if from_user else None
from_user_username = from_user.username if from_user else None
message_id = getattr(message, "id", getattr(message, "message_id", None))
data = Identifier(
message_id=message_id,
chat_id=[message.chat.id, message.chat.username],
from_user_id=[from_user_id, from_user_username],
)
listener = client.get_listener_matching_with_data(data, ListenerTypes.MESSAGE)
listener_does_match = False
if listener:
filters = listener.filters
if callable(filters):
if iscoroutinefunction(filters.__call__):
listener_does_match = await filters(client, message)
else:
listener_does_match = await client.loop.run_in_executor(
None, filters, client, message
)
else:
listener_does_match = True
return listener_does_match, listener
async def check(self, client: "hydrogram.Client", message: Message) -> bool:
"""
Checks whether the message has a matching listener, or whether this handler's filters match the Message.
Parameters:
client (:obj:`~hydrogram.Client`):
The Client object to check with.
message (:obj:`~hydrogram.types.Message`):
The Message object to check with.
Returns:
``bool``: Whether the message has a matching listener, or this handler's filters match the Message.
"""
listener_does_match = (await self.check_if_has_matching_listener(client, message))[0]
if callable(self.filters):
if iscoroutinefunction(self.filters.__call__):
handler_does_match = await self.filters(client, message)
else:
handler_does_match = await client.loop.run_in_executor(
None, self.filters, client, message
)
else:
handler_does_match = True
# let handler get the chance to handle if listener
# exists but its filters don't match
return listener_does_match or handler_does_match
async def resolve_future_or_callback(
self, client: "hydrogram.Client", message: Message, *args
):
"""
Resolves the future or calls the callback of the listener if the message has a matching listener.
Parameters:
client (:obj:`~hydrogram.Client`):
The Client object to resolve or call with.
message (:obj:`~hydrogram.types.Message`):
The Message object to resolve or call with.
args (``tuple``):
Arguments to call the callback with.
"""
listener_does_match, listener = await self.check_if_has_matching_listener(client, message)
if listener and listener_does_match:
client.remove_listener(listener)
if listener.future and not listener.future.done():
listener.future.set_result(message)
raise hydrogram.StopPropagation
if listener.callback:
if iscoroutinefunction(listener.callback):
await listener.callback(client, message, *args)
else:
listener.callback(client, message, *args)
raise hydrogram.StopPropagation
raise ValueError("Listener must have either a future or a callback")
await self.original_callback(client, message, *args)
# Path: hydrogram/handlers/poll_handler.py
class PollHandler(Handler):
"""The Poll handler class. Used to handle polls updates.
It is intended to be used with :meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_poll` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new poll update arrives. It takes *(client, poll)*
as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of polls to be passed
in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the poll handler.
poll (:obj:`~hydrogram.types.Poll`):
The received poll.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/handlers/raw_update_handler.py
class RawUpdateHandler(Handler):
"""The Raw Update handler class. Used to handle raw updates. It is intended to be used with
:meth:`~hydrogram.Client.add_handler`
For a nicer way to register this handler, have a look at the
:meth:`~hydrogram.Client.on_raw_update` decorator.
Parameters:
callback (``Callable``):
A function that will be called when a new update is received from the server. It takes
*(client, update, users, chats)* as positional arguments (look at the section below for
a detailed description).
Other Parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the update handler.
update (``Update``):
The received update, which can be one of the many single Updates listed in the
:obj:`~hydrogram.raw.base.Update` base type.
users (``dict``):
Dictionary of all :obj:`~hydrogram.types.User` mentioned in the update.
You can access extra info about the user (such as *first_name*, *last_name*, etc...) by using
the IDs you find in the *update* argument (e.g.: *users[1768841572]*).
chats (``dict``):
Dictionary of all :obj:`~hydrogram.types.Chat` and
:obj:`~hydrogram.raw.types.Channel` mentioned in the update.
You can access extra info about the chat (such as *title*, *participants_count*, etc...)
by using the IDs you find in the *update* argument (e.g.: *chats[1701277281]*).
Note:
The following Empty or Forbidden types may exist inside the *users* and *chats* dictionaries.
They mean you have been blocked by the user or banned from the group/channel.
- :obj:`~hydrogram.raw.types.UserEmpty`
- :obj:`~hydrogram.raw.types.ChatEmpty`
- :obj:`~hydrogram.raw.types.ChatForbidden`
- :obj:`~hydrogram.raw.types.ChannelForbidden`
"""
def __init__(self, callback: Callable):
super().__init__(callback)
# Path: hydrogram/handlers/user_status_handler.py
class UserStatusHandler(Handler):
"""The UserStatus handler class. Used to handle user status updates (user going online or offline).
It is intended to be used with :meth:`~hydrogram.Client.add_handler`.
For a nicer way to register this handler, have a look at the :meth:`~hydrogram.Client.on_user_status` decorator.
Parameters:
callback (``Callable``):
Pass a function that will be called when a new user status update arrives. It takes *(client, user)*
as positional arguments (look at the section below for a detailed description).
filters (:obj:`Filters`):
Pass one or more filters to allow only a subset of users to be passed in your callback function.
Other parameters:
client (:obj:`~hydrogram.Client`):
The Client itself, useful when you want to call other API methods inside the user status handler.
user (:obj:`~hydrogram.types.User`):
The user containing the updated status.
"""
def __init__(self, callback: Callable, filters=None):
super().__init__(callback, filters)
# Path: hydrogram/dispatcher.py
import asyncio
import inspect
import logging
import hydrogram
from collections import OrderedDict
from hydrogram import utils
from hydrogram.handlers import (
CallbackQueryHandler,
ChatJoinRequestHandler,
ChatMemberUpdatedHandler,
ChosenInlineResultHandler,
DeletedMessagesHandler,
EditedMessageHandler,
InlineQueryHandler,
MessageHandler,
PollHandler,
RawUpdateHandler,
UserStatusHandler,
)
from hydrogram.raw.types import (
UpdateBotCallbackQuery,
UpdateBotChatInviteRequester,
UpdateBotInlineQuery,
UpdateBotInlineSend,
UpdateChannelParticipant,
UpdateChatParticipant,
UpdateDeleteChannelMessages,
UpdateDeleteMessages,
UpdateEditChannelMessage,
UpdateEditMessage,
UpdateInlineBotCallbackQuery,
UpdateMessagePoll,
UpdateNewChannelMessage,
UpdateNewMessage,
UpdateNewScheduledMessage,
UpdateUserStatus,
)
# Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
log = logging.getLogger(__name__)
class Dispatcher:
NEW_MESSAGE_UPDATES = (
UpdateNewMessage,
UpdateNewChannelMessage,
UpdateNewScheduledMessage,
)
EDIT_MESSAGE_UPDATES = (UpdateEditMessage, UpdateEditChannelMessage)
DELETE_MESSAGES_UPDATES = (UpdateDeleteMessages, UpdateDeleteChannelMessages)
CALLBACK_QUERY_UPDATES = (UpdateBotCallbackQuery, UpdateInlineBotCallbackQuery)
CHAT_MEMBER_UPDATES = (UpdateChatParticipant, UpdateChannelParticipant)
USER_STATUS_UPDATES = (UpdateUserStatus,)
BOT_INLINE_QUERY_UPDATES = (UpdateBotInlineQuery,)
POLL_UPDATES = (UpdateMessagePoll,)
CHOSEN_INLINE_RESULT_UPDATES = (UpdateBotInlineSend,)
| CHAT_JOIN_REQUEST_UPDATES = (UpdateBotChatInviteRequester,) |
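# Generic illustration (the types below are stand-ins, not Telegram update classes) of why the
# Dispatcher groups raw update types into tuples: isinstance() accepts a tuple, so one check
# routes a whole family of updates to the same handler type.
NEW_MESSAGE_UPDATES = (int, bytes)      # stand-ins for UpdateNewMessage, UpdateNewChannelMessage, ...
DELETE_MESSAGES_UPDATES = (float,)      # stand-in for UpdateDeleteMessages, ...

def route(update):
    if isinstance(update, NEW_MESSAGE_UPDATES):
        return "MessageHandler"
    if isinstance(update, DELETE_MESSAGES_UPDATES):
        return "DeletedMessagesHandler"
    return "RawUpdateHandler"

print(route(1), route(2.0), route("other"))   # MessageHandler DeletedMessagesHandler RawUpdateHandler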
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: iwatake2222/rotop
# Path: src/rotop/data_container.py
class DataContainer:
MAX_ROW_CSV = 600
MAX_NUM_HISTORY = 100
def __init__(self, write_csv=False):
now = datetime.datetime.now()
if write_csv:
self.csv_dir_name = now.strftime('./rotop_%Y%m%d_%H%M%S')
os.mkdir(self.csv_dir_name)
else:
self.csv_dir_name = None
self.csv_index = 0
self.df_cpu = pd.DataFrame()
self.df_mem = pd.DataFrame()
self.df_cpu_history = pd.DataFrame()
self.df_mem_history = pd.DataFrame()
def run(self, top_runner: TopRunner, lines: list[str], num_process: int):
if top_runner.col_range_command and top_runner.col_range_command[0] > 0:
df_cpu_current, df_mem_current = self.create_df_from_top(top_runner, lines, num_process)
self.df_cpu = pd.concat([self.df_cpu, df_cpu_current], axis=0)
self.df_mem = pd.concat([self.df_mem, df_mem_current], axis=0)
self.df_cpu_history = pd.concat([self.df_cpu_history, df_cpu_current], axis=0, ignore_index=True)
self.df_mem_history = pd.concat([self.df_mem_history, df_mem_current], axis=0, ignore_index=True)
if self.csv_dir_name:
self.df_cpu.to_csv(os.path.join(self.csv_dir_name, f'cpu_{self.csv_index:03d}.csv'), index=False)
self.df_mem.to_csv(os.path.join(self.csv_dir_name, f'mem_{self.csv_index:03d}.csv'), index=False)
if len(self.df_cpu) >= self.MAX_ROW_CSV:
self.df_cpu = pd.DataFrame()
self.df_mem = pd.DataFrame()
self.csv_index += 1
if len(self.df_cpu_history) >= self.MAX_NUM_HISTORY:
self.df_cpu_history = self.df_cpu_history[1:]
self.df_mem_history = self.df_mem_history[1:]
self.df_cpu_history = self.sort_df_in_column(self.df_cpu_history)
self.df_mem_history = self.sort_df_in_column(self.df_mem_history)
return self.df_cpu_history, self.df_mem_history
def reset_history(self):
self.df_cpu_history = pd.DataFrame()
self.df_mem_history = pd.DataFrame()
@staticmethod
def sort_df_in_column(df: pd.DataFrame):
df = df.sort_values(by=len(df)-1, axis=1, ascending=False)
return df
@staticmethod
def create_df_from_top(top_runner: TopRunner, lines: list[str], num_process: int):
# now = datetime.datetime.now()
now = int(time.time())
for i, line in enumerate(lines):
if 'PID' in line:
lines = lines[i + 1:]
break
process_list = []
cpu_list = []
mem_list = []
for i, line in enumerate(lines):
if i >= num_process:
break
pid = line[top_runner.col_range_pid[0]:top_runner.col_range_pid[1]].strip()
command = line[top_runner.col_range_command[0]:].strip()
process_name = str(f'{command} ({pid})')
process_list.append(process_name)
cpu = float(line[top_runner.col_range_CPU[0]:top_runner.col_range_CPU[1]].strip())
cpu_list.append(cpu)
mem = float(line[top_runner.col_range_MEM[0]:top_runner.col_range_MEM[1]].strip())
mem_list.append(mem)
df_cpu_current = pd.DataFrame([[now] + cpu_list], columns=['datetime'] + process_list)
df_mem_current = pd.DataFrame([[now] + mem_list], columns=['datetime'] + process_list)
return df_cpu_current, df_mem_current
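# Small pandas illustration, independent of the repository, of the column sort performed by
# sort_df_in_column(): columns are ordered by the value in the last row (label len(df)-1), which
# relies on the default RangeIndex kept by the ignore_index=True concatenations above.
import pandas as pd

df = pd.DataFrame({"a": [1, 5], "b": [9, 2], "c": [4, 7]})
print(df.sort_values(by=len(df) - 1, axis=1, ascending=False))
# columns come out as c, a, b because row 1 holds 7, 5, 2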
# Path: src/rotop/top_runner.py
class TopRunner:
def __init__(self, interval, filter):
self.child = pexpect.spawn(f'top -cb -d {interval} -o %CPU -w 512')
self.filter_re = self.create_filter_re(filter)
self.ros_re = self.create_filter_re('--ros-arg|/opt/ros')
self.col_range_list_to_display = None
self.col_range_pid = None
self.col_range_CPU = None
self.col_range_MEM = None
self.col_range_command = None
self.next_after = ''
def __del__(self):
signal.signal(signal.SIGINT, signal.SIG_IGN) # ignore ctrl-c while closing
self.child.close()
def run(self, max_num_process, show_all=False, only_ros=False):
# get the result string of top command
self.child.expect(r'top - .*load average:')
before = self.child.before
previous_after = self.next_after
self.next_after = self.child.after
if before == '' or previous_after == '' or self.next_after == '':
return None, None
top_str = (previous_after + before).decode('utf-8')
original_lines = top_str.splitlines()
result_lines = []
result_show_all_lines = []
row_process_info = 0
# System Information
for line in original_lines:
result_lines.append(line)
result_show_all_lines.append(line)
if 'PID' in line:
break
# get layout information from process header line
row_process_info = len(result_lines)
process_header_org = result_lines[-1]
self.analyze_cols(process_header_org, show_all)
process_header = ''
for range in self.col_range_list_to_display:
process_header += process_header_org[range[0]:range[1]]
result_lines[-1] = process_header
# Process Information
for line in original_lines[row_process_info:]:
if self.col_range_command and self.col_range_command[0] > 0 and len(line) > self.col_range_command[0]:
process_info_org = line[:self.col_range_command[0]]
process_info = ''
for range in self.col_range_list_to_display:
process_info += process_info_org[range[0]:range[1]]
command_str = line[self.col_range_command[0]:]
if not self.filter_re.match(command_str):
continue
if only_ros and not self.ros_re.match(command_str):
continue
command_str = self.parse_command_str(command_str)
line = process_info + command_str
show_all_line = process_info_org + command_str
result_lines.append(line)
result_show_all_lines.append(show_all_line)
if len(result_lines) >= row_process_info + max_num_process:
break
return result_lines, result_show_all_lines
def analyze_cols(self, process_header: str, show_all: bool):
if self.col_range_command is None or self.col_range_command[0] == -1:
self.col_range_list_to_display = self.get_col_range_list_to_display(process_header, show_all)
self.col_range_pid = TopRunner.get_col_range_PID(process_header)
self.col_range_CPU = TopRunner.get_col_range_CPU(process_header)
self.col_range_MEM = TopRunner.get_col_range_MEM(process_header)
self.col_range_command = TopRunner.get_col_range_command(process_header)
return
@staticmethod
def create_filter_re(filter_str):
if '.*' not in filter_str:
filter_str = '.*' + filter_str + '.*'
filter_re = re.compile(filter_str)
return filter_re
@staticmethod
def get_row_start_list(lines: list[str])->list[int]:
row_list = []
for i, line in enumerate(lines):
if 'top' in line and 'load average' in line:
row_list.append(i)
return row_list
@staticmethod
def get_col_range_command(process_info_header_line: str):
start_col = process_info_header_line.find('COMMAND')
end_col = len(process_info_header_line) - 1
return (start_col, end_col)
@staticmethod
def get_col_range_PID(process_info_header_line: str):
start_col = 0
end_col = process_info_header_line.find('PID') + len('PID')
return (start_col, end_col)
@staticmethod
def get_col_range_CPU(process_info_header_line: str):
start_col = process_info_header_line.find('SHR S') + len('SHR S')
end_col = process_info_header_line.find('%CPU') + len('%CPU')
return (start_col, end_col)
@staticmethod
def get_col_range_MEM(process_info_header_line: str):
start_col = process_info_header_line.find('%CPU') + len('%CPU')
end_col = process_info_header_line.find('%MEM') + len('%MEM')
return (start_col, end_col)
@staticmethod
def get_col_range_list_to_display(process_info_header_line: str, show_all=False):
range_list = []
if show_all:
range_list.append((0, len(process_info_header_line)))
else:
start_col = 0
end_col = process_info_header_line.find('PID') + len('PID')
range_list.append((start_col, end_col))
start_col = process_info_header_line.find('NI') + len('NI')
end_col = process_info_header_line.find('%MEM') + len('%MEM')
range_list.append((start_col, end_col))
start_col = process_info_header_line.find('COMMAND') - 1
end_col = len(process_info_header_line)
range_list.append((start_col, end_col))
return range_list
@staticmethod
def parse_component_container_command(command):
cmd = command.split()[0].split('/')[-1]
idx_node = command.find('__node')
if idx_node > 0:
node = command[idx_node:].split()[0].split('=')[-1]
cmd = node
idx_ns = command.find('__ns')
if idx_ns > 0:
ns = command[idx_ns:].split()[0].split('=')[-1]
# cmd = cmd + ', ' + node + ', ' + ns
cmd += ', ' + ns
return cmd
@staticmethod
def parse_python_command(command):
cmd_list = command.split()
cmd = cmd_list[0].split('/')[-1]
if len(cmd_list) > 1:
if cmd_list[1][0] == '-':
python_file = cmd_list[-1]
else:
python_file = cmd_list[1]
python_file = python_file.split('/')[-1]
ros2_option = ''
if 'ros2' == python_file:
ros2_option = ' '.join(cmd_list[2:5])
cmd = cmd + ' ' + python_file + ' ' + ros2_option
return cmd
@staticmethod
def parse_command_str(command):
param_for_ros2 = ['__node', '__ns']
if '[' == command[0]:
# kernel process
command = command
elif any(item in command for item in param_for_ros2):
command = TopRunner.parse_component_container_command(command)
elif 'python' in command:
command = TopRunner.parse_python_command(command)
else:
# normal process
command = command.split()[0].split('/')[-1]
return command
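# Standalone illustration that mirrors (but does not import) parse_component_container_command:
# extracting the node name and namespace from a ROS 2 component-container command line.
command = "/opt/ros/humble/lib/rclcpp_components/component_container --ros-args -r __node:=talker -r __ns:=/demo"
node = command[command.find("__node"):].split()[0].split("=")[-1]
ns = command[command.find("__ns"):].split()[0].split("=")[-1]
print(node + ", " + ns)   # talker, /demo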
# Path: src/rotop/utility.py
def create_logger(name, level: int=logging.DEBUG, log_filename: str=None) -> logging.Logger:
handler_format = logging.Formatter('[%(asctime)s][%(levelname)-7s][%(filename)s:%(lineno)s] %(message)s')
# stream_handler = logging .StreamHandler()
# stream_handler.setLevel(level)
# stream_handler.setFormatter(handler_format)
logger = logging.getLogger(name)
logger.propagate = False
logger.setLevel(level)
# logger.addHandler(stream_handler)
# if log_filename:
# file_handler = logging.FileHandler(log_filename)
# file_handler.setLevel(level)
# file_handler.setFormatter(handler_format)
# logger.addHandler(file_handler)
return logger
# Path: src/rotop/gui_main.py
import pandas as pd
import threading
import time
import dearpygui.dearpygui as dpg
from .data_container import DataContainer
from .top_runner import TopRunner
from .utility import create_logger
time.sleep(0.1)
dpg.render_dearpygui_frame()
dpg.destroy_context()
def get_plot_title(self):
return 'CPU [%]' if self.plot_is_cpu else 'MEM [%]'
def cb_button_cpumem(self, sender, app_data, user_data):
self.plot_is_cpu = not self.plot_is_cpu
dpg.set_item_label(self.dpg_plot_id, self.get_plot_title())
def cb_button_reset(self, sender, app_data, user_data):
global g_reset_history_df
g_reset_history_df = True
self.color_dict = {}
self.theme_dict = {}
def cb_button_pause(self, sender, app_data, user_data):
self.pause = not self.pause
def cb_resize(self, sender, app_data):
window_width = app_data[2]
window_height = app_data[3]
dpg.set_item_width(self.dpg_window_id, window_width)
dpg.set_item_height(self.dpg_window_id, window_height)
dpg.set_item_width(self.dpg_plot_id, window_width)
dpg.set_item_height(self.dpg_plot_id, window_height / 2)
def update_gui(self, result_lines:list[str], df_cpu_history:pd.DataFrame, df_mem_history:pd.DataFrame):
if self.pause:
return
if self.dpg_plot_axis_y_id:
dpg.delete_item(self.dpg_plot_axis_y_id)
self.dpg_plot_axis_y_id = dpg.add_plot_axis(dpg.mvYAxis, label=self.get_plot_title(), lock_min=True, parent=self.dpg_plot_id)
df = df_cpu_history if self.plot_is_cpu else df_mem_history
col_x = df.columns[0]
cols_y = df.columns[1:]
x = df[col_x].to_list()
for col_y in cols_y:
y = df[col_y].to_list()
line_series = dpg.add_line_series(x, y, label=col_y[:min(40, len(col_y))].ljust(40), parent=self.dpg_plot_axis_y_id)
theme = self.get_theme(col_y)
dpg.bind_item_theme(line_series, theme)
if self.plot_is_cpu:
dpg.add_line_series([x[0]], [110], label='', parent=self.dpg_plot_axis_y_id) # dummy for ymax>=100
dpg.add_plot_legend(parent=self.dpg_plot_id, outside=True, location=dpg.mvPlot_Location_NorthEast)
dpg.fit_axis_data(self.dpg_plot_axis_x_id)
dpg.fit_axis_data(self.dpg_plot_axis_y_id)
dpg.set_value(self.dpg_text, '\n'.join(result_lines))
def get_color(self, process_name)->tuple[int]:
# return (0, 0, 0)
if process_name in self.color_dict:
return self.color_dict[process_name]
else:
color = COLOR_MAP[len(self.color_dict)%len(COLOR_MAP)]
self.color_dict[process_name] = color
return color
def get_theme(self, process_name):
if process_name in self.theme_dict:
return self.theme_dict[process_name]
else:
with dpg.theme() as theme:
with dpg.theme_component(dpg.mvLineSeries):
dpg.add_theme_color(dpg.mvPlotCol_Line, self.get_color(process_name), category=dpg.mvThemeCat_Plots)
self.theme_dict[process_name] = theme
return theme
def gui_loop(view: GuiView):
view.start_dpg()
def gui_main(args):
global g_reset_history_df
top_runner = TopRunner(args.interval, args.filter)
data_container = DataContainer(args.csv)
view = GuiView()
gui_thread = threading.Thread(target=gui_loop, args=(view,))
gui_thread.start()
try:
while True:
if g_reset_history_df:
data_container.reset_history()
g_reset_history_df = False
result_lines, result_show_all_lines = top_runner.run(args.num_process, True, args.only_ros)
if result_show_all_lines is None:
time.sleep(0.1)
continue
df_cpu_history, df_mem_history = data_container.run(top_runner, result_show_all_lines, args.num_process)
df_cpu_history = df_cpu_history.iloc[:, :min(args.num_process, len(df_cpu_history.columns))]
df_mem_history = df_mem_history.iloc[:, :min(args.num_process, len(df_mem_history.columns))]
if gui_thread.is_alive():
view.update_gui(result_lines, df_cpu_history, df_mem_history)
else:
break
except KeyboardInterrupt:
pass
view.exit()
| gui_thread.join() |
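# Minimal, generic sketch of the thread pattern used by gui_main() above: a worker thread runs its
# own loop while the main thread keeps feeding data, and join() waits for the worker to finish.
# The Event-based stop signal is an assumption for this sketch, not something the repo uses.
import threading
import time

stop = threading.Event()

def worker():
    while not stop.is_set():
        time.sleep(0.05)    # stand-in for rendering / polling work

t = threading.Thread(target=worker)
t.start()
time.sleep(0.2)             # stand-in for the main data-collection loop
stop.set()
t.join()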
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: chenruduan/OAReactDiff
# Path: oa_reactdiff/trainer/pl_trainer.py
class DDPMModule(LightningModule):
def __init__(
self,
model_config: Dict,
optimizer_config: Dict,
training_config: Dict,
node_nfs: List[int] = [9] * 3,
edge_nf: int = 4,
condition_nf: int = 3,
fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"],
pos_dim: int = 3,
update_pocket_coords: bool = True,
condition_time: bool = True,
edge_cutoff: Optional[float] = None,
norm_values: Tuple = (1.0, 1.0, 1.0),
norm_biases: Tuple = (0.0, 0.0, 0.0),
noise_schedule: str = "polynomial_2",
timesteps: int = 1000,
precision: float = 1e-5,
loss_type: str = "l2",
pos_only: bool = False,
process_type: Optional[str] = None,
model: nn.Module = None,
enforce_same_encoding: Optional[List] = None,
scales: List[float] = [1.0, 1.0, 1.0],
eval_epochs: int = 20,
source: Optional[Dict] = None,
fixed_idx: Optional[List] = None,
) -> None:
super().__init__()
egnn_dynamics = EGNNDynamics(
model_config=model_config,
node_nfs=node_nfs,
edge_nf=edge_nf,
condition_nf=condition_nf,
fragment_names=fragment_names,
pos_dim=pos_dim,
update_pocket_coords=update_pocket_coords,
condition_time=condition_time,
edge_cutoff=edge_cutoff,
model=model,
enforce_same_encoding=enforce_same_encoding,
source=source,
)
normalizer = Normalizer(
norm_values=norm_values,
norm_biases=norm_biases,
pos_dim=pos_dim,
)
gamma_module = PredefinedNoiseSchedule(
noise_schedule=noise_schedule,
timesteps=timesteps,
precision=precision,
)
schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
self.ddpm = EnVariationalDiffusion(
dynamics=egnn_dynamics,
schdule=schedule,
normalizer=normalizer,
size_histogram=None,
loss_type=loss_type,
pos_only=pos_only,
fixed_idx=fixed_idx,
)
self.model_config = model_config
self.optimizer_config = optimizer_config
self.training_config = training_config
self.loss_type = loss_type
self.n_fragments = len(fragment_names)
self.remove_h = training_config["remove_h"]
self.pos_only = pos_only
self.process_type = process_type or "QM9"
self.scales = scales
sampling_gamma_module = PredefinedNoiseSchedule(
noise_schedule="polynomial_2",
timesteps=150,
precision=precision,
)
self.sampling_schedule = DiffSchedule(
gamma_module=sampling_gamma_module,
norm_values=norm_values,
)
self.eval_epochs = eval_epochs
self.clip_grad = training_config["clip_grad"]
if self.clip_grad:
self.gradnorm_queue = utils.Queue()
self.gradnorm_queue.add(3000)
self.save_hyperparameters()
def configure_optimizers(self):
optimizer = torch.optim.AdamW(self.ddpm.parameters(), **self.optimizer_config)
if not self.training_config["lr_schedule_type"] is None:
scheduler_func = LR_SCHEDULER[self.training_config["lr_schedule_type"]]
scheduler = scheduler_func(
optimizer=optimizer, **self.training_config["lr_schedule_config"]
)
return [optimizer], [scheduler]
else:
return optimizer
def setup(self, stage: Optional[str] = None):
func = PROCESS_FUNC[self.process_type]
ft = FILE_TYPE[self.process_type]
if stage == "fit":
self.train_dataset = func(
Path(self.training_config["datadir"], f"train_addprop{ft}"),
**self.training_config,
)
self.training_config["reflection"] = False # Turn off reflection in val.
self.val_dataset = func(
Path(self.training_config["datadir"], f"valid_addprop{ft}"),
**self.training_config,
)
elif stage == "test":
self.test_dataset = func(
Path(self.training_config["datadir"], f"test{ft}"),
**self.training_config,
)
else:
raise NotImplementedError
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_dataset,
self.training_config["bz"],
shuffle=True,
num_workers=self.training_config["num_workers"],
collate_fn=self.train_dataset.collate_fn,
)
def val_dataloader(self) -> DataLoader:
return DataLoader(
self.val_dataset,
self.training_config["bz"],
shuffle=False,
num_workers=self.training_config["num_workers"],
collate_fn=self.val_dataset.collate_fn,
)
def test_dataloader(self) -> DataLoader:
return DataLoader(
self.test_dataset,
self.training_config["bz"],
shuffle=False,
num_workers=self.training_config["num_workers"],
collate_fn=self.test_dataset.collate_fn,
)
def compute_loss(self, batch):
representations, conditions = batch
loss_terms = self.ddpm.forward(
representations,
conditions,
)
info = {}
if not self.pos_only:
denoms = [
(self.ddpm.pos_dim + self.ddpm.node_nfs[ii])
* representations[ii]["size"]
for ii in range(self.n_fragments)
]
else:
denoms = [
self.ddpm.pos_dim * representations[ii]["size"]
for ii in range(self.n_fragments)
]
error_t_normalized = [
loss_terms["error_t"][ii] / denoms[ii] * self.scales[ii]
for ii in range(self.n_fragments)
]
if self.loss_type == "l2" and self.training:
# normalize loss_t
loss_t = torch.stack(error_t_normalized, dim=0).sum(dim=0)
# normalize loss_0
loss_0_x = [
loss_terms["loss_0_x"][ii]
* self.scales[ii]
/ (self.ddpm.pos_dim * representations[ii]["size"])
for ii in range(self.n_fragments)
]
loss_0_x = torch.stack(loss_0_x, dim=0).sum(dim=0)
loss_0_cat = torch.stack(loss_terms["loss_0_cat"], dim=0).sum(dim=0)
loss_0_charge = torch.stack(loss_terms["loss_0_charge"], dim=0).sum(dim=0)
loss_0 = loss_0_x + loss_0_cat + loss_0_charge
# VLB objective or evaluation step
else:
# Note: SNR_weight should be negative
error_t = [
-self.ddpm.T * 0.5 * loss_terms["SNR_weight"] * _error_t
for _error_t in loss_terms["error_t"]
]
loss_t = torch.stack(error_t, dim=0).sum(dim=0)
loss_0_x = torch.stack(loss_terms["loss_0_x"], dim=0).sum(dim=0)
loss_0_cat = torch.stack(loss_terms["loss_0_cat"], dim=0).sum(dim=0)
loss_0_charge = torch.stack(loss_terms["loss_0_charge"], dim=0).sum(dim=0)
loss_0 = (
loss_0_x + loss_0_cat + loss_0_charge + loss_terms["neg_log_constants"]
)
nll = loss_t + loss_0 + loss_terms["kl_prior"]
# nll = loss_t
for ii in range(self.n_fragments):
info[f"error_t_{ii}"] = error_t_normalized[ii].mean().item() / (
self.scales[ii] + 1e-4
)
info[f"unorm_error_t_{ii}"] = loss_terms["error_t"][ii].mean().item()
# Correct for normalization on x.
if not (self.loss_type == "l2" and self.training):
nll = nll - loss_terms["delta_log_px"]
# Transform conditional nll into joint nll
# Note:
# loss = -log p(x,h|N) and log p(x,h,N) = log p(x,h|N) + log p(N)
# Therefore, log p(x,h|N) = -loss + log p(N)
# => loss_new = -log p(x,h,N) = loss - log p(N)
nll = nll - loss_terms["log_pN"]
return nll, info
def eval_inplaint_batch(
self,
batch: List,
resamplings: int = 5,
jump_length: int = 5,
frag_fixed: List = [0, 2],
):
sampling_ddpm = copy.deepcopy(self.ddpm)
sampling_ddpm.schedule = self.sampling_schedule
sampling_ddpm.T = self.sampling_schedule.gamma_module.timesteps
sampling_ddpm.eval()
representations, conditions = batch
xh_fixed = [
torch.cat(
[repre[feature_type] for feature_type in FEATURE_MAPPING],
dim=1,
)
for repre in representations
]
n_samples = representations[0]["size"].size(0)
fragments_nodes = [repre["size"] for repre in representations]
with torch.no_grad():
out_samples, _ = sampling_ddpm.inpaint(
n_samples=n_samples,
fragments_nodes=fragments_nodes,
conditions=conditions,
return_frames=1,
resamplings=resamplings,
jump_length=jump_length,
timesteps=None,
xh_fixed=xh_fixed,
frag_fixed=frag_fixed,
)
rmsds = batch_rmsd(
fragments_nodes,
out_samples[0],
xh_fixed,
idx=1,
threshold=0.5,
)
return np.mean(rmsds), np.median(rmsds)
def training_step(self, batch, batch_idx):
nll, info = self.compute_loss(batch)
loss = nll.mean(0)
self.log("train-totloss", loss, rank_zero_only=True)
for k, v in info.items():
self.log(f"train-{k}", v, rank_zero_only=True)
if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:
if self.trainer.is_global_zero:
print(
"evaluation on samping for training batch...",
batch[1].shape,
batch_idx,
)
rmsd_mean, rmsd_median = self.eval_inplaint_batch(batch)
info["rmsd"], info["rmsd-median"] = rmsd_mean, rmsd_median
else:
info["rmsd"], info["rmsd-median"] = np.nan, np.nan
info["loss"] = loss
return info
def _shared_eval(self, batch, batch_idx, prefix, *args):
nll, info = self.compute_loss(batch)
loss = nll.mean(0)
info["totloss"] = loss.item()
if (self.current_epoch + 1) % self.eval_epochs == 0 and batch_idx == 0:
if self.trainer.is_global_zero:
print(
"evaluation on samping for validation batch...",
batch[1].shape,
batch_idx,
)
info["rmsd"], info["rmsd-median"] = self.eval_inplaint_batch(batch)
else:
info["rmsd"], info["rmsd-median"] = np.nan, np.nan
info_prefix = {}
for k, v in info.items():
info_prefix[f"{prefix}-{k}"] = v
return info_prefix
def validation_step(self, batch, batch_idx, *args):
return self._shared_eval(batch, batch_idx, "val", *args)
def test_step(self, batch, batch_idx, *args):
return self._shared_eval(batch, batch_idx, "test", *args)
def validation_epoch_end(self, val_step_outputs):
val_epoch_metrics = average_over_batch_metrics(val_step_outputs)
if self.trainer.is_global_zero:
pretty_print(self.current_epoch, val_epoch_metrics, prefix="val")
val_epoch_metrics.update({"epoch": self.current_epoch})
for k, v in val_epoch_metrics.items():
self.log(k, v, sync_dist=True)
def training_epoch_end(self, outputs) -> None:
epoch_metrics = average_over_batch_metrics(
outputs, allowed=["rmsd", "rmsd-median"]
)
self.log("train-rmsd", epoch_metrics["rmsd"], sync_dist=True)
self.log("train-rmsd-median", epoch_metrics["rmsd-median"], sync_dist=True)
def configure_gradient_clipping(
self, optimizer, optimizer_idx, gradient_clip_val, gradient_clip_algorithm
):
if not self.clip_grad:
return
# Allow the gradient norm to be at most 1.5x the recent mean plus 3 standard deviations of the recent history.
max_grad_norm = 1.5 * self.gradnorm_queue.mean() + 3 * self.gradnorm_queue.std()
# Get current grad_norm
params = [p for g in optimizer.param_groups for p in g["params"]]
grad_norm = utils.get_grad_norm(params)
# Lightning will handle the gradient clipping
self.clip_gradients(
optimizer, gradient_clip_val=max_grad_norm, gradient_clip_algorithm="norm"
)
if float(grad_norm) > max_grad_norm:
self.gradnorm_queue.add(float(max_grad_norm))
else:
self.gradnorm_queue.add(float(grad_norm))
if float(grad_norm) > max_grad_norm:
print(
f"Clipped gradient with value {grad_norm:.1f} "
f"while allowed {max_grad_norm:.1f}"
)
# Path: oa_reactdiff/dataset/transition1x.py
class ProcessedTS1x(BaseDataset):
def __init__(
self,
npz_path,
center=True,
pad_fragments=0,
device="cpu",
zero_charge=False,
remove_h=False,
single_frag_only=True,
swapping_react_prod=False,
append_frag=False,
reflection=False,
use_by_ind=False,
only_ts=False,
confidence_model=False,
position_key="positions",
ediff=None,
**kwargs,
):
super().__init__(
npz_path=npz_path,
center=center,
device=device,
zero_charge=zero_charge,
remove_h=remove_h,
)
if confidence_model:
use_by_ind = False
if remove_h:
print("remove_h is ignored because it is not reasonble for TS.")
if single_frag_only:
single_frag_inds = np.where(
np.array(self.raw_dataset["single_fragment"]) == 1
)[0]
else:
single_frag_inds = np.array(range(len(self.raw_dataset["single_fragment"])))
if use_by_ind:
use_inds = self.raw_dataset["use_ind"]
else:
use_inds = range(len(self.raw_dataset["single_fragment"]))
single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))
data_duplicated = copy.deepcopy(self.raw_dataset)
for k, mapped_k in FRAG_MAPPING.items():
for v, val in data_duplicated[k].items():
self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]
if swapping_react_prod:
mapped_val = data_duplicated[mapped_k][v]
self.raw_dataset[k][v] += [
mapped_val[ii] for ii in single_frag_inds
]
if reflection:
for k, mapped_k in FRAG_MAPPING.items():
for v, val in self.raw_dataset[k].items():
if v in ["wB97x_6-31G(d).forces", position_key]:
self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]
else:
self.raw_dataset[k][v] += val
self.reactant = self.raw_dataset["reactant"]
self.transition_state = self.raw_dataset["transition_state"]
self.product = self.raw_dataset["product"]
self.n_fragments = pad_fragments + 3
self.device = torch.device(device)
n_samples = len(self.reactant["charges"])
self.n_samples = len(self.reactant["charges"])
self.data = {}
repeat = 2 if swapping_react_prod else 1
if confidence_model:
self.data["target"] = torch.tensor(
self.raw_dataset["target"] * repeat
).unsqueeze(1)
self.data["rmsd"] = torch.tensor(
self.raw_dataset["rmsd"] * repeat
).unsqueeze(1)
if ediff is not None:
self.data["ediff"] = torch.tensor(
self.raw_dataset[ediff]["ediff"] * repeat
).unsqueeze(1)
if not only_ts:
if not append_frag:
self.process_molecules(
"reactant", n_samples, idx=0, position_key=position_key
)
self.process_molecules("transition_state", n_samples, idx=1)
self.process_molecules(
"product", n_samples, idx=2, position_key=position_key
)
else:
self.process_molecules(
"reactant",
n_samples,
idx=0,
append_charge=0,
position_key=position_key,
)
self.process_molecules(
"transition_state", n_samples, idx=1, append_charge=1
)
self.process_molecules(
"product",
n_samples,
idx=2,
append_charge=0,
position_key=position_key,
)
for idx in range(pad_fragments):
self.patch_dummy_molecules(idx + 3)
else:
if not append_frag:
self.process_molecules("transition_state", n_samples, idx=0)
else:
self.process_molecules(
"transition_state", n_samples, idx=0, append_charge=1
)
# for idx in range(2):
# self.patch_dummy_molecules(idx + 1)
self.data["condition"] = [
torch.zeros(
size=(1, 1),
dtype=torch.int64,
device=self.device,
)
for _ in range(self.n_samples)
]
# Path: oa_reactdiff/analyze/rmsd.py
def batch_rmsd(
fragments_nodes: List[Tensor],
out_samples: List[Tensor],
xh: List[Tensor],
idx: int = 1,
threshold=0.5,
):
rmsds = []
out_samples_use = out_samples[idx]
xh_use = xh[idx]
nodes = fragments_nodes[idx].long().cpu().numpy()
start_ind, end_ind = 0, 0
for jj, natoms in enumerate(nodes):
end_ind += natoms
mol1 = xh2pmg(out_samples_use[start_ind:end_ind])
mol2 = xh2pmg(xh_use[start_ind:end_ind])
try:
rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)
except:
rmsd = 1.0
rmsds.append(min(rmsd, 1.0))
start_ind = end_ind
return rmsds
# Path: oa_reactdiff/evaluate/utils.py
def set_new_schedule(
ddpm_trainer: DDPMModule,
timesteps: int = 250,
device: torch.device = torch.device("cuda"),
noise_schedule: str = "polynomial_2",
) -> DDPMModule:
precision: float = 1e-5
gamma_module = PredefinedNoiseSchedule(
noise_schedule=noise_schedule,
timesteps=timesteps,
precision=precision,
)
schedule = DiffSchedule(
gamma_module=gamma_module, norm_values=ddpm_trainer.ddpm.norm_values
)
ddpm_trainer.ddpm.schedule = schedule
ddpm_trainer.ddpm.T = timesteps
return ddpm_trainer.to(device)
# Path: oa_reactdiff/evaluate/utils.py
def inplaint_batch(
batch: List,
ddpm_trainer: DDPMModule,
resamplings: int = 1,
jump_length: int = 1,
frag_fixed: List = [0, 2],
):
representations, conditions = batch
xh_fixed = [
torch.cat(
[repre[feature_type] for feature_type in FEATURE_MAPPING],
dim=1,
)
for repre in representations
]
n_samples = representations[0]["size"].size(0)
fragments_nodes = [repre["size"] for repre in representations]
out_samples, _ = ddpm_trainer.ddpm.inpaint(
n_samples=n_samples,
fragments_nodes=fragments_nodes,
conditions=conditions,
return_frames=1,
resamplings=resamplings,
jump_length=jump_length,
timesteps=None,
xh_fixed=xh_fixed,
frag_fixed=frag_fixed,
)
return out_samples[0], xh_fixed, fragments_nodes
# Path: oa_reactdiff/evaluate/utils.py
def samples_to_pos_charge(out_samples, fragments_nodes):
x_r = torch.tensor_split(
out_samples[0], torch.cumsum(fragments_nodes[0], dim=0).to("cpu")[:-1]
)
x_ts = torch.tensor_split(
out_samples[1], torch.cumsum(fragments_nodes[0], dim=0).to("cpu")[:-1]
)
x_p = torch.tensor_split(
out_samples[2], torch.cumsum(fragments_nodes[0], dim=0).to("cpu")[:-1]
)
pos = {
"reactant": [_x[:, :3].cpu().numpy() for _x in x_r],
"transition_state": [_x[:, :3].cpu().numpy() for _x in x_ts],
"product": [_x[:, :3].cpu().numpy() for _x in x_p],
}
z = [_x[:, -1].long().cpu().numpy() for _x in x_r]
natoms = [f.cpu().item() for f in fragments_nodes[0]]
return pos, z, natoms
# Path: oa_reactdiff/evaluate/generate_confidence_sample.py
from typing import List
from uuid import uuid4
from torch.utils.data import DataLoader
from oa_reactdiff.trainer.pl_trainer import DDPMModule
from oa_reactdiff.dataset.transition1x import ProcessedTS1x
from oa_reactdiff.analyze.rmsd import batch_rmsd
from oa_reactdiff.evaluate.utils import (
set_new_schedule,
inplaint_batch,
samples_to_pos_charge,
)
import time
import os
import numpy as np
import torch
import pickle
import argparse
"--single_frag_only",
dest="single_frag_only",
default=0,
type=int,
help="single_frag_only",
)
parser.add_argument(
"--model", dest="model", default="leftnet_2074", type=str, help="model"
)
parser.add_argument("--power", dest="power", default="2", type=str, help="power")
parser.add_argument(
"--position_key",
dest="position_key",
default="positions",
type=str,
help="position_key",
)
args = parser.parse_args()
print("args: ", args)
config = dict(
model=args.model,
dataset=args.dataset,
partition=args.partition,
timesteps=args.timesteps,
bz=args.bz,
resamplings=args.resamplings,
jump_length=args.jump_length,
repeats=args.repeats,
max_batch=-1,
shuffle=True,
single_frag_only=args.single_frag_only,
noise_schedule="polynomial_" + args.power,
position_key=args.position_key,
)
print("loading ddpm trainer...")
device = torch.device("cuda")
tspath = "/home/ubuntu/efs/TSDiffusion/oa_reactdiff/trainer/ckpt/TSDiffusion-TS1x-All"
checkpoints = {
"leftnet_2074": f"{tspath}/leftnet-8-70b75beeaac1/ddpm-epoch=2074-val-totloss=531.18.ckpt",
"egnn": f"{tspath}/egnn-1-7d0e388fa0fd/ddpm-epoch=759-val-totloss=616.42.ckpt",
"leftnet_wo_oa": f"{tspath}/leftnet-10-da396de30744_wo_oa/ddpm-epoch=149-val-totloss=600.87.ckpt",
"leftnet_wo_oa_aligned": f"{tspath}/leftnet-10-d13a2c2bace6_wo_oa_align/ddpm-epoch=779-val-totloss=747.10.ckpt",
"leftnet_wo_oa_aligned_early": f"{tspath}/leftnet-10-d13a2c2bace6_wo_oa_align/ddpm-epoch=719-val-totloss=680.64.ckpt",
}
ddpm_trainer = DDPMModule.load_from_checkpoint(
checkpoint_path=checkpoints[config["model"]],
map_location=device,
)
ddpm_trainer = set_new_schedule(
ddpm_trainer, timesteps=config["timesteps"], noise_schedule=config["noise_schedule"]
)
print("loading dataset...")
dataset = ProcessedTS1x(
npz_path=f"../data/{args.dataset}/{args.partition}.pkl",
center=True,
pad_fragments=0,
device="cuda",
zero_charge=False,
remove_h=False,
single_frag_only=config["single_frag_only"],
swapping_react_prod=False,
use_by_ind=True,
position_key=config["position_key"],
)
print("# of points:", len(dataset))
loader = DataLoader(
dataset,
batch_size=config["bz"],
shuffle=config["shuffle"],
num_workers=0,
collate_fn=dataset.collate_fn,
)
print("evaluating...")
speices = ["reactant", "transition_state", "product"]
keys = ["num_atoms", "charges", "position"]
for num_repeat in range(config["repeats"]):
print("num_repeat: ", num_repeat)
_rmsds, _genEs = [], []
filename = assemble_filename(config)
data = {}
for s in speices:
data[s] = {}
for k in keys:
data[s][k] = []
for s in ["target", "rmsd"]:
data[s] = []
for ii, batch in enumerate(loader):
print("batch_idx: ", ii)
time_start = time.time()
if ii == config["max_batch"]:
break
# TS gen
out_samples, xh_fixed, fragments_nodes = inplaint_batch(
batch,
ddpm_trainer,
resamplings=config["resamplings"],
jump_length=config["jump_length"],
frag_fixed=[0, 2],
)
pos, z, natoms = samples_to_pos_charge(out_samples, fragments_nodes)
_rmsds = batch_rmsd(
fragments_nodes,
out_samples,
xh_fixed,
idx=1,
threshold=0.5,
)
for s in speices:
data[s]["position"] += pos[s]
data[s]["charges"] += z
data[s]["num_atoms"] += natoms
| data["rmsd"] += _rmsds |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: lewandofskee/DiAD
# Path: mvtecad_dataloader.py
class MVTecDataset(Dataset):
def __init__(self,type, root):
self.data = []
if type == 'train':
with open('./training/MVTec-AD/train.json', 'rt') as f:
for line in f:
self.data.append(json.loads(line))
else:
with open('./training/MVTec-AD/test.json', 'rt') as f:
for line in f:
self.data.append(json.loads(line))
self.label_to_idx = {'bottle': '0', 'cable': '1', 'capsule': '2', 'carpet': '3', 'grid': '4', 'hazelnut': '5',
'leather': '6', 'metal_nut': '7', 'pill': '8', 'screw': '9', 'tile': '10',
'toothbrush': '11', 'transistor': '12', 'wood': '13', 'zipper': '14'}
self.image_size = (256, 256)
self.root = root
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
item = self.data[idx]
source_filename = item['filename']
target_filename = item['filename']
label = item["label"]
if item.get("maskname", None):
mask = cv2.imread( self.root + item['maskname'], cv2.IMREAD_GRAYSCALE)
else:
if label == 0: # good
mask = np.zeros(self.image_size).astype(np.uint8)
elif label == 1: # defective
mask = (np.ones(self.image_size)).astype(np.uint8)
else:
raise ValueError("Labels must be [None, 0, 1]!")
prompt = ""
source = cv2.imread(self.root + source_filename)
target = cv2.imread(self.root + target_filename)
source = cv2.cvtColor(source, 4)  # 4 == cv2.COLOR_BGR2RGB
target = cv2.cvtColor(target, 4)  # 4 == cv2.COLOR_BGR2RGB
source = Image.fromarray(source, "RGB")
target = Image.fromarray(target, "RGB")
mask = Image.fromarray(mask, "L")
# transform_fn = transforms.Resize(256, Image.BILINEAR)
transform_fn = transforms.Resize(self.image_size)
source = transform_fn(source)
target = transform_fn(target)
mask = transform_fn(mask)
source = transforms.ToTensor()(source)
target = transforms.ToTensor()(target)
mask = transforms.ToTensor()(mask)
normalize_fn = transforms.Normalize(mean=mean_train, std=std_train)
source = normalize_fn(source)
target = normalize_fn(target)
clsname = item["clsname"]
image_idx = self.label_to_idx[clsname]
return dict(jpg=target, txt=prompt, hint=source, mask=mask, filename=source_filename, clsname=clsname, label=int(image_idx))
# Path: sgn/model.py
def create_model(config_path):
config = OmegaConf.load(config_path)
model = instantiate_from_config(config.model).cpu()
print(f'Loaded model config from [{config_path}]')
return model
# Path: sgn/model.py
def load_state_dict(ckpt_path, location='cpu'):
_, extension = os.path.splitext(ckpt_path)
if extension.lower() == ".safetensors":
import safetensors.torch
state_dict = safetensors.torch.load_file(ckpt_path, device=location)
else:
state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))
state_dict = get_state_dict(state_dict)
print(f'Loaded state_dict from [{ckpt_path}]')
return state_dict
# Path: utils/eval_helper.py
def dump(save_dir, outputs):
filenames = outputs["filename"]
batch_size = len(filenames)
preds = outputs["pred"].cpu().numpy() # B x 1 x H x W
masks = outputs["mask"].cpu().numpy() # B x 1 x H x W
# heights = outputs["height"].cpu().numpy()
# widths = outputs["width"].cpu().numpy()
clsnames = outputs["clsname"]
for i in range(batch_size):
file_dir, filename = os.path.split(filenames[i])
_, subname = os.path.split(file_dir)
filename = "{}_{}_{}".format(clsnames[i], subname, filename)
filename, _ = os.path.splitext(filename)
save_file = os.path.join(save_dir, filename + ".npz")
np.savez(
save_file,
filename=filenames[i],
pred=preds[i],
mask=masks[i],
# height=heights[i],
# width=widths[i],
clsname=clsnames[i],
)
# Path: utils/eval_helper.py
def log_metrics(ret_metrics, config):
logger = logging.getLogger("global_logger")
clsnames = set([k.rsplit("_", 2)[0] for k in ret_metrics.keys()])
clsnames = list(clsnames - set(["mean"])) + ["mean"]
# auc
if config.get("auc", None):
auc_keys = [k for k in ret_metrics.keys() if "auc" in k]
evalnames = list(set([k.rsplit("_", 2)[1] for k in auc_keys]))
record = Report(["clsname"] + evalnames)
for clsname in clsnames:
clsvalues = [
ret_metrics["{}_{}_auc".format(clsname, evalname)]
for evalname in evalnames
]
record.add_one_record([clsname] + clsvalues)
logger.info(f"\n{record}")
# Path: utils/eval_helper.py
def merge_together(save_dir):
npz_file_list = glob.glob(os.path.join(save_dir, "*.npz"))
fileinfos = []
preds = []
masks = []
for npz_file in npz_file_list:
npz = np.load(npz_file)
fileinfos.append(
{
"filename": str(npz["filename"]),
# "height": npz["height"],
# "width": npz["width"],
"clsname": str(npz["clsname"]),
}
)
preds.append(npz["pred"])
masks.append(npz["mask"])
preds = np.concatenate(np.asarray(preds), axis=0) # N x H x W
masks = np.concatenate(np.asarray(masks), axis=0) # N x H x W
return fileinfos, preds, masks
# Path: utils/eval_helper.py
def performances(fileinfos, preds, masks, config):
ret_metrics = {}
clsnames = set([fileinfo["clsname"] for fileinfo in fileinfos])
for clsname in clsnames:
preds_cls = []
masks_cls = []
file_cls = []
for fileinfo, pred, mask in zip(fileinfos, preds, masks):
if fileinfo["clsname"] == clsname:
preds_cls.append(pred[None, ...])
masks_cls.append(mask[None, ...])
file_cls.append(fileinfo['filename'])
preds_cls = np.concatenate(np.asarray(preds_cls), axis=0) # N x H x W
masks_cls = np.concatenate(np.asarray(masks_cls), axis=0) # N x H x W
data_meta = EvalDataMeta(preds_cls, masks_cls, file_cls)
# auc
if config.get("auc", None):
for metric in config["auc"]:
evalname = metric["name"]
kwargs = metric.get("kwargs", {})
eval_method = eval_lookup_table[evalname](data_meta, **kwargs)
auc = eval_method.eval_auc()
ret_metrics["{}_{}_auc".format(clsname, evalname)] = auc
if config.get("auc", None):
for metric in config["auc"]:
evalname = metric["name"]
evalvalues = [
ret_metrics["{}_{}_auc".format(clsname, evalname)]
for clsname in clsnames
]
mean_auc = np.mean(np.array(evalvalues))
ret_metrics["{}_{}_auc".format("mean", evalname)] = mean_auc
return ret_metrics
# Path: utils/util.py
def cal_anomaly_map(fs_list, ft_list, out_size=224, amap_mode='mul'):
if amap_mode == 'mul':
anomaly_map = np.ones([out_size, out_size])
else:
anomaly_map = np.zeros([out_size, out_size])
a_map_list = []
for i in range(len(ft_list)):
fs = fs_list[i]
ft = ft_list[i]
#fs_norm = F.normalize(fs, p=2)
#ft_norm = F.normalize(ft, p=2)
a_map = 1 - F.cosine_similarity(fs, ft)
a_map = torch.unsqueeze(a_map, dim=1)
a_map = F.interpolate(a_map, size=out_size, mode='bilinear', align_corners=True)
a_map = a_map[0, 0, :, :].to('cpu').detach().numpy()
a_map_list.append(a_map)
if amap_mode == 'mul':
anomaly_map *= a_map
else:
anomaly_map += a_map
return anomaly_map, a_map_list
# Path: utils/util.py
def log_local(images, filenames):
pixel_mean = [0.485, 0.456, 0.406]
pixel_std = [0.229, 0.224, 0.225]
pixel_mean = torch.tensor(pixel_mean).cuda().unsqueeze(1).unsqueeze(1) # 3 x 1 x 1
pixel_std = torch.tensor(pixel_std).cuda().unsqueeze(1).unsqueeze(1)
root = os.path.join('log_image/')
name = filenames[-7:-4]
for k in images:
image = (images[k].squeeze() * pixel_std + pixel_mean) * 255
image = image.permute(1, 2, 0).to('cpu').numpy()
filename = "{}-{}.jpg".format(name, k)
path = os.path.join(root, filenames[:-7],filename)
os.makedirs(os.path.split(path)[0], exist_ok=True)
# Image.fromarray(image).save(path)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(path, image)
# Path: utils/util.py
def create_logger(name, log_file, level=logging.INFO):
log = logging.getLogger(name)
formatter = logging.Formatter(
"[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s"
)
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
log.setLevel(level)
log.addHandler(fh)
log.addHandler(sh)
return log
# Path: utils/util.py
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Path: visa_dataloader.py
class VisaDataset(Dataset):
def __init__(self,type, root):
self.data = []
with open('./training/VisA/visa.csv', 'rt') as f:
render = csv.reader(f, delimiter=',')
header = next(render)
for row in render:
if row[1] == type:
data_dict = {'object':row[0],'split':row[1],'label':row[2],'image':row[3],'mask':row[4]}
self.data.append(data_dict)
self.label_to_idx = {'candle': '0', 'capsules': '1', 'cashew': '2', 'chewinggum': '3', 'fryum': '4', 'macaroni1': '5',
'macaroni2': '6', 'pcb1': '7', 'pcb2': '8', 'pcb3': '9', 'pcb4': '10',
'pipe_fryum': '11',}
self.image_size = (256,256)
self.root = root
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
item = self.data[idx]
source_filename = item['image']
target_filename = item['image']
prompt = ""
if item.get("mask", None):
mask = cv2.imread( self.root + item['mask'], cv2.IMREAD_GRAYSCALE)
else:
if item['label'] == 'normal': # good
mask = np.zeros(self.image_size).astype(np.uint8)
elif item['label'] == 'anomaly': # defective
mask = (np.ones(self.image_size)).astype(np.uint8)
else:
raise ValueError("Labels must be [None, 0, 1]!")
source = cv2.imread(self.root + source_filename)
target = cv2.imread(self.root + target_filename)
source = cv2.cvtColor(source, 4)  # 4 == cv2.COLOR_BGR2RGB
target = cv2.cvtColor(target, 4)  # 4 == cv2.COLOR_BGR2RGB
source = Image.fromarray(source, "RGB")
target = Image.fromarray(target, "RGB")
mask = Image.fromarray(mask, "L")
transform_fn = transforms.Resize(self.image_size)
source = transform_fn(source)
target = transform_fn(target)
mask = transform_fn(mask)
source = transforms.ToTensor()(source)
target = transforms.ToTensor()(target)
mask = transforms.ToTensor()(mask)
normalize_fn = transforms.Normalize(mean=mean_train, std=std_train)
source = normalize_fn(source)
target = normalize_fn(target)
clsname = item["object"]
image_idx = self.label_to_idx[clsname]
return dict(jpg=target, txt=prompt, hint=source, mask=mask, filename=source_filename, clsname=clsname, label=int(image_idx))
# Path: test.py
import random
import torchmetrics
import pytorch_lightning as pl
import torch
import os
import argparse
import torchvision
import numpy as np
import logging
import timm
import cv2
from share import *
from PIL import Image
from torch.utils.data import DataLoader
from mvtecad_dataloader import MVTecDataset
from sgn.model import create_model, load_state_dict
from utils.eval_helper import dump, log_metrics, merge_together, performances
from torch.nn import functional as F
from scipy.ndimage import gaussian_filter
from utils.util import cal_anomaly_map, log_local, create_logger, setup_seed
from visa_dataloader import VisaDataset
parser = argparse.ArgumentParser(description="DiAD")
parser.add_argument("--resume_path", default='./models/output.ckpt')
args = parser.parse_args()
# Configs
resume_path = args.resume_path
batch_size = 1
logger_freq = 300
learning_rate = 1e-5
only_mid_control = True
evl_dir = "npz_result"
logger = create_logger("global_logger", "log/")
# First use cpu to load models. Pytorch Lightning will automatically move it to GPUs.
model = create_model('models/diad.yaml').cpu()
model.load_state_dict(load_state_dict(resume_path, location='cpu'), strict=False)
| model.learning_rate = learning_rate |
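cal_anomaly_map above builds a per-pixel anomaly score as 1 minus the cosine similarity between two feature maps, upsampled to the output size. A toy, self-contained version of that core step; fs and ft are random stand-ins, not the model's real features:

import torch
import torch.nn.functional as F

fs = torch.rand(1, 8, 16, 16)   # stand-in feature map (B, C, H, W)
ft = torch.rand(1, 8, 16, 16)   # stand-in feature map to compare against
a_map = 1 - F.cosine_similarity(fs, ft, dim=1)          # (1, 16, 16) per-pixel distance
a_map = F.interpolate(a_map.unsqueeze(1), size=256,
                      mode='bilinear', align_corners=True)[0, 0]  # (256, 256)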
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: nv-tlabs/trace
# Path: tbsim/utils/geometry_utils.py
def transform_points_tensor(
points: torch.Tensor, transf_matrix: torch.Tensor
) -> torch.Tensor:
"""
Transform a set of 2D/3D points using the given transformation matrix.
Assumes row major ordering of the input points. The transform function has 3 modes:
- points (N, F), transf_matrix (F+1, F+1)
all points are transformed using the matrix and the output points have shape (N, F).
- points (B, N, F), transf_matrix (F+1, F+1)
all sequences of points are transformed using the same matrix and the output points have shape (B, N, F).
transf_matrix is broadcasted.
- points (B, N, F), transf_matrix (B, F+1, F+1)
each sequence of points is transformed using its own matrix and the output points have shape (B, N, F).
Note this function assumes points.shape[-1] == matrix.shape[-1] - 1, which means that last
rows in the matrices do not influence the final results.
For 2D points only the first 2x3 parts of the matrices will be used.
:param points: Input points of shape (N, F) or (B, N, F)
with F = 2 or 3 depending on input points are 2D or 3D points.
:param transf_matrix: Transformation matrix of shape (F+1, F+1) or (B, F+1, F+1) with F = 2 or 3.
:return: Transformed points of shape (N, F) or (B, N, F) depending on the dimensions of the input points.
"""
points_log = f" received points with shape {points.shape} "
matrix_log = f" received matrices with shape {transf_matrix.shape} "
assert points.ndim in [2, 3], f"points should have ndim in [2,3],{points_log}"
assert transf_matrix.ndim in [
2,
3,
], f"matrix should have ndim in [2,3],{matrix_log}"
assert (
points.ndim >= transf_matrix.ndim
), f"points ndim should be >= than matrix,{points_log},{matrix_log}"
points_feat = points.shape[-1]
assert points_feat in [2, 3], f"last points dimension must be 2 or 3,{points_log}"
assert (
transf_matrix.shape[-1] == transf_matrix.shape[-2]
), f"matrix should be a square matrix,{matrix_log}"
matrix_feat = transf_matrix.shape[-1]
assert matrix_feat in [3, 4], f"last matrix dimension must be 3 or 4,{matrix_log}"
assert (
points_feat == matrix_feat - 1
), f"points last dim should be one less than matrix,{points_log},{matrix_log}"
def _transform(points: torch.Tensor, transf_matrix: torch.Tensor) -> torch.Tensor:
num_dims = transf_matrix.shape[-1] - 1
transf_matrix = torch.permute(transf_matrix, (0, 2, 1))
return (
points @ transf_matrix[:, :num_dims, :num_dims]
+ transf_matrix[:, -1:, :num_dims]
)
if points.ndim == transf_matrix.ndim == 2:
points = torch.unsqueeze(points, 0)
transf_matrix = torch.unsqueeze(transf_matrix, 0)
return _transform(points, transf_matrix)[0]
elif points.ndim == transf_matrix.ndim == 3:
return _transform(points, transf_matrix)
elif points.ndim == 3 and transf_matrix.ndim == 2:
transf_matrix = torch.unsqueeze(transf_matrix, 0)
return _transform(points, transf_matrix)
else:
raise NotImplementedError(f"unsupported case!{points_log},{matrix_log}")
# Path: tbsim/utils/geometry_utils.py
def transform_yaw(yaw, tf_mat):
'''
- yaw : (B)
- tf_mat : (B, 3, 3) matrix to transform yaw by
'''
yaw = yaw[:,None]
hvec = torch.cat([torch.cos(yaw), torch.sin(yaw)], dim=-1) # B x 2
rot_mat = tf_mat[:,:2,:2].clone() # B x 2 x 2
# rot part of mat may have scaling too
# print(rot_mat)
rot_mat[:,:,0] = rot_mat[:,:,0] / torch.norm(rot_mat[:,:,0], dim=-1, keepdim=True)
rot_mat[:,:,1] = rot_mat[:,:,1] / torch.norm(rot_mat[:,:,1], dim=-1, keepdim=True)
# print(rot_mat)
rot_hvec = torch.matmul(rot_mat, hvec.unsqueeze(-1))[:,:,0] # B x 2
# rot_hvec = rot_hvec / torch.norm(rot_hvec, dim=-1, keepdim=True)
# print(rot_hvec)
tf_yaw = torch.atan2(rot_hvec[:,1], rot_hvec[:,0]) # rot part of mat may have scaling too
return tf_yaw
# Path: tbsim/utils/geometry_utils.py
def detect_collision(
ego_pos: np.ndarray,
ego_yaw: np.ndarray,
ego_extent: np.ndarray,
other_pos: np.ndarray,
other_yaw: np.ndarray,
other_extent: np.ndarray,
):
"""
Computes whether a collision occured between ego and any another agent.
Also computes the type of collision: rear, front, or side.
For this, we compute the intersection of ego's four sides with a target
agent and measure the length of this intersection. A collision
is classified into a class, if the corresponding length is maximal,
i.e. a front collision exhibits the longest intersection with
egos front edge.
.. note:: please note that this function will stop upon finding the first
collision, so it won't return all collisions but only the first
one found.
:param ego_pos: predicted centroid
:param ego_yaw: predicted yaw
:param ego_extent: predicted extent
:param other_pos: target agents
:return: None if not collision was found, and a tuple with the
collision type and the agent track_id
"""
ego_bbox = _get_bounding_box(centroid=ego_pos, yaw=ego_yaw, extent=ego_extent)
for i in range(other_pos.shape[0]):
agent_bbox = _get_bounding_box(other_pos[i], other_yaw[i], other_extent[i])
if ego_bbox.intersects(agent_bbox):
front_side, rear_side, left_side, right_side = _get_sides(ego_bbox)
intersection_length_per_side = np.asarray(
[
agent_bbox.intersection(front_side).length,
agent_bbox.intersection(rear_side).length,
agent_bbox.intersection(left_side).length,
agent_bbox.intersection(right_side).length,
]
)
argmax_side = np.argmax(intersection_length_per_side)
# Remap here is needed because there are two sides that are
# mapped to the same collision type CollisionType.SIDE
max_collision_types = max(CollisionType).value
remap_argmax = min(argmax_side, max_collision_types)
collision_type = CollisionType(remap_argmax)
return collision_type, i
return None
# Path: tbsim/utils/geometry_utils.py
def batch_nd_transform_points_np(points, Mat):
ndim = Mat.shape[-1] - 1
batch = list(range(Mat.ndim-2))+[Mat.ndim-1]+[Mat.ndim-2]
Mat = np.transpose(Mat,batch)
if points.ndim==Mat.ndim-1:
return (points[...,np.newaxis,:] @ Mat[..., :ndim, :ndim]).squeeze(-2) + Mat[
..., -1:, :ndim
].squeeze(-2)
elif points.ndim==Mat.ndim:
return ((points[...,np.newaxis,:] @ Mat[...,np.newaxis, :ndim, :ndim]) + Mat[
...,np.newaxis, -1:, :ndim]).squeeze(-2)
else:
raise Exception("wrong shape")
# Path: tbsim/envs/env_metrics.py
class EnvMetrics(abc.ABC):
def __init__(self):
self._df = None
self._scene_ts = defaultdict(lambda:0)
self.reset()
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def add_step(self, state_info: Dict, all_scene_index: np.ndarray):
pass
@abc.abstractmethod
def get_episode_metrics(self) -> Dict[str, np.ndarray]:
pass
def get_multi_episode_metrics(self) -> Dict[str, np.ndarray]:
pass
def multi_episode_reset(self):
pass
def __len__(self):
return max(self._scene_ts.values()) if len(self._scene_ts)>0 else 0
# Path: tbsim/utils/batch_utils.py
def batch_utils():
return trajdataBatchUtils()
# Path: tbsim/utils/trajdata_utils.py
def get_raster_pix2m():
return 1.0 / BATCH_RASTER_CFG["pixel_size"]
# Path: tbsim/utils/guidance_metrics.py
from collections import defaultdict
from tbsim.utils.geometry_utils import transform_points_tensor, transform_yaw, detect_collision, batch_nd_transform_points_np
from tbsim.envs.env_metrics import EnvMetrics
from tbsim.utils.batch_utils import batch_utils
from tbsim.utils.trajdata_utils import get_raster_pix2m
import torch
import numpy as np
import tbsim.utils.tensor_utils as TensorUtils
import tbsim.utils.metrics as Metrics
tgt_pos = all_pos[np.arange(all_pos.shape[0]),self.times]
met = np.mean(np.linalg.norm(tgt_pos - self.locs, axis=-1))
all_scene_met = np.ones((self.num_scenes))*np.nan
all_scene_met[self.scene_idx] = met
return all_scene_met
class GlobalConstraintGuidance(GuidanceMetric):
"""
How well constraint (waypoints at specific time) are met.
"""
def reset(self):
if 'locs' in self.params and 'times' in self.params:
# true hard constraint
self.locs = np.array(self.params['locs'])
self.times = np.array(self.params['times'])
elif 'target_pos' in self.params and 'target_time' in self.params:
# guidance version
self.locs = np.array(self.params['target_pos'])
self.times = np.array(self.params['target_time'])
else:
raise NotImplementedError()
# NOTE: assumes add_step will be called for initial state before model prediction as well
self.times = self.times + 1
self._per_step = []
def compute_per_step(self, state_info: dict, all_scene_index: np.ndarray):
''' stores all given positions for needed agents '''
# which agent indices belong to each scene (list of np arrays with agent inds)
local_scene_index = torch.unique_consecutive(torch.tensor(state_info["scene_index"]), return_inverse=True)[1].numpy()
scene_mask = local_scene_index == self.scene_idx
pos_cur_scene = state_info["centroid"][scene_mask]
if self.agents is not None:
pos_cur_scene = pos_cur_scene[self.agents]
return pos_cur_scene
def add_step(self, state_info: dict, all_scene_index: np.ndarray):
step_pos = self.compute_per_step(state_info, all_scene_index)
self._per_step.append(step_pos)
def get_episode_metrics(self):
''' finally see if were met at desired step'''
all_pos = np.stack(self._per_step, axis=1) # [num_agents, num_steps, 2]
tgt_pos = all_pos[np.arange(all_pos.shape[0]),self.times]
met = np.mean(np.linalg.norm(tgt_pos - self.locs, axis=-1))
all_scene_met = np.ones((self.num_scenes))*np.nan
all_scene_met[self.scene_idx] = met
return all_scene_met
class MapCollisionGuidance(GuidanceMetric):
"""Compute the fraction of the time that the agent is in undrivable regions"""
def reset(self):
self._per_step = []
def compute_per_step(self, state_info: dict, all_scene_index: np.ndarray):
# CHANGE: set ignore_if_unspecified to deal with a string type on scene_index
obs = TensorUtils.to_tensor(state_info, ignore_if_unspecified=True)
drivable_region = batch_utils().get_drivable_region_map(obs["image"])
# print(obs["centroid"])
# print(obs["raster_from_world"])
centroid_raster = transform_points_tensor(obs["centroid"][:, None], obs["raster_from_world"])[:, 0]
cur_yaw = transform_yaw(obs["yaw"], obs["raster_from_world"])[:,None] # have to use raster tf mat because the raster may not be up to date with the agent (i.e. the raster may be from an older frame)
# cur_yaw = transform_yaw(obs["yaw"], obs["agent_from_world"])[:,None] # agent frame is same as raster, just scaled
extent = obs["extent"][:,:2]
# TODO: this is super hacky and assumes trajdata is being used.
# should really just transform corners in world frame then convert to raster.
extent = get_raster_pix2m()*extent # convert to raster frame
# filter for specified scene and agents only
local_scene_index = torch.unique_consecutive(torch.tensor(state_info["scene_index"]), return_inverse=True)[1].numpy()
scene_mask = local_scene_index == self.scene_idx
drivable_region = drivable_region[scene_mask]
centroid_raster = centroid_raster[scene_mask]
cur_yaw = cur_yaw[scene_mask]
extent = extent[scene_mask]
if self.agents is not None:
drivable_region = drivable_region[self.agents]
centroid_raster = centroid_raster[self.agents]
cur_yaw = cur_yaw[self.agents]
extent = extent[self.agents]
off_road = Metrics.batch_detect_off_road_boxes(centroid_raster, cur_yaw, extent, drivable_region)
# print(off_road)
off_road = TensorUtils.to_numpy(off_road)
return off_road
def add_step(self, state_info: dict, all_scene_index: np.ndarray):
met = self.compute_per_step(state_info, all_scene_index)
self._per_step.append(met)
def get_episode_metrics(self):
met_all_steps = np.stack(self._per_step, axis=1)
# fraction of frames colliding
# print(np.sum(met_all_steps, axis=1))
met_guide_scene = np.sum(met_all_steps, axis=1) / float(met_all_steps.shape[1])
met_guide_scene = np.mean(met_guide_scene)
all_scene_met = np.ones((self.num_scenes))*np.nan
all_scene_met[self.scene_idx] = met_guide_scene
return all_scene_met
class MapCollisionGuidanceDisk(GuidanceMetric):
"""Compute the fraction of the time that the agent is in undrivable regions.
each agent is approximated by a single disk"""
def reset(self):
self._per_step = []
def compute_per_step(self, state_info: dict, all_scene_index: np.ndarray):
# CHANGE: set ignore_if_unspecified to deal with a string type on scene_index
obs = TensorUtils.to_tensor(state_info, ignore_if_unspecified=True)
drivable_region = batch_utils().get_drivable_region_map(obs["image"])
# print(obs["centroid"])
# print(obs["raster_from_world"])
centroid_raster = transform_points_tensor(obs["centroid"][:, None], obs["raster_from_world"])[:, 0]
cur_yaw = transform_yaw(obs["yaw"], obs["raster_from_world"])[:,None] # have to use raster tf mat because the raster may not be up to date with the agent (i.e. the raster may be from an older frame)
# cur_yaw = transform_yaw(obs["yaw"], obs["agent_from_world"])[:,None] # agent frame is same as raster, just scaled
extent = obs["extent"][:,:2]
extent = get_raster_pix2m()*extent # convert to raster frame
# filter for specified scene and agents only
| local_scene_index = torch.unique_consecutive(torch.tensor(state_info["scene_index"]), return_inverse=True)[1].numpy() |
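The extent conversion above multiplies metric extents by get_raster_pix2m(), i.e. 1 / pixel_size, to express them in raster pixels. A tiny illustration of that unit conversion with an assumed pixel size; the real value comes from BATCH_RASTER_CFG:

import torch

pixel_size = 0.5                         # assumed meters per raster pixel
raster_pix_per_m = 1.0 / pixel_size      # what get_raster_pix2m() would return
extent_m = torch.tensor([[4.6, 1.8]])    # toy vehicle length/width in meters
extent_px = raster_pix_per_m * extent_m  # -> tensor([[9.2, 3.6]]) in raster pixels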
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: AetherBlack/abuseACL
# Path: abuseACL/structures/Credentials.py
class Credentials:
def __init__(self, username: str, password: str, domain: str, ntlmhash: str, aesKey: str, doKerberos: bool) -> None:
self.username = username
self.password = password
self.domain = domain
self.ntlmhash = ntlmhash
self.aesKey = aesKey
self.doKerberos = doKerberos
def getAuthenticationSecret(self) -> str:
return self.password or self.ntlmhash
# Path: abuseACL/structures/Target.py
class Target:
tlsv1_2: bool = None
tlsv1: bool = None
def __init__(self, remote: str, port: int) -> None:
self.remote = remote
self.port = port
def use_tls(self) -> bool:
return self.tlsv1_2 or self.tlsv1
# Path: abuseACL/core/abuseACL.py
class abuseACL:
def __init__(self, ldap: LDAP, logger: Logger, extends: bool) -> None:
self.ldap = ldap
self.logger = logger
self.extends = extends
self.users = self.ldap.getAllUsers()
self.groups = self.ldap.getAllGroups()
self.computers = self.ldap.getAllComputers()
self.certificatesTemplates = self.ldap.getAllCertificatesTemplates()
self.gpos = self.ldap.getAllGPOs()
self.ous = self.ldap.getAllOUs()
self.gMSAs = self.ldap.getAllgMSAs()
self.allObjects = self.users + self.groups + self.computers + \
self.certificatesTemplates + self.gpos + self.ous + self.gMSAs
if self.extends:
self.adminSDHolder = self.ldap.getAdminSDHolder()
self.schema = self.ldap.getSchema()
self.allObjects += self.adminSDHolder + self.schema
def isObjectTypeGUIDRestricted(self, ace) -> RIGHTS_GUID:
isDangerous = self.isObjectTypeGUIDDangerous(ace)
# Restricted but has a dangerous objectTypeGUID
if isDangerous:
return isDangerous
else:
# Not restricted
if "ObjectTypeLen" not in ace["Ace"].fields:
return RIGHTS_GUID.ALL
return False
def isObjectTypeGUIDDangerous(self, ace) -> RIGHTS_GUID:
# Check if the field exists
if "ObjectTypeLen" in ace["Ace"].fields:
# Check the length
if ace["Ace"]["ObjectTypeLen"]:
objectTypeGUID = bin_to_string(ace["Ace"]["ObjectType"]).lower()
# Check if the right is dangerous
for right in RIGHTS_GUID:
if right.value == objectTypeGUID:
return right
return False
def printVuln(self, entry: ADObject, ace, perm: ACCESS_MASK, principalName: str, principalSid: str, right: RIGHTS_GUID) -> None:
self.logger.vuln(f"Result for {entry.sAMAccountName} ({entry.distinguishedName})")
self.logger.vuln(f" ACE Type : {ace['Ace'].__class__.__name__}")
self.logger.vuln(f" Access mask : {perm.name}")
self.logger.vuln(f" Principal (SID) : {principalName} ({principalSid})")
if right:
# Right and GUID
self.logger.vuln(f" Object type (GUID) : {right.name} ({right.value})")
def abuse(self, principalName: str) -> None:
"""
crossDomain: possible check against another forest (the -forest option is not implemented yet).
Check if the principal is the owner of another user, group, computer, certificateTemplate or gpo.
Check if the principal has dangerous write rights on another user, group (Self: the principal can add itself to the group), computer, certificateTemplate or gpo.
"""
haveVulnerability = False
principalName = principalName.lower()
principalSid = ADUser.getUserSid(self.users, principalName)
if principalSid is None:
principalSid = ADGroup.getGroupSid(self.groups, principalName)
if principalSid is None:
principalSid = ADComputer.getComputerSid(self.computers, principalName)
if principalSid is None:
principalSid = ADgMSA.getgMSASid(self.gMSAs, principalName)
if principalSid is None:
self.logger.error(f"Can't find principal with name {principalName}")
exit(1)
self.logger.debug(f"SID of the principal: {principalSid}")
for entry in self.allObjects:
entry: ADObject
# Name of the Object without AD
objectName = entry.__class__.__name__[2:]
securityDescriptor = entry.nTSecurityDescriptor
if principalSid == securityDescriptor["OwnerSid"].formatCanonical():
haveVulnerability = True
self.logger.vuln(f"{principalName} is the owner of {entry.sAMAccountName}")
# ACE in ACL
for ace in securityDescriptor["Dacl"].aces:
# Only check ALLOW
if ace["Ace"].ACE_TYPE in [ACCESS_ALLOWED_ACE.ACE_TYPE, ACCESS_ALLOWED_OBJECT_ACE.ACE_TYPE]:
# Only check concerned user
if principalSid != ace["Ace"]["Sid"].formatCanonical():
continue
# Don't need to check if already full rights
if ace["Ace"]["Mask"].hasPriv(ACCESS_MASK.FULL_CONTROL.value):
haveVulnerability = True
self.printVuln(entry, ace, ACCESS_MASK.FULL_CONTROL, principalName, principalSid, False)
continue
for perm in ACCESS_MASK:
# Check if permission in current ACE
if not (ace["Ace"]["Mask"].hasPriv(perm.value)):
continue
vuln = f"{principalName} can do {perm.name} on {entry.sAMAccountName}"
right = False
# Edit one of the object's attributes. The attribute is referenced by an "ObjectType GUID".
if perm.name == ACCESS_MASK.WRITE_PROPERTIES.name:
right = self.isObjectTypeGUIDDangerous(ace)
if right:
vuln = f"{principalName} can do {perm.name}:{right} on {entry.sAMAccountName}"
else:
# Debug, in case it is useful, (No vulnerability)
self.logger.debug(vuln)
vuln = ""
# Perform "Extended rights". "AllExtendedRights" refers to that permission being unrestricted. This right can be restricted by specifying the extended right in the "ObjectType GUID".
elif perm.name == ACCESS_MASK.ALL_EXTENDED_RIGHTS.name:
right = self.isObjectTypeGUIDRestricted(ace)
if right:
vuln = f"{principalName} can do {perm.name}:{right} on {entry.sAMAccountName}"
else:
# Debug, in case it is useful, (No vulnerability)
self.logger.debug(vuln)
vuln = ""
if len(vuln):
haveVulnerability = True
self.printVuln(entry, ace, perm, principalName, principalSid, right)
# In case no vulnerability were found for the principal
if not haveVulnerability:
self.logger.error(f"Nothing found for principal {principalName}")
# Path: abuseACL/core/Logger.py
class Logger:
def __init__(self, debug: bool, timestamp: bool) -> None:
self.__debug = debug
self.__timestamp = timestamp
def __toStdout(self, color: str, title: str, msg: str) -> None:
timestamp = str()
if self.__timestamp:
timestamp = time.strftime("[%Y/%m/%d %H:%M:%S] ")
print("%s%s[%s] %s%s" % (color, timestamp, title, msg, Style.RESET_ALL))
def debug(self, msg: str) -> None:
if self.__debug:
self.__toStdout(Fore.BLUE, "i", msg)
def error(self, msg: str) -> None:
self.__toStdout(Fore.RED, "!", msg)
def vuln(self, msg: str) -> None:
self.__toStdout(Fore.GREEN, "*", msg)
# Path: abuseACL/network/LDAP.py
class LDAP:
users = list()
groups = list()
computers = list()
certificatesTemplates = list()
gpos = list()
ous = list()
adminSDHolder = list()
schema = list()
gMSA = list()
def __init__(self, forest: str, target: Target, credentials: Credentials, logger: Logger) -> None:
self.target = target
self.credentials = credentials
self.logger = logger
self.__getPort()
self.__checkAuthentication()
def __getPort(self) -> None:
if self.target.port:
return
self.target.port, self.target.tlsv1_2 = self.__tryLDAPS(tls.PROTOCOL_TLSv1_2, self.target.port)
if self.target.tlsv1_2 is None:
self.target.port, self.target.tlsv1 = self.__tryLDAPS(tls.PROTOCOL_TLSv1, self.target.port)
if self.target.tlsv1 is None:
self.target.port = self.__tryLDAP(self.target.port)
if self.target.port is None:
self.logger.error(f"Impossible to communicate with the target {self.target.remote} !")
exit(1)
def __checkAuthentication(self) -> None:
self.logger.debug("Trying to connect to %s:%d" % (self.target.remote, self.target.port))
self.__Authentication()
try:
self.getNamingContexts()
except IndexError:
self.logger.error("Invalid credentials !")
exit(1)
self.logger.debug("Authentication success !")
def __Authentication(self) -> ldap3.Connection:
user = "%s\\%s" % (self.credentials.domain, self.credentials.username)
ldapTls = None
if self.target.tlsv1_2:
ldapTls = ldap3.Tls(validate=tls.CERT_NONE, version=tls.PROTOCOL_TLSv1_2, ciphers='ALL:@SECLEVEL=0')
elif self.target.tlsv1:
ldapTls = ldap3.Tls(validate=tls.CERT_NONE, version=tls.PROTOCOL_TLSv1, ciphers='ALL:@SECLEVEL=0')
ldapServer = ldap3.Server(self.target.remote, use_ssl=self.target.use_tls(), port=self.target.port, get_info=ldap3.ALL, tls=ldapTls)
if self.credentials.doKerberos:
ldapConn = ldap3.Connection(ldapServer)
ldapConn = self.kerberosAuthentication(ldapConn)
else:
ldapConn = ldap3.Connection(ldapServer, user=user, password=self.credentials.getAuthenticationSecret(), authentication=ldap3.NTLM)
ldapConn.bind()
if ldapConn.result["description"] == "invalidCredentials":
self.logger.error("Invalid credentials !")
exit(1)
return ldapConn
def __tryLDAPS(self, proto: tls._SSLMethod, port: int) -> int:
port = port or 636
ldapTls = ldap3.Tls(validate=tls.CERT_NONE, version=proto, ciphers="ALL:@SECLEVEL=0")
ldapServer = ldap3.Server(self.target.remote, use_ssl=True, port=port, get_info=ldap3.ALL, tls=ldapTls)
ldapConn = ldap3.Connection(ldapServer)
try:
ldapConn.bind()
except ldap3.core.exceptions.LDAPSocketOpenError:
return None, None
except ldap3.core.exceptions.LDAPSocketReceiveError:
pass
return port, True
def __tryLDAP(self, port: int) -> int:
self.logger.debug("LDAPS failed, trying with LDAP.")
port = port or 389
ldapServer = ldap3.Server(self.target.remote, use_ssl=False, port=port, get_info=ldap3.ALL)
ldapConn = ldap3.Connection(ldapServer)
try:
ldapConn.bind()
except ldap3.core.exceptions.LDAPSocketOpenError:
return None
except ldap3.core.exceptions.LDAPSocketReceiveError:
return port
return port
def kerberosAuthentication(self, ldapConn: ldap3.Connection) -> None:
blob = Kerberos.kerberosLogin(self.target.remote, self.credentials.username, self.credentials.password,
self.credentials.domain, self.credentials.ntlmhash, self.credentials.aesKey,
kdcHost=self.target.remote)
request = ldap3.operation.bind.bind_operation(ldapConn.version, ldap3.SASL, self.credentials.username, None, "GSS-SPNEGO", blob.getData())
# Done with the Kerberos saga, now let's get into LDAP
# try to open connection if closed
if ldapConn.closed:
ldapConn.open(read_server_info=False)
ldapConn.sasl_in_progress = True
response = ldapConn.post_send_single_response(ldapConn.send('bindRequest', request, None))
ldapConn.sasl_in_progress = False
if response[0]['result'] != 0:
raise Exception(response)
ldapConn.bound = True
return ldapConn
def search(self, dn: str, filter: str, scope: str, attributes: list = ["*"]) -> list:
ldapConn = self.__Authentication()
ldapConn.search(
search_base=dn,
search_filter=filter,
search_scope=scope,
attributes=attributes,
# Controls to get nTSecurityDescriptor from standard user
# OWNER_SECURITY_INFORMATION + GROUP_SECURITY_INFORMATION + DACL_SECURITY_INFORMATION
controls=[("1.2.840.113556.1.4.801", True, "%c%c%c%c%c" % (48, 3, 2, 1, 7), )]
)
return ldapConn.response
def __createArrayOfObject(self, response: list, obj: object) -> list:
array = list()
for entry in response:
# Not a response object
if entry["type"] != "searchResEntry":
continue
array.append(
obj(**entry["raw_attributes"])
)
return array
def getNamingContexts(self) -> list:
response = self.search(
"",
"(objectClass=*)",
ldap3.BASE,
["namingContexts"]
)
self.namingContexts = response[0]["attributes"]["namingContexts"]
self.defaultNamingContext = self.namingContexts[0]
self.configurationNamingContext = self.namingContexts[1]
self.schemaNamingContext = self.namingContexts[2]
self.domainDnsZonesNamingContext = self.namingContexts[3]
self.forestDnsZonesNamingContext = self.namingContexts[4]
def getAllUsers(self) -> List[ADUser]:
if len(self.users):
return self.users
response = self.search(
self.defaultNamingContext,
"(sAMAccountType=%d)" % (sAMAccountType.SAM_NORMAL_USER_ACCOUNT),
ldap3.SUBTREE,
["DistinguishedName", "name", "userPrincipalName", "sAMAccountName", "objectSid", "ntSecurityDescriptor", "userAccountControl"]
)
self.users = self.__createArrayOfObject(response, ADUser)
return self.users
def getAllGroups(self) -> List[ADGroup]:
if len(self.groups):
return self.groups
response = self.search(
self.defaultNamingContext,
"(|(sAMAccountType=%d)(sAMAccountType=%d)(sAMAccountType=%d)(sAMAccountType=%d))" % (
sAMAccountType.SAM_GROUP_OBJECT,
sAMAccountType.SAM_NON_SECURITY_GROUP_OBJECT,
sAMAccountType.SAM_ALIAS_OBJECT,
sAMAccountType.SAM_NON_SECURITY_ALIAS_OBJECT
),
ldap3.SUBTREE,
["DistinguishedName", "name", "sAMAccountName", "objectSid", "ntSecurityDescriptor"]
)
self.groups = self.__createArrayOfObject(response, ADGroup)
return self.groups
def getAllComputers(self) -> List[ADComputer]:
if len(self.computers):
return self.computers
response = self.search(
self.defaultNamingContext,
"(sAMAccountType=%d)" % (sAMAccountType.SAM_MACHINE_ACCOUNT),
ldap3.SUBTREE,
["DistinguishedName", "name", "sAMAccountName", "objectSid", "ntSecurityDescriptor", "userAccountControl"]
)
self.computers = self.__createArrayOfObject(response, ADComputer)
return self.computers
def getAllCertificatesTemplates(self) -> List[ADCertificateTemplate]:
if len(self.certificatesTemplates):
return self.certificatesTemplates
response = self.search(
f"CN=Certificate Templates,CN=Public Key Services,CN=Services,{self.configurationNamingContext}",
"(objectClass=pkiCertificateTemplate)",
ldap3.SUBTREE,
["DistinguishedName", "name", "ntSecurityDescriptor"]
)
self.certificatesTemplates = self.__createArrayOfObject(response, ADCertificateTemplate)
return self.certificatesTemplates
def getAllGPOs(self) -> List[ADGPO]:
if len(self.gpos):
return self.gpos
response = self.search(
f"CN=Policies,CN=System,{self.defaultNamingContext}",
"(objectClass=groupPolicyContainer)",
ldap3.SUBTREE,
["DistinguishedName", "displayName", "gPCFileSysPath", "ntSecurityDescriptor"]
)
self.gpos = self.__createArrayOfObject(response, ADGPO)
return self.gpos
def getAllOUs(self) -> List[ADGPO]:
if len(self.ous):
return self.ous
response = self.search(
self.defaultNamingContext,
"(objectClass=organizationalUnit)",
ldap3.SUBTREE,
["DistinguishedName", "name", "ntSecurityDescriptor"]
)
self.ous = self.__createArrayOfObject(response, ADOU)
return self.ous
def getAdminSDHolder(self) -> List[ADAdminSDHolder]:
if len(self.adminSDHolder):
return self.adminSDHolder
response = self.search(
f"CN=AdminSDHolder,CN=System,{self.defaultNamingContext}",
"(cn=AdminSDHolder)",
ldap3.BASE,
["DistinguishedName", "name", "ntSecurityDescriptor"]
)
self.adminSDHolder = self.__createArrayOfObject(response, ADAdminSDHolder)
return self.adminSDHolder
def getSchema(self) -> List[ADSchema]:
if len(self.schema):
return self.schema
# Subtree in case it's not inherent
response = self.search(
f"CN=Schema,{self.configurationNamingContext}",
"(objectClass=*)",
ldap3.SUBTREE,
["DistinguishedName", "name", "ntSecurityDescriptor"]
)
self.schema = self.__createArrayOfObject(response, ADSchema)
return self.schema
def getAllgMSAs(self) -> List[ADgMSA]:
if len(self.gMSA):
return self.gMSA
response = self.search(
self.defaultNamingContext,
"(objectClass=msDS-GroupManagedServiceAccount)",
ldap3.SUBTREE,
["DistinguishedName", "sAMAccountName", "objectSid", "ntSecurityDescriptor"]
)
self.gMSA = self.__createArrayOfObject(response, ADgMSA)
return self.gMSA
# Path: abuseACL.py
# Path: abuseACL/__main__.py
from impacket.examples import utils
from getpass import getpass
from abuseACL.structures.Credentials import Credentials
from abuseACL.structures.Target import Target
from abuseACL.core.abuseACL import abuseACL
from abuseACL.core.Logger import Logger
from abuseACL.network.LDAP import LDAP
from abuseACL import __banner__
import argparse
import sys
import os
class Arguments:
debug: bool
ts: bool
no_pass: bool
hashes: str
doKerberos: bool
aesKey: bool
dc_ip: str
port: int
domain: str
username: str
password: str
remote_name: str
principal: str
principalsfile: str
forest: str
extends: bool
def __init__(self) -> None:
self.__parser = argparse.ArgumentParser(add_help=True, description="Automatic Windows vulnerable ACEs/ACLs listing")
self.__parser.add_argument("-debug", default=False, action="store_true", help="Turn DEBUG output ON. (Default: False)")
self.__parser.add_argument("-ts", action="store_true", help="Adds timestamp to every logging output")
# Credentials
credentials = self.__parser.add_argument_group("Credentials")
credentials.add_argument("-no-pass", action="store_true", help="Don't ask for password (useful for -k or when using proxychains)")
credentials.add_argument("-hashes", action="store", metavar="[LMHASH]:NTHASH", help="NT/LM hashes. LM hash can be empty.")
credentials.add_argument("-k", action="store_true", help="Use Kerberos authentication. Grabs credentials from ccache file (KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the ones specified in the command line")
credentials.add_argument("-aesKey", action="store", metavar="hex key", help="AES key to use for Kerberos Authentication (128 or 256 bits)")
# Connection
connection = self.__parser.add_argument_group("Connection")
connection.add_argument("-dc-ip", action="store", metavar="ip address", help="IP Address of the domain controller. If omitted it will use the domain part (FQDN) specified in the target parameter")
connection.add_argument("-port", type=int, action="store", help="Port of the domain controller. If omitted it will try to authenticate in LDAPS and then in LDAP.")
# Filters
filters = self.__parser.add_argument_group("Filters")
filters.add_argument("-principal", action="store", help="Find vulnerable ACE for a specific User/Computer/Group")
filters.add_argument("-principalsfile", action="store", help="File with multiple User/Computer/Group")
filters.add_argument("-forest", action="store", help="Forest to use if different from dc. Not implemented yet.")
# LDAP
ldap = self.__parser.add_argument_group("LDAP")
ldap.add_argument("-extends", action="store_true", help="Check adminSDHolder and Schema.")
self.__parser.add_argument("target", action="store", help="[[domain/]username[:password]@]<targetName or address>")
def parseArgs(self) -> None:
| if len(sys.argv) == 1: |
====REPOSITORY====
# Repo Name: gydpku/PPTC
# Path: src/ppt_executor.py
SLIDE_HEIGHT = 6858000
SLIDE_WIDTH = 9144000
CENTER_TOP = 3429000
CENTER_LEFT = 4572000
SHAPE_HEIGHT = 900000
SHAPE_WIDTH = 900000
TABLE_HEIGHT = 370000 # per line
CONTENT_HEIGHT = 4351338
CONTENT_WIDTH = 7886700
CONTENT_LEFT = 628650
CONTENT_TOP = 1825625
TITLE_HEIGHT = 1325563
TITLE_WIDTH = 7886700
TITLE_LEFT = 628650
TITLE_TOP = 365126
MARGIN = 600000
CORNER_LEFT = 0 + MARGIN
CORNER_TOP = 0 + MARGIN
CORNER_RIGHT = SLIDE_WIDTH - MARGIN
CORNER_BOTTOM = SLIDE_HEIGHT - MARGIN
SHAPE_LEFT = CENTER_LEFT - SHAPE_WIDTH / 2
SHAPE_TOP = CENTER_TOP - SHAPE_HEIGHT / 2
PIC_LEFT = CONTENT_LEFT
PIC_TOP = CONTENT_TOP
PIC_PATH = "./PPTC/"+"test/pics"
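# Note (illustrative, not part of the original project): the layout constants above are
# presumably EMUs (English Metric Units, 914,400 per inch), so SLIDE_WIDTH = 9144000 and
# SLIDE_HEIGHT = 6858000 correspond to the default 10 x 7.5 inch PowerPoint slide.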
def check_api_in_list(line, api_list):
def API_executor(lines, test=False,args=None):
def set_ppt(ppt_path):
def set_current_slide(idx):
def get_ppt():
def save_ppt(ppt_path):
def get_current_page_id():
def create_slide():
def move_to_next_slide():
def move_to_previous_slide():
def move_to_slide(idx):
def set_background_color(color):
def choose_title():
def choose_content():
def choose_textbox(idx=0):
def choose_picture(idx=0):
def choose_chart():
def choose_shape(shape_name):
def choose_table():
def choose_table_cell(row_id, column_id):
def insert_text(text):
def insert_bullet_point(text):
def insert_note(note):
def insert_textbox():
def delete_text():
def set_font_size(size):
def set_font_color(color):
def set_font_bold():
def set_font_italic():
def set_font_underline():
def set_font_style(font_name):
def set_line_space(line_space_level=0):
def text_align_left():
def text_align_center():
def text_align_right():
def insert_rectangle():
def insert_right_arrow():
def insert_rounded_rectangle():
def insert_triangle():
def insert_callout():
def insert_cloud():
def insert_star():
def insert_circle():
def insert_picture(picture_name):
def set_width(width):
def set_height(height):
def rotate_element(angle):
def set_fill_color(color):
def align_top_right_corner():
def align_top_left_corner():
def align_bottom_right_corner():
def align_bottom_left_corner():
def align_slide_left():
def align_slide_right():
def align_slide_top():
def align_slide_bottom():
def align_slide_center():
def set_left(left):
def set_top(top):
def insert_table(row_num, col_num):
def insert_table_row(row_data):
def insert_line_chart(data,series=None):
def insert_bar_chart(data,series=None):
def insert_pie_chart(data,series=None):
def set_chart_title(title):
# Path: src/ppt_reader.py
SCALE = 1000
def get_fill_color(shape):
def __init__(self, shape):
def text_info(self):
def space_info(self):
def size_info(self):
def style_info(self):
def discription(self):
def __repr__(self):
def __init__(self, shape, id=None):
def style_info(self):
def discription(self):
def __init__(self, shape):
def text_info(self):
def discription(self):
def __init__(self, shape):
def text_info(self):
def style_info(self):
def discription(self):
def __init__(self, shape, id=None):
def text_info(self):
def style_info(self):
def discription(self):
def __init__(self, shape):
def text_info(self):
def style_info(self):
def __init__(self, shape):
def text_info(self):
def style_info(self):
def hasshape(shape_str, shape_list):
def get_content(need_text,need_style,need_position,need_title,need_content,need_picture,need_table,need_chart,need_textbox,need_shape):
def get_content_by_instructions(ppt_path, instruction, args, ppt):
def eval_get_contents(need_text=True, need_style=True, need_position=True, need_shape_list=None, ppt=None):
class BasicShape:
class Picture(BasicShape):
class Table(BasicShape):
class Chart(BasicShape):
class Textbox(BasicShape):
class Placeholder(BasicShape):
class AutoShape(BasicShape):
# Path: src/openai_api.py
def completions_with_backoff(**kwargs):
def chat_with_backoff(**kwargs):
def embeddings_with_backoff(**kwargs):
def query_azure_openai(query, model = "vicuna-13b-v1.5-16k",id=None):
def truncate_text_with_token_count (text, max_tokens):
def rewrite(prompt):
# Path: src/prompt_factor.py
def get_instruction_to_API_code_prompt(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):
def get_instruction_to_API_code_prompt2(selected_API, ppt_content, chat_history, instruction, ask_less_question=False, current_page=1):
# Path: src/dataset.py
def load_data(path, dataset, args):
def load_data_json(path, dataset):
# Path: src/api_selection.py
K = None
K = args.api_topk
def get_topk(scores, k=10):
def get_embedding(text):
def get_api_embedding(args):
def select_api(query, k=10):
def get_selected_apis(instruction, args):
def get_all_apis(args):
def prepare_embedding(args):
# Path: src/utils.py
def write_list(lst, filename):
def read_list(filename):
def write_lines(lst, path):
def read_lines(path):
def makedir(path):
def merge_list(lst):
def get_picture_name(labels):
def get_picture_name_list(args):
def parse_api(codes):
def prepare_exp_name(args):
def get_tokens(text):
def calc_api_cost(path):
def check_token(model, prompt):
def get_token(text, trunc_num, model):
def checkpoint(mode,args,idx,step):
def sorted_list(path):
def parse_train_json(path):
def parse_test_json(path):
# Path: src/modeling.py
class PPT_assistant(object):
def __init__(self, args=None):
def planner(self, instruction):
def api_selector(self, instruction):
def content_selector(self, ppt_path, instruction, args, ppt):
def api_executor(self, apis, test=False):
def load_chat_history(self, instructions, labels):
def load_ppt(self, path):
def chat(self, user_instruction, ppt_path=None, verbose=False):
# Path: src/evaluate.py
def calc_token_cost(path):
def calc_acc(label_path, pred_path, instruction, additional_restrictions=[]):
def check_eval(args):
def get_error_case(args):
def eval(args):
# Path: src/content_selection.py
def select_information_type(query,args):
def select_shape_type(query,args):
def parse_answer(answer):
# Path: main.py
from src import ppt_executor, ppt_reader, openai_api, prompt_factor, dataset, api_selection, utils, modeling, evaluate, content_selection
from tqdm import tqdm
import argparse
import os
import jsonlines
if args.tf and os.path.exists(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx'):
print('Exists!')
continue
if args.sess and os.path.exists(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{len(session)-1}.pptx'):
print('Exists!')
continue
turn_id, instruction, label_api, base_ppt_path, label_ppt_path, api_lack_base_ppt_path, api_lack_label_ppt_path = turn
if turn_id == 0 and args.sess:
if args.api_lack:
ppt_assistant.load_ppt(args.user_path+api_lack_base_ppt_path)
label_file = api_lack_label_ppt_path
else:
ppt_assistant.load_ppt(args.user_path+base_ppt_path)
label_file = label_ppt_path
splitted_instruction = instruction.split("##")[0]
if args.tf:
if args.api_lack:
ppt_assistant.load_ppt(args.user_path+api_lack_base_ppt_path)
label_file = api_lack_label_ppt_path
else:
ppt_assistant.load_ppt(args.user_path+base_ppt_path)
label_file = label_ppt_path
ppt_assistant.load_chat_history([x[0] for x in chat_history],[x[1].strip(';').split(';') for x in chat_history])
prompt, reply = ppt_assistant.chat(splitted_instruction, ppt_path=args.user_path+base_ppt_path, verbose=False)
apis = utils.parse_api(reply)
ppt_assistant.api_executor(apis,test=True)
ppt_executor.save_ppt(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx')
utils.write_lines([prompt],args.user_path+f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt')
#import pdb
#pdb.set_trace()
with jsonlines.open(args.user_path+f"PPT_test_output/{set_name}/{args.exp_name}_session_{sess_id}.json", mode='a') as writer:
data={'Turn':turn_id,'User instruction':instruction,'Feasible API sequence':label_api,'Reply':reply,'Pred API sequence':apis,'Pred File':f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx','Label File':label_file,'Prompt File':f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt'}
writer.write(data)
chat_history.append([splitted_instruction, label_api])
elif args.sess:
prompt, reply = ppt_assistant.chat(instruction, ppt_path=None, verbose=False)
apis = utils.parse_api(reply)
ppt_assistant.api_executor(apis,test=True)
ppt_executor.save_ppt(args.user_path+f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx')
utils.write_lines([prompt],args.user_path+f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt')
with jsonlines.open(args.user_path+f"PPT_test_output/{set_name}/{args.exp_name}_session_{sess_id}.json", mode='a') as writer:
data={'Turn':turn_id,'User instruction':instruction,'Feasible API sequence':label_api,'Reply':reply,'Pred API sequence':apis,'Pred File':f'PPT_Pred_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.pptx','Label File':label_file,'Prompt File':f'PPT_Prompt_File/{set_name}/{args.exp_name}_{sess_id}_{turn_id}.txt'}
writer.write(data)
def test_planning(ppt_assistant):
instructions, labels = dataset.load_data(args.data_path, args.dataset)
f = open(f'test_system/planning_{args.dataset}.txt','a+')
for idx, dialogue in tqdm(enumerate(instructions)):
for step, instruction in enumerate(dialogue):
instruction = instruction.split("##")[0]
try:
planned = ppt_assistant.planner(instruction)
f.write(f'{idx}/{step}\n')
f.write(instruction+'\n')
f.write(str(planned)+'\n\n')
f.flush()
except:
pass
def test_api_selection(ppt_assistant):
instructions, labels = dataset.load_data(args.data_path, args.dataset)
f = open(f'test_system/api_selection_{args.api_topk}_{args.dataset}.txt','a+')
cnt = 0
for idx, dialogue in tqdm(enumerate(instructions)):
for step, instruction in enumerate(dialogue):
label_apis = labels[idx][step]
instruction = instruction.split("##")[0]
# instructions = ppt_assistant.planner(instruction)
# selected_apis = []
# for ins in instructions:
# selected_apis.extend(ppt_assistant.api_selector(ins))
selected_apis = ppt_assistant.api_selector(instruction)
selected_apis = [x.name for x in selected_apis]
for xx in label_apis:
if ('align_slide' in xx.split('(')[0]) or (xx.split('(')[0] in ['set_left','set_right','set_top','set_bottom']) or ('corner' in xx.split('(')[0]):
continue
if not xx.split('(')[0] in selected_apis:
f.write(f'{idx}/{step}\n')
f.write(instruction+'\n')
f.write(xx.split('(')[0]+'\n')
f.write(str(selected_apis)+'\n\n')
f.flush()
cnt += 1
print(cnt)
def test_content_selection(ppt_assistant):
instructions, labels = dataset.load_data(args.data_path, args.dataset)
f = open(f'test_system/content_selection_{args.dataset}.txt','a+')
for idx, dialogue in tqdm(enumerate(instructions)):
for step, instruction in enumerate(dialogue):
instruction = instruction.split("##")[0]
prompt = prompt_factor.PPT_content_selection_prompt.format(instruction)
reply = openai_api.query_azure_openai(prompt, model='turbo')
f.write(f'{idx}/{step}\n')
f.write(instruction+'\n')
f.write(reply+'\n\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# PPT assistant
parser.add_argument("--data_path", default="test", type=str,
help="The data path to load the instructions")
parser.add_argument("--dataset", default="short", type=str,
help="short/long")
parser.add_argument("--model_id", default="None", type=str,
help="short/long")
parser.add_argument("--user_path", default='./PPTC/', type=str,
help="the user storage file path ")
parser.add_argument("--save_path", default="test_pptx_data", type=str,
help="the path to save the intermediate ppts.")
# mode
parser.add_argument("--prepare", default=False, action='store_true',
help='whether to prepare the data for the model')
| parser.add_argument("--eval", default=False, action='store_true', |
====REPOSITORY====
# Repo Name: nv-tlabs/pacer
# Path: pacer/utils/flags.py
class Flags(object):
def __init__(self, items):
# Path: poselib/poselib/core/rotation3d.py
@torch.jit.script
def quat_inverse(x):
"""
The inverse of the rotation
"""
return quat_conjugate(x)
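# Note (illustrative, not part of the original project): returning the conjugate is only the
# true inverse for unit quaternions, since in general q^{-1} = conjugate(q) / ||q||^2;
# rotation quaternions are assumed to be normalised here.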
# Path: poselib/poselib/core/rotation3d.py
@torch.jit.script
def quat_mul(a, b):
"""
quaternion multiplication
"""
x1, y1, z1, w1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
x2, y2, z2, w2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
y = w1 * y2 + y1 * w2 + z1 * x2 - x1 * z2
z = w1 * z2 + z1 * w2 + x1 * y2 - y1 * x2
return torch.stack([x, y, z, w], dim=-1)
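# Illustrative sanity check (not part of the original project): with the [x, y, z, w]
# layout used above, multiplying by the identity quaternion leaves a rotation unchanged.
import torch
_q = torch.tensor([0.0, 0.0, 0.7071, 0.7071])   # roughly a 90 degree rotation about z
_identity = torch.tensor([0.0, 0.0, 0.0, 1.0])
assert torch.allclose(quat_mul(_q, _identity), _q)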
# Path: pacer/utils/draw_utils.py
def agt_color(aidx):
return matplotlib.colors.to_rgb(plt.rcParams['axes.prop_cycle'].by_key()['color'][aidx % 10])
# Path: pacer/env/tasks/humanoid.py
@torch.jit.script
def compute_humanoid_observations_smpl_max(body_pos, body_rot, body_vel,
body_ang_vel, smpl_params, limb_weight_params,
local_root_obs, root_height_obs,
upright, has_smpl_params, has_limb_weight_params):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, bool, bool, bool, bool, bool) -> Tensor
root_pos = body_pos[:, 0, :]
root_rot = body_rot[:, 0, :]
root_h = root_pos[:, 2:3]
if not upright:
root_rot = remove_base_rot(root_rot)
heading_rot_inv = torch_utils.calc_heading_quat_inv(root_rot)
if (not root_height_obs):
root_h_obs = torch.zeros_like(root_h)
else:
root_h_obs = root_h
heading_rot_inv_expand = heading_rot_inv.unsqueeze(-2)
heading_rot_inv_expand = heading_rot_inv_expand.repeat((1, body_pos.shape[1], 1))
flat_heading_rot_inv = heading_rot_inv_expand.reshape(heading_rot_inv_expand.shape[0] * heading_rot_inv_expand.shape[1],heading_rot_inv_expand.shape[2])
root_pos_expand = root_pos.unsqueeze(-2)
local_body_pos = body_pos - root_pos_expand
flat_local_body_pos = local_body_pos.reshape(local_body_pos.shape[0] * local_body_pos.shape[1],local_body_pos.shape[2])
flat_local_body_pos = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_local_body_pos)
local_body_pos = flat_local_body_pos.reshape(local_body_pos.shape[0],local_body_pos.shape[1] * local_body_pos.shape[2])
local_body_pos = local_body_pos[..., 3:] # remove root pos
flat_body_rot = body_rot.reshape(body_rot.shape[0] * body_rot.shape[1], body_rot.shape[2])
flat_local_body_rot = quat_mul(flat_heading_rot_inv, flat_body_rot)
flat_local_body_rot_obs = torch_utils.quat_to_tan_norm(flat_local_body_rot)
local_body_rot_obs = flat_local_body_rot_obs.reshape(
body_rot.shape[0],
body_rot.shape[1] * flat_local_body_rot_obs.shape[1])
if not (local_root_obs): ##### ZL Here: it should be "not local_root_obs". If local, it shouldn't be overridden
root_rot_obs = torch_utils.quat_to_tan_norm(root_rot)
local_body_rot_obs[..., 0:6] = root_rot_obs
flat_body_vel = body_vel.reshape(body_vel.shape[0] * body_vel.shape[1], body_vel.shape[2])
flat_local_body_vel = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_body_vel)
local_body_vel = flat_local_body_vel.reshape(
body_vel.shape[0], body_vel.shape[1] * body_vel.shape[2])
flat_body_ang_vel = body_ang_vel.reshape(
body_ang_vel.shape[0] * body_ang_vel.shape[1], body_ang_vel.shape[2])
flat_local_body_ang_vel = torch_utils.my_quat_rotate(
flat_heading_rot_inv, flat_body_ang_vel)
local_body_ang_vel = flat_local_body_ang_vel.reshape(
body_ang_vel.shape[0], body_ang_vel.shape[1] * body_ang_vel.shape[2])
obs_list = []
if root_height_obs: obs_list.append(root_h_obs)
obs_list += [
local_body_pos, local_body_rot_obs, local_body_vel, local_body_ang_vel
]
if has_smpl_params: obs_list.append(smpl_params[:, :-6])
if has_limb_weight_params: obs_list.append(limb_weight_params)
obs = torch.cat(obs_list, dim=-1)
return obs
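# Note (illustrative, not part of the original project): compute_humanoid_observations_smpl_max
# expresses all body positions, rotations and velocities relative to the root and rotates them
# into the heading-aligned frame, so the resulting observation is invariant to the character's
# global yaw and xy position (apart from the optional root height term).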
# Path: pacer/env/tasks/humanoid.py
@torch.jit.script
def compute_humanoid_observations_smpl(root_pos, root_rot, root_vel,
root_ang_vel, dof_pos, dof_vel,
key_body_pos, dof_obs_size, dof_offsets,
smpl_params, local_root_obs,
root_height_obs, upright,
has_smpl_params):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, List[int], Tensor, bool, bool,bool, bool) -> Tensor
root_h = root_pos[:, 2:3]
if not upright:
root_rot = remove_base_rot(root_rot)
heading_rot_inv = torch_utils.calc_heading_quat_inv(root_rot)
if (local_root_obs):
root_rot_obs = quat_mul(heading_rot_inv, root_rot)
else:
root_rot_obs = root_rot
root_rot_obs = torch_utils.quat_to_tan_norm(root_rot_obs)
if (not root_height_obs):
root_h_obs = torch.zeros_like(root_h)
else:
root_h_obs = root_h
local_root_vel = torch_utils.my_quat_rotate(heading_rot_inv, root_vel)
local_root_ang_vel = torch_utils.my_quat_rotate(heading_rot_inv, root_ang_vel)
root_pos_expand = root_pos.unsqueeze(-2)
local_key_body_pos = key_body_pos - root_pos_expand
heading_rot_expand = heading_rot_inv.unsqueeze(-2)
heading_rot_expand = heading_rot_expand.repeat( (1, local_key_body_pos.shape[1], 1))
flat_end_pos = local_key_body_pos.view(
local_key_body_pos.shape[0] * local_key_body_pos.shape[1],
local_key_body_pos.shape[2])
flat_heading_rot = heading_rot_expand.view(
heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
heading_rot_expand.shape[2])
local_end_pos = torch_utils.my_quat_rotate(flat_heading_rot, flat_end_pos)
flat_local_key_pos = local_end_pos.view(
local_key_body_pos.shape[0],
local_key_body_pos.shape[1] * local_key_body_pos.shape[2])
dof_obs = dof_to_obs(dof_pos, dof_obs_size, dof_offsets)
obs_list = []
if root_height_obs: obs_list.append(root_h_obs)
obs_list += [
root_rot_obs,
local_root_vel,
local_root_ang_vel,
dof_obs,
dof_vel,
flat_local_key_pos,
]
if has_smpl_params: obs_list.append(smpl_params)
obs = torch.cat(obs_list, dim=-1)
return obs
# Path: pacer/env/tasks/humanoid.py
@torch.jit.script
def compute_humanoid_observations_max(body_pos, body_rot, body_vel,
body_ang_vel, local_root_obs,
root_height_obs):
# type: (Tensor, Tensor, Tensor, Tensor, bool, bool) -> Tensor
root_pos = body_pos[:, 0, :]
root_rot = body_rot[:, 0, :]
root_h = root_pos[:, 2:3]
heading_rot = torch_utils.calc_heading_quat_inv(root_rot)
if (not root_height_obs):
root_h_obs = torch.zeros_like(root_h)
else:
root_h_obs = root_h
heading_rot_expand = heading_rot.unsqueeze(-2)
heading_rot_expand = heading_rot_expand.repeat((1, body_pos.shape[1], 1))
flat_heading_rot = heading_rot_expand.reshape(
heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
heading_rot_expand.shape[2])
root_pos_expand = root_pos.unsqueeze(-2)
local_body_pos = body_pos - root_pos_expand
flat_local_body_pos = local_body_pos.reshape(
local_body_pos.shape[0] * local_body_pos.shape[1],
local_body_pos.shape[2])
flat_local_body_pos = torch_utils.my_quat_rotate(flat_heading_rot,
flat_local_body_pos)
local_body_pos = flat_local_body_pos.reshape(
local_body_pos.shape[0],
local_body_pos.shape[1] * local_body_pos.shape[2])
local_body_pos = local_body_pos[..., 3:] # remove root pos
flat_body_rot = body_rot.reshape(body_rot.shape[0] * body_rot.shape[1],
body_rot.shape[2])
flat_local_body_rot = quat_mul(flat_heading_rot, flat_body_rot)
flat_local_body_rot_obs = torch_utils.quat_to_tan_norm(flat_local_body_rot)
local_body_rot_obs = flat_local_body_rot_obs.reshape(
body_rot.shape[0],
body_rot.shape[1] * flat_local_body_rot_obs.shape[1])
if (local_root_obs): # This is wrong.
print("this is wrong!!!")
print("this is wrong!!!")
print("this is wrong!!!")
root_rot_obs = torch_utils.quat_to_tan_norm(root_rot)
local_body_rot_obs[..., 0:6] = root_rot_obs
flat_body_vel = body_vel.reshape(body_vel.shape[0] * body_vel.shape[1],
body_vel.shape[2])
flat_local_body_vel = torch_utils.my_quat_rotate(flat_heading_rot,
flat_body_vel)
local_body_vel = flat_local_body_vel.reshape(
body_vel.shape[0], body_vel.shape[1] * body_vel.shape[2])
flat_body_ang_vel = body_ang_vel.reshape(
body_ang_vel.shape[0] * body_ang_vel.shape[1], body_ang_vel.shape[2])
flat_local_body_ang_vel = torch_utils.my_quat_rotate(
flat_heading_rot, flat_body_ang_vel)
local_body_ang_vel = flat_local_body_ang_vel.reshape(
body_ang_vel.shape[0], body_ang_vel.shape[1] * body_ang_vel.shape[2])
obs = torch.cat((root_h_obs, local_body_pos, local_body_rot_obs,
local_body_vel, local_body_ang_vel),
dim=-1)
return obs
# Path: pacer/env/tasks/humanoid.py
@torch.jit.script
def compute_humanoid_observations(root_pos, root_rot, root_vel, root_ang_vel,
dof_pos, dof_vel, key_body_pos,
local_root_obs, root_height_obs,
dof_obs_size, dof_offsets):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, bool, bool, int, List[int]) -> Tensor
root_h = root_pos[:, 2:3]
heading_rot = torch_utils.calc_heading_quat_inv(root_rot)
if (local_root_obs):
root_rot_obs = quat_mul(heading_rot, root_rot)
else:
root_rot_obs = root_rot
root_rot_obs = torch_utils.quat_to_tan_norm(root_rot_obs)
if (not root_height_obs):
root_h_obs = torch.zeros_like(root_h)
else:
root_h_obs = root_h
local_root_vel = torch_utils.my_quat_rotate(heading_rot, root_vel)
local_root_ang_vel = torch_utils.my_quat_rotate(heading_rot, root_ang_vel)
root_pos_expand = root_pos.unsqueeze(-2)
local_key_body_pos = key_body_pos - root_pos_expand
heading_rot_expand = heading_rot.unsqueeze(-2)
heading_rot_expand = heading_rot_expand.repeat(
(1, local_key_body_pos.shape[1], 1))
flat_end_pos = local_key_body_pos.view(
local_key_body_pos.shape[0] * local_key_body_pos.shape[1],
local_key_body_pos.shape[2])
flat_heading_rot = heading_rot_expand.view(
heading_rot_expand.shape[0] * heading_rot_expand.shape[1],
heading_rot_expand.shape[2])
local_end_pos = torch_utils.my_quat_rotate(flat_heading_rot, flat_end_pos)
flat_local_key_pos = local_end_pos.view(
local_key_body_pos.shape[0],
local_key_body_pos.shape[1] * local_key_body_pos.shape[2])
dof_obs = dof_to_obs(dof_pos, dof_obs_size, dof_offsets)
obs = torch.cat((root_h_obs, root_rot_obs, local_root_vel,
local_root_ang_vel, dof_obs, dof_vel, flat_local_key_pos),
dim=-1)
return obs
# Path: pacer/env/tasks/humanoid.py
ENABLE_MAX_COORD_OBS = True
# Path: pacer/env/tasks/humanoid_pedestrain_terrain.py
from shutil import ExecError
from isaacgym import gymapi
from isaacgym.torch_utils import *
from env.tasks.humanoid import dof_to_obs
from env.tasks.humanoid_amp import HumanoidAMP, remove_base_rot
from pacer.utils.flags import flags
from utils import torch_utils
from isaacgym import gymtorch
from poselib.poselib.core.rotation3d import quat_inverse, quat_mul
from tqdm import tqdm
from scipy.spatial.transform import Rotation as sRot
from typing import OrderedDict
from pacer.utils.draw_utils import agt_color
from pacer.env.tasks.humanoid import compute_humanoid_observations_smpl_max, compute_humanoid_observations_smpl,\
compute_humanoid_observations_max, compute_humanoid_observations,\
ENABLE_MAX_COORD_OBS
from isaacgym.terrain_utils import *
from pacer.utils.draw_utils import *
import torch
import numpy as np
import env.tasks.humanoid_traj as humanoid_traj
import joblib
import matplotlib.pyplot as plt
# group obs
B, D = normal_task_obs.shape
traj_samples_dim = 20
obs_acc = []
normal_task_obs = normal_task_obs.clone()
traj_samples = normal_task_obs[:, :traj_samples_dim].view(B, 10, 2)
traj_samples[..., 1] *= -1
obs_acc.append(traj_samples.view(B, -1))
if self.terrain_obs:
if self.velocity_map:
height_samples = normal_task_obs[..., traj_samples_dim: traj_samples_dim + self.num_height_points * 3]
height_samples = height_samples.view(B, int(np.sqrt(self.num_height_points)), int(np.sqrt(self.num_height_points)), 3)
height_samples[..., 0].flip(2)
height_samples = height_samples.flip(2)
obs_acc.append(height_samples.view(B, -1))
else:
height_samples = normal_task_obs[..., traj_samples_dim: traj_samples_dim + self.num_height_points].view(B, int(np.sqrt(self.num_height_points)), int(np.sqrt(self.num_height_points)))
height_samples = height_samples.flip(2)
obs_acc.append(height_samples.view(B, -1))
obs = torch.cat(obs_acc, dim=1)
if self._divide_group and self._group_obs:
group_obs = normal_task_obs[..., traj_samples_dim + self.num_height_points: ].view(B, -1, 3)
group_obs[..., 1] *= -1
obs_acc.append(group_obs.view(B, -1))
obs = torch.cat(obs_acc, dim=1)
del obs_acc
return obs
def _reset_task(self, env_ids):
# super()._reset_task(env_ids) # Commented out to disable traj resetting
if not flags.server_mode:
root_pos = self._humanoid_root_states[env_ids, 0:3]
self._traj_gen.reset(env_ids, root_pos)
return
def _sample_ref_state(self, env_ids, vel_min=1, vel_range=0.5):
num_envs = env_ids.shape[0]
motion_ids = self._motion_lib.sample_motions(num_envs)
if (self._state_init == HumanoidAMP.StateInit.Random
or self._state_init == HumanoidAMP.StateInit.Hybrid):
motion_times = self._sample_time(motion_ids)
elif (self._state_init == HumanoidAMP.StateInit.Start):
motion_times = torch.zeros(num_envs, device=self.device)
else:
assert (
False
), "Unsupported state initialization strategy: {:s}".format(
str(self._state_init))
if self.smpl_humanoid:
curr_gender_betas = self.humanoid_betas[env_ids]
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos, rb_pos, rb_rot, body_vel, body_ang_vel = self._get_fixed_smpl_state_from_motionlib(
motion_ids, motion_times, curr_gender_betas)
else:
root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos = self._motion_lib.get_motion_state(
motion_ids, motion_times)
rb_pos, rb_rot = None, None
if flags.random_heading:
random_rot = np.zeros([num_envs, 3])
random_rot[:, 2] = np.pi * (2 * np.random.random([num_envs]) - 1.0)
random_heading_quat = torch.from_numpy(sRot.from_euler("xyz", random_rot).as_quat()).float().to(self.device)
random_heading_quat_repeat = random_heading_quat[:, None].repeat(1, 24, 1)
root_rot = quat_mul(random_heading_quat, root_rot).clone()
rb_pos = quat_apply(random_heading_quat_repeat, rb_pos - root_pos[:, None, :]).clone()
key_pos = quat_apply(random_heading_quat_repeat[:, :4, :], (key_pos - root_pos[:, None, :])).clone()
rb_rot = quat_mul(random_heading_quat_repeat, rb_rot).clone()
root_ang_vel = quat_apply(random_heading_quat, root_ang_vel).clone()
curr_heading = torch_utils.calc_heading_quat(root_rot)
root_vel[:, 0] = torch.rand([num_envs]) * vel_range + vel_min
root_vel = quat_apply(curr_heading, root_vel).clone()
return motion_ids, motion_times, root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos, rb_pos, rb_rot
def _reset_ref_state_init(self, env_ids):
num_envs = env_ids.shape[0]
motion_ids, motion_times, root_pos, root_rot, dof_pos, root_vel, root_ang_vel, dof_vel, key_pos, rb_pos, rb_rot = self._sample_ref_state(env_ids)
## Randomrized location setting
new_root_xy = self.terrain.sample_valid_locations(self.num_envs, env_ids)
# joblib.dump(self.terrain.sample_valid_locations(100000, torch.arange(100000)).detach().cpu(), "new_root_xy.pkl")
# import ipdb; ipdb.set_trace()
if flags.fixed:
# new_root_xy[:, 0], new_root_xy[:, 1] = 0 , 0
# new_root_xy[:, 0], new_root_xy[:, 1] = 134.8434 + env_ids , -28.9593
# new_root_xy[:, 0], new_root_xy[:, 1] = 30 + env_ids * 4, 240
new_root_xy[:, 0], new_root_xy[:, 1] = 10 + env_ids * 3, 10
# new_root_xy[:, 0], new_root_xy[:, 1] = 95 + env_ids * 5, 307
# new_root_xy[:, 0], new_root_xy[:, 1] = 27, 1 + env_ids * 2
# x_grid, y_grid = torch.meshgrid(torch.arange(64), torch.arange(64))
# new_root_xy[:, 0], new_root_xy[:, 1] = x_grid.flatten()[env_ids] * 2, y_grid.flatten()[env_ids] * 2
# if env_ids[0] == 0:
# new_root_xy[0, 0], new_root_xy[0, 1] = 34 , -81
if flags.server_mode:
new_traj = self._traj_gen.input_new_trajs(env_ids)
new_root_xy[:, 0], new_root_xy[:, 1] = new_traj[:, 0, 0], new_traj[:, 0, 1]
diff_xy = new_root_xy - root_pos[:, 0:2]
root_pos[:, 0:2] = new_root_xy
root_states = torch.cat([root_pos, root_rot], dim=1)
center_height = self.get_center_heights(root_states, env_ids=env_ids).mean(dim=-1)
root_pos[:, 2] += center_height
key_pos[..., 0:2] += diff_xy[:, None, :]
key_pos[..., 2] += center_height[:, None]
| rb_pos[..., 0:2] += diff_xy[:, None, :] |
====REPOSITORY====
# Repo Name: Improbable-AI/dexenv
# Path: dexenv/engine/base_engine.py
class BaseEngine:
agent: Any
runner: Any
cfg: DictConfig
def __post_init__(self):
self.cur_step = 0
self._best_eval_ret = -np.inf
self._best_train_ret = -np.inf
self._best_eval_success_rate = -np.inf
self._best_train_success_rate = -np.inf
self._eval_is_best = False
self._train_is_best = False
self.smooth_eval_return = None
self.smooth_train_return = None
self.smooth_tau = self.cfg.alg.smooth_eval_tau
self.optim_stime = None
wandb_cfg = OmegaConf.to_container(self.cfg, resolve=True, throw_on_missing=True)
if self.cfg.test or self.cfg.resume:
if self.cfg.resume_id is None:
raise ValueError('Please specify the run ID to be resumed!')
run_id = self.cfg.resume_id.split(':')[0]
if '/' in run_id and not self.cfg.resume_to_diff_id:
run_id_split = run_id.split('/')
run_id = run_id_split[-1]
self.cfg.logging.wandb.project = run_id_split[-2]
if self.cfg.resume_to_diff_id:
run_id = get_hydra_run_dir().name
if self.cfg.resume:
wandb_cfg['hostname'] = platform.node()
self.wandb_runs = wandb.init(**self.cfg.logging.wandb,
resume='allow', id=run_id,
config=wandb_cfg if self.cfg.resume else None,
)
else:
wandb_cfg['hostname'] = platform.node()
wandb_kwargs = self.cfg.logging.wandb
wandb_tags = wandb_kwargs.get('tags', None)
if wandb_tags is not None and isinstance(wandb_tags, str):
wandb_kwargs['tags'] = [wandb_tags]
self.wandb_runs = wandb.init(**wandb_kwargs, config=wandb_cfg,
# id=wandb_id, name=wandb_id,
# settings=wandb.Settings(start_method="thread"),
)
logger.warning(f'Wandb run dir:{self.wandb_runs.dir}')
logger.warning(f' Project name:{self.wandb_runs.project_name()}')
if (self.cfg.test or self.cfg.resume) and not self.cfg.test_pretrain:
self.cur_step = self.agent.load_model(eval=self.cfg.test and self.cfg.test_eval_best,
pretrain_model=self.cfg.alg.pretrain_model)
if self.cfg.resume_to_diff_id:
self.cur_step = 0
else:
if self.cfg.alg.pretrain_model is not None:
self.agent.load_model(pretrain_model=self.cfg.alg.pretrain_model)
def train(self, **kwargs):
raise NotImplementedError
def rollout_once(self, *args, **kwargs):
t0 = time.perf_counter()
self.agent.eval_mode()
traj = self.runner(**kwargs)
t1 = time.perf_counter()
elapsed_time = t1 - t0
return traj, elapsed_time
def do_eval(self, det=False, sto=False):
eval_log_info = dict()
det = det or self.cfg.alg.det_eval
sto = sto or self.cfg.alg.sto_eval
if det:
det_log_info, _ = self.eval(eval_num=self.cfg.test_num,
sample=False, smooth=True)
det_log_info = {f'det/{k}': v for k, v in det_log_info.items()}
eval_log_info.update(det_log_info)
if sto:
sto_log_info, _ = self.eval(eval_num=self.cfg.test_num,
sample=True, smooth=False)
sto_log_info = {f'sto/{k}': v for k, v in sto_log_info.items()}
eval_log_info.update(sto_log_info)
if len(eval_log_info) > 0:
wandb.log(eval_log_info, step=self.cur_step)
if self._eval_is_best and self.cfg.save_eval_ckpt:
self.agent.save_model(is_best=True,
step=self.cur_step,
eval=True,
wandb_run=self.wandb_runs)
@torch.no_grad()
def eval(self, render=False, eval_num=1,
sleep_time=0, sample=True, no_tqdm=None):
t0 = time.perf_counter()
time_steps = []
rets = []
lst_step_infos = []
successes = []
check_dist = None
dists = []
if no_tqdm is not None:
disable_tqdm = bool(no_tqdm)
else:
disable_tqdm = not self.cfg.test
for idx in tqdm(range(eval_num), disable=disable_tqdm):
traj, _ = self.rollout_once(time_steps=self.cfg.alg.eval_rollout_steps,
return_on_done=False,
sample=self.cfg.alg.sample_action and sample,
render=render,
sleep_time=sleep_time,
evaluation=True)
done_indices = traj.done_indices
if hasattr(traj, 'raw_rewards'):
rewards = traj.raw_rewards
else:
rewards = traj.rewards
infos = traj.infos
for eid in range(len(done_indices)):
done_ids = done_indices[eid]
start_id = 0
for ejd in range(len(done_ids)):
end_id = done_ids[ejd] + 1
ret = np.sum(torch_to_np(rewards[start_id:end_id, eid]))
start_id = end_id
rets.append(ret)
lst_step_infos.append(get_element_from_traj_infos(infos=infos, key=None,
i=end_id - 1, j=eid))
episode_steps = traj.episode_steps
time_steps.extend(episode_steps)
successes.extend(get_element_from_traj_infos_row_ids(infos=infos, key='success',
row_ids=done_indices))
raw_traj_info = {'return': rets,
'lst_step_info': lst_step_infos}
if isinstance(time_steps[0], list):
raw_traj_info['episode_length'] = np.concatenate(time_steps)
else:
raw_traj_info['episode_length'] = np.asarray(time_steps)
if check_dist:
raw_traj_info['abs_dist'] = np.concatenate(dists)
log_info = dict()
for key, val in raw_traj_info.items():
if 'info' in key:
continue
val_stats = get_list_stats(val)
for sk, sv in val_stats.items():
log_info['eval/' + key + '/' + sk] = sv
if len(successes) > 0:
log_info['eval/success'] = np.mean(successes)
t1 = time.perf_counter()
elapsed_time = t1 - t0
log_info['eval/eval_time'] = elapsed_time
return log_info
def get_train_log(self, optim_infos, traj=None):
log_info = dict()
vector_keys = set()
scalar_keys = set()
for oinf in optim_infos:
for key in oinf.keys():
if 'vec_' in key:
vector_keys.add(key)
else:
scalar_keys.add(key)
for key in scalar_keys:
log_info[key] = np.mean([inf[key] for inf in optim_infos if key in inf])
for key in vector_keys:
k_stats = get_list_stats([inf[key] for inf in optim_infos if key in inf])
for sk, sv in k_stats.items():
log_info[f'{key}/' + sk] = sv
if traj is not None:
actions_stats = get_list_stats(torch_to_np(traj.actions))
for sk, sv in actions_stats.items():
log_info['rollout_action/' + sk] = sv
log_info['rollout_steps_per_iter'] = traj.total_steps
ep_returns_stats = get_list_stats(self.runner.train_ep_return)
for sk, sv in ep_returns_stats.items():
log_info['episode_return/' + sk] = sv
ep_len_stats = get_list_stats(self.runner.train_ep_len)
for sk, sv in ep_len_stats.items():
log_info['episode_length/' + sk] = sv
if len(self.runner.train_success) > 0:
log_info['episode_success'] = np.mean(self.runner.train_success)
if 'episode_return/mean' in log_info:
self.smooth_train_return = smooth_value(log_info['episode_return/mean'],
self.smooth_train_return,
self.smooth_tau)
log_info['smooth_return/mean'] = self.smooth_train_return
if self.cfg.save_best_on_success and len(self.runner.train_success) > 0:
self._train_is_best, self._best_train_success_rate = self._check_ckpt_is_best(
log_info['episode_success'],
best_history_val=self._best_train_success_rate,
bigger_is_better=True
)
else:
self._train_is_best, self._best_train_ret = self._check_ckpt_is_best(self.smooth_train_return,
best_history_val=self._best_train_ret,
bigger_is_better=True)
ex_info = traj[0].info
if isinstance(ex_info, Sequence):
traj_info_keys = ex_info[0].keys()
else: # dict
traj_info_keys = traj[0].info.keys()
reward_keys = [x for x in traj_info_keys if 'reward' in x]
if len(reward_keys) > 0:
traj_infos = traj.infos
for rkey in reward_keys:
rlist = aggregate_traj_info(traj_infos, rkey)
rlist_stats = get_list_stats(rlist)
for rk, rv in rlist_stats.items():
log_info[f'{rkey}/{rk}'] = rv
log_info['rollout_steps'] = self.cur_step
train_log_info = dict()
for key, val in log_info.items():
train_log_info['train/' + key] = val
return train_log_info
def get_dataloader(self, dataset, batch_size):
subset = SubsetRandomSampler(range(len(dataset)))
batch_sampler = BatchSampler(subset,
batch_size=batch_size,
drop_last=False,
)
inds = list(batch_sampler)
dataloader = []
for ind in inds:
sub_data = dict()
if isinstance(dataset, DictDataset):
for keys in dataset.data.keys():
sub_data[keys] = dataset.data[keys][ind]
elif isinstance(dataset, TrajDatasetSplitByDone):
for keys in dataset.data.keys():
if keys == 'time_steps':
sub_data[keys] = torch.tensor(dataset.data[keys])[ind]
else:
sub_data[keys] = dataset.data[keys][ind]
dataloader.append(sub_data)
return dataloader
def _check_ckpt_is_best(self, cur_val, best_history_val, bigger_is_better=True):
is_best = cur_val > best_history_val if bigger_is_better else cur_val < best_history_val
if is_best:
best_history_val = cur_val
return is_best, best_history_val
def _get_batch_size(self, dataset):
if hasattr(self.cfg.alg, 'num_batches') and self.cfg.alg.num_batches is not None:
batch_size = max(1, int(len(dataset) / self.cfg.alg.num_batches))
else:
batch_size = self.cfg.alg.batch_size
return batch_size
# Path: dexenv/utils/common.py
def get_list_stats(data):
if len(data) < 1:
return dict()
if isinstance(data, torch.Tensor):
data = data.cpu().numpy()
min_data = np.amin(data)
max_data = np.amax(data)
mean_data = np.mean(data)
median_data = np.median(data)
stats = dict(
min=min_data,
max=max_data,
mean=mean_data,
median=median_data
)
return stats
# Path: dexenv/utils/common.py
def smooth_value(current_value, past_value, tau):
if past_value is None:
return current_value
else:
return past_value * tau + current_value * (1 - tau)
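# Worked example (illustrative, not part of the original project): with tau = 0.9 the
# smoothed estimate keeps 90% of its previous value and mixes in 10% of the new one.
assert smooth_value(10.0, None, 0.9) == 10.0             # no history yet, use the current value
assert abs(smooth_value(0.0, 10.0, 0.9) - 9.0) < 1e-9    # 10.0 * 0.9 + 0.0 * 0.1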
# Path: dexenv/utils/common.py
def stack_data(data, torch_to_numpy=False, dim=0):
if isinstance(data[0], dict):
out = dict()
for key in data[0].keys():
out[key] = stack_data([x[key] for x in data], dim=dim)
return out
if check_torch_tensor(data):
try:
ret = torch.stack(data, dim=dim)
if torch_to_numpy:
ret = ret.cpu().numpy()
except:
# if data is a list of arrays that do not have same shapes (such as point cloud)
ret = data
else:
try:
ret = np.stack(data, axis=dim)
except:
ret = data
return ret
# Path: dexenv/utils/common.py
def stat_for_traj_data(traj_data, dones, op='min'):
"""
traj_data: T x #of envs
"""
op = getattr(np, op)
stats = []
for i in range(dones.shape[1]):
stat = []
di = dones[:, i]
if not np.any(di):
stat.append(op(traj_data[:, i]))
else:
done_idx = np.where(di)[0]
t = 0
for idx in done_idx:
stat.append(op(traj_data[t: idx + 1, i]))
t = idx + 1
stats.append(stat)
return np.concatenate(stats)
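# Worked example (illustrative, not part of the original project): each environment's column
# is split at its done flags and `op` is applied to every completed segment; steps after the
# last done flag are not counted.
import numpy as np
_traj = np.array([[3.0], [1.0], [5.0], [2.0]])   # T=4 steps, one environment
_dones = np.array([[0], [1], [0], [1]])
assert stat_for_traj_data(_traj, _dones, op='min').tolist() == [1.0, 2.0]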
# Path: dexenv/utils/dataset.py
class DictDataset(Dataset):
def __init__(self, **kwargs):
self.data = kwargs
self.length = next(iter(self.data.values())).shape[0]
def __len__(self):
return self.length
def __getitem__(self, idx):
sample = dict()
for key, val in self.data.items():
sample[key] = val[idx]
return sample
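# Illustrative usage (not part of the original project): wraps equally sized tensors into a
# map-style dataset indexed along the first dimension.
import torch
_ds = DictDataset(obs=torch.zeros(5, 3), action=torch.zeros(5, 2))
assert len(_ds) == 5
assert _ds[0]['obs'].shape == (3,)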
# Path: dexenv/utils/info_util.py
def aggregate_traj_info(infos, key, single_info=False):
if single_info:
infos = [infos]
if isinstance(infos[0], Sequence):
out = []
for info in infos:
time_out = []
for env_info in info:
time_out.append(env_info[key])
out.append(np.stack(time_out))
out = stack_data(out)
elif isinstance(infos[0], dict):
out = []
for info in infos:
tensor = info[key]
out.append(tensor)
out = stack_data(out)
else:
raise NotImplementedError
if single_info:
out = out.squeeze(0)
return out
# Path: dexenv/utils/info_util.py
def get_element_from_traj_infos(infos, key, i, j):
if isinstance(infos[0], Sequence):
if key is None:
return infos[i][j]
else:
return infos[i][j][key]
elif isinstance(infos[0], dict):
if key is None:
keys = infos[i].keys()
out = dict()
for key in keys:
try:
out[key] = infos[i][key][j]
except:
continue
return out
else:
return infos[i][key][j]
else:
raise NotImplementedError
# Path: dexenv/utils/info_util.py
def get_element_from_traj_infos_row_ids(infos, key, row_ids):
if isinstance(infos[0], Sequence):
out = []
for col_id in range(len(row_ids)):
for row_id in row_ids[col_id]:
if key in infos[row_id][col_id]:
out.append(torch_to_np(infos[row_id][col_id][key]))
return out
elif isinstance(infos[0], dict):
out = []
for col_id in range(len(row_ids)):
for row_id in row_ids[col_id]:
if key in infos[row_id]:
out.append(torch_to_np(infos[row_id][key][col_id]))
return out
else:
raise NotImplementedError
# Path: dexenv/utils/info_util.py
def info_has_key(infos, key, single_info=False):
if not single_info:
infos = infos[0]
if isinstance(infos, Sequence):
return key in infos[0]
elif isinstance(infos, dict):
return key in infos
else:
raise NotImplementedError
# Path: dexenv/utils/torch_utils.py
def swap_axes(data, axes):
if isinstance(data, torch.Tensor):
return torch.transpose(data, *axes)
else:
return np.swapaxes(data, *axes)
# Path: dexenv/utils/torch_utils.py
def torch_to_np(tensor):
if isinstance(tensor, np.ndarray):
return tensor
else:
return tensor.cpu().detach().numpy()
# Path: dexenv/engine/rnn_engine.py
import numpy as np
import time
import torch
from dataclasses import dataclass
from itertools import count
from tqdm import tqdm
from dexenv.engine.base_engine import BaseEngine
from dexenv.utils.common import get_list_stats
from dexenv.utils.common import smooth_value
from dexenv.utils.common import stack_data
from dexenv.utils.common import stat_for_traj_data
from dexenv.utils.dataset import DictDataset
from dexenv.utils.info_util import aggregate_traj_info
from dexenv.utils.info_util import get_element_from_traj_infos
from dexenv.utils.info_util import get_element_from_traj_infos_row_ids
from dexenv.utils.info_util import info_has_key
from dexenv.utils.torch_utils import swap_axes
from dexenv.utils.torch_utils import torch_to_np
def extract_traj_data_rnn(cfg, states, data, act_dim=9):
states_ori = states
if cfg.vision.pred_rot_dist:
if cfg.task.env.robot == 'dclaw_4f':
quat_diff = states[..., 41:45]
else:
raise NotImplementedError
rot_dist = 2.0 * torch.asin(torch.clamp(torch.norm(quat_diff[..., 0:3], p=2, dim=-1), max=1.0))
data['rot_dist'] = rot_dist
if cfg.vision.act_in:
states = states_ori
prev_actions = states[..., -act_dim:]
data['prev_actions'] = prev_actions
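# Note (illustrative, not part of the original project): in `extract_traj_data_rnn` above, a
# unit relative rotation can be written q = [sin(theta/2) * axis, cos(theta/2)], so the norm
# of its xyz part is |sin(theta/2)| and 2 * asin(...) recovers the rotation angle theta
# (in radians) encoded by `quat_diff`.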
@dataclass
class RNNEngine(BaseEngine):
act_dim: int = 12
def train(self):
for iter_t in count():
if iter_t % self.cfg.logging.eval_interval == 0 and self.cfg.alg.run_eval:
self.do_eval()
traj, rollout_time = self.rollout_once(sample=True,
get_last_val=False,
reset_first=True,
time_steps=self.cfg.alg.train_rollout_steps)
optim_infos = self.train_once(traj)
if iter_t % self.cfg.logging.log_interval == 0:
t1 = time.perf_counter()
train_log_info = self.get_train_log(optim_infos, traj)
train_log_info['train/optim_time'] = t1 - self.optim_stime
train_log_info['train/rollout_time'] = rollout_time
if self.cur_step > self.cfg.alg.max_steps:
break
if iter_t % self.cfg.logging.ckpt_interval == 0:
| self.agent.save_model(is_best=self._train_is_best, |
====REPOSITORY====
# Repo Name: ai-safety-foundation/sparse_autoencoder
# Path: sparse_autoencoder/autoencoder/components/linear_encoder.py
class LinearEncoder(Module):
r"""Linear encoder layer.
Linear encoder layer (essentially `nn.Linear`, with a ReLU activation function). Designed to be
used as the encoder in a sparse autoencoder (excluding any outer tied bias).
$$
\begin{align*}
m &= \text{learned features dimension} \\
n &= \text{input and output dimension} \\
b &= \text{batch items dimension} \\
\overline{\mathbf{x}} \in \mathbb{R}^{b \times n} &= \text{input after tied bias} \\
W_e \in \mathbb{R}^{m \times n} &= \text{weight matrix} \\
b_e \in \mathbb{R}^{m} &= \text{bias vector} \\
f &= \text{ReLU}(\overline{\mathbf{x}} W_e^T + b_e) = \text{LinearEncoder output}
\end{align*}
$$
"""
_learnt_features: int
"""Number of learnt features (inputs to this layer)."""
_input_features: int
"""Number of input features from the source model."""
_n_components: int | None
weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),
]
"""Weight parameter.
Each row in the weights matrix acts as a dictionary vector, representing a single basis
element in the learned activation space.
"""
bias: Float[Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)]
"""Bias parameter."""
@property
def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:
"""Reset optimizer parameter details.
Details of the parameters that should be reset in the optimizer, when resetting
dictionary vectors.
Returns:
List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to
reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.
"""
return [
ResetOptimizerParameterDetails(parameter=self.weight, axis=-2),
ResetOptimizerParameterDetails(parameter=self.bias, axis=-1),
]
activation_function: ReLU
"""Activation function."""
@validate_call
def __init__(
self,
input_features: PositiveInt,
learnt_features: PositiveInt,
n_components: PositiveInt | None,
):
"""Initialize the linear encoder layer.
Args:
input_features: Number of input features to the autoencoder.
learnt_features: Number of learnt features in the autoencoder.
n_components: Number of source model components the SAE is trained on.
"""
super().__init__()
self._learnt_features = learnt_features
self._input_features = input_features
self._n_components = n_components
self.weight = Parameter(
torch.empty(
shape_with_optional_dimensions(n_components, learnt_features, input_features),
)
)
self.bias = Parameter(
torch.zeros(shape_with_optional_dimensions(n_components, learnt_features))
)
self.activation_function = ReLU()
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize or reset the parameters."""
# Assumes we are using the ReLU activation function (for e.g. leaky ReLU, the `a` parameter and
# `nonlinearity` must be changed).
init.kaiming_uniform_(self.weight, nonlinearity="relu")
# Bias (approach from nn.Linear)
fan_in = self.weight.size(1)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
init.uniform_(self.bias, -bound, bound)
def forward(
self,
x: Float[
Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
],
) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)]:
"""Forward pass.
Args:
x: Input tensor.
Returns:
Output of the forward pass.
"""
z = (
einops.einsum(
x,
self.weight,
f"{Axis.BATCH} ... {Axis.INPUT_OUTPUT_FEATURE}, \
... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE} \
-> {Axis.BATCH} ... {Axis.LEARNT_FEATURE}",
)
+ self.bias
)
return self.activation_function(z)
@final
def update_dictionary_vectors(
self,
dictionary_vector_indices: Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)],
updated_dictionary_weights: Float[
Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX, Axis.INPUT_OUTPUT_FEATURE)
],
component_idx: int | None = None,
) -> None:
"""Update encoder dictionary vectors.
Updates the dictionary vectors (columns in the weight matrix) with the given values.
Args:
dictionary_vector_indices: Indices of the dictionary vectors to update.
updated_dictionary_weights: Updated weights for just these dictionary vectors.
component_idx: Component index to update.
Raises:
ValueError: If there are multiple components and `component_idx` is not specified.
"""
if dictionary_vector_indices.numel() == 0:
return
with torch.no_grad():
if component_idx is None:
if self._n_components is not None:
error_message = "component_idx must be specified when n_components is not None"
raise ValueError(error_message)
self.weight[dictionary_vector_indices] = updated_dictionary_weights
else:
self.weight[component_idx, dictionary_vector_indices] = updated_dictionary_weights
@final
def update_bias(
self,
update_parameter_indices: Int64[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)
],
updated_bias_features: Float[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)
],
component_idx: int | None = None,
) -> None:
"""Update encoder bias.
Args:
update_parameter_indices: Indices of the bias features to update.
updated_bias_features: Updated bias features for just these indices.
component_idx: Component index to update.
Raises:
ValueError: If there are multiple components and `component_idx` is not specified.
"""
if update_parameter_indices.numel() == 0:
return
with torch.no_grad():
if component_idx is None:
if self._n_components is not None:
error_message = "component_idx must be specified when n_components is not None"
raise ValueError(error_message)
self.bias[update_parameter_indices] = updated_bias_features
else:
self.bias[component_idx, update_parameter_indices] = updated_bias_features
def extra_repr(self) -> str:
"""String extra representation of the module."""
return (
f"input_features={self._input_features}, "
f"learnt_features={self._learnt_features}, "
f"n_components={self._n_components}"
)
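# Illustrative usage sketch (not part of the original library code): a single-component
# encoder maps a batch of two 4-dimensional activations to 16 learnt features, and the ReLU
# guarantees the learnt activations are non-negative.
import torch
_encoder = LinearEncoder(input_features=4, learnt_features=16, n_components=None)
_learnt = _encoder(torch.randn(2, 4))
assert _learnt.shape == (2, 16)
assert bool((_learnt >= 0).all())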
# Path: sparse_autoencoder/autoencoder/components/tied_bias.py
class TiedBias(Module):
"""Tied Bias Layer.
The tied pre-encoder bias is a learned bias term that is subtracted from the input before
encoding, and added back after decoding.
The bias parameter must be initialised in the parent module, and then passed to this layer.
https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-bias
"""
_bias_position: TiedBiasPosition
_bias_reference: Float[
Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
]
@property
def bias(
self,
) -> Float[Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:
"""Bias."""
return self._bias_reference
def __init__(
self,
bias_reference: Float[
Parameter, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
],
position: TiedBiasPosition,
) -> None:
"""Initialize the bias layer.
Args:
bias_reference: Tied bias parameter (initialised in the parent module), used for both
the pre-encoder and post-encoder bias. The original paper initialised this using the
geometric median of the dataset.
position: Whether this is the pre-encoder or post-encoder bias.
"""
super().__init__()
self._bias_reference = bias_reference
# Support string literals as well as enums
self._bias_position = position
def forward(
self,
x: Float[
Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
],
) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:
"""Forward Pass.
Args:
x: Input tensor.
Returns:
Output of the forward pass.
"""
# If this is the pre-encoder bias, we subtract the bias from the input.
if self._bias_position == TiedBiasPosition.PRE_ENCODER:
return x - self.bias
# If it's the post-encoder bias, we add the bias to the input.
return x + self.bias
def extra_repr(self) -> str:
"""String extra representation of the module."""
return f"position={self._bias_position.value}"
# Path: sparse_autoencoder/autoencoder/components/tied_bias.py
class TiedBiasPosition(str, Enum):
"""Tied Bias Position."""
PRE_ENCODER = "pre_encoder"
POST_DECODER = "post_decoder"
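# Illustrative usage sketch (not part of the original library code): the same bias parameter
# is subtracted before the encoder and added back after the decoder, so the two layers
# compose to the identity.
import torch
from torch.nn import Parameter
_bias = Parameter(torch.tensor([1.0, 2.0, 3.0]))
_pre = TiedBias(_bias, TiedBiasPosition.PRE_ENCODER)
_post = TiedBias(_bias, TiedBiasPosition.POST_DECODER)
_x = torch.zeros(1, 3)
assert torch.equal(_post(_pre(_x)), _x)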
# Path: sparse_autoencoder/autoencoder/components/unit_norm_decoder.py
class UnitNormDecoder(Module):
r"""Constrained unit norm linear decoder layer.
Linear layer decoder, where the dictionary vectors (columns of the weight matrix) are
constrained to have unit norm. This is done by removing the gradient information parallel to the
dictionary vectors before applying the gradient step, using a backward hook. It also requires
`constrain_weights_unit_norm` to be called after each gradient step, to prevent drift of the
dictionary vectors away from unit norm (as optimisers such as Adam don't strictly follow the
gradient, but instead follow a modified gradient that includes momentum).
$$ \begin{align*}
m &= \text{learned features dimension} \\
n &= \text{input and output dimension} \\
b &= \text{batch items dimension} \\
f \in \mathbb{R}^{b \times m} &= \text{encoder output} \\
W_d \in \mathbb{R}^{n \times m} &= \text{weight matrix} \\
z \in \mathbb{R}^{b \times m} &= f W_d^T = \text{UnitNormDecoder output (pre-tied bias)}
\end{align*} $$
Motivation:
Normalisation of the columns (dictionary features) prevents the model from reducing the
sparsity loss term by increasing the size of the feature vectors in $W_d$.
Note that the *Towards Monosemanticity: Decomposing Language Models With Dictionary
Learning* paper found that removing the gradient information parallel to the dictionary
vectors before applying the gradient step, rather than resetting the dictionary vectors to
unit norm after each gradient step, [results in a small but real reduction in total
loss](https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-optimization).
"""
_learnt_features: int
"""Number of learnt features (inputs to this layer)."""
_decoded_features: int
"""Number of decoded features (outputs from this layer)."""
_n_components: int | None
weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE),
]
"""Weight parameter.
Each column in the weights matrix acts as a dictionary vector, representing a single basis
element in the learned activation space.
"""
@property
def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:
"""Reset optimizer parameter details.
Details of the parameters that should be reset in the optimizer, when resetting
dictionary vectors.
Returns:
List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to
reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.
"""
return [ResetOptimizerParameterDetails(parameter=self.weight, axis=-1)]
@validate_call
def __init__(
self,
learnt_features: PositiveInt,
decoded_features: PositiveInt,
n_components: PositiveInt | None,
*,
enable_gradient_hook: bool = True,
) -> None:
"""Initialize the constrained unit norm linear layer.
Args:
learnt_features: Number of learnt features in the autoencoder.
decoded_features: Number of decoded (output) features in the autoencoder.
n_components: Number of source model components the SAE is trained on.
enable_gradient_hook: Enable the gradient backwards hook (modify the gradient before
applying the gradient step, to maintain unit norm of the dictionary vectors).
"""
super().__init__()
self._learnt_features = learnt_features
self._decoded_features = decoded_features
self._n_components = n_components
# Create the linear layer as per the standard PyTorch linear layer
self.weight = Parameter(
torch.empty(
shape_with_optional_dimensions(n_components, decoded_features, learnt_features),
)
)
self.reset_parameters()
# Register backward hook to remove any gradient information parallel to the dictionary
# vectors (columns of the weight matrix) before applying the gradient step.
if enable_gradient_hook:
self.weight.register_hook(self._weight_backward_hook)
def update_dictionary_vectors(
self,
dictionary_vector_indices: Int64[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)
],
updated_weights: Float[
Tensor,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE_IDX),
],
component_idx: int | None = None,
) -> None:
"""Update decoder dictionary vectors.
        Updates the dictionary vectors (columns of the weight matrix) with the given values. Typically
this is used when resampling neurons (dictionary vectors) that have died.
Args:
dictionary_vector_indices: Indices of the dictionary vectors to update.
updated_weights: Updated weights for just these dictionary vectors.
component_idx: Component index to update.
Raises:
ValueError: If `component_idx` is not specified when `n_components` is not None.
"""
if dictionary_vector_indices.numel() == 0:
return
with torch.no_grad():
if component_idx is None:
if self._n_components is not None:
error_message = "component_idx must be specified when n_components is not None"
raise ValueError(error_message)
self.weight[:, dictionary_vector_indices] = updated_weights
else:
self.weight[component_idx, :, dictionary_vector_indices] = updated_weights
def constrain_weights_unit_norm(self) -> None:
"""Constrain the weights to have unit norm.
Warning:
Note this must be called after each gradient step. This is because optimisers such as
Adam don't strictly follow the gradient, but instead follow a modified gradient that
includes momentum. This means that the gradient step can change the norm of the
dictionary vectors, even when the hook `_weight_backward_hook` is applied.
Note this can't be applied directly in the backward hook, as it would interfere with a
variety of use cases (e.g. gradient accumulation across mini-batches, concurrency issues
with asynchronous operations, etc).
Example:
>>> import torch
>>> layer = UnitNormDecoder(3, 3, None)
>>> layer.weight.data = torch.ones((3, 3)) * 10
>>> layer.constrain_weights_unit_norm()
>>> column_norms = torch.sqrt(torch.sum(layer.weight ** 2, dim=0))
>>> column_norms.round(decimals=3).tolist()
[1.0, 1.0, 1.0]
"""
with torch.no_grad():
torch.nn.functional.normalize(self.weight, dim=-2, out=self.weight)
def reset_parameters(self) -> None:
"""Initialize or reset the parameters.
Example:
>>> import torch
>>> # Create a layer with 4 columns (learnt features) and 3 rows (decoded features)
>>> layer = UnitNormDecoder(learnt_features=4, decoded_features=3, n_components=None)
>>> layer.reset_parameters()
            >>> # Get the squared norm of each column (dictionary vector) by summing over the rows
>>> column_norms = torch.sum(layer.weight ** 2, dim=0)
>>> column_norms.round(decimals=3).tolist()
[1.0, 1.0, 1.0, 1.0]
"""
# Initialize the weights with a normal distribution. Note we don't use e.g. kaiming
# normalisation here, since we immediately scale the weights to have unit norm (so the
# initial standard deviation doesn't matter). Note also that `init.normal_` is in place.
self.weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),
] = init.normal_(self.weight, mean=0, std=1) # type: ignore
        # Scale so that each column (dictionary vector) has unit norm
self.constrain_weights_unit_norm()
def _weight_backward_hook(
self,
grad: Float[
Tensor,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),
],
) -> Float[
Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)
]:
r"""Unit norm backward hook.
By subtracting the projection of the gradient onto the dictionary vectors, we remove the
component of the gradient that is parallel to the dictionary vectors and just keep the
component that is orthogonal to the dictionary vectors (i.e. moving around the hypersphere).
The result is that the backward pass does not change the norm of the dictionary vectors.
$$
\begin{align*}
W_d &\in \mathbb{R}^{n \times m} = \text{Decoder weight matrix} \\
g &\in \mathbb{R}^{n \times m} = \text{Gradient w.r.t. } W_d
\text{ from the backward pass} \\
W_{d, \text{norm}} &= \frac{W_d}{\|W_d\|} = \text{Normalized decoder weight matrix
(over columns)} \\
g_{\parallel} &\in \mathbb{R}^{n \times m} = \text{Component of } g
\text{ parallel to } W_{d, \text{norm}} \\
g_{\perp} &\in \mathbb{R}^{n \times m} = \text{Component of } g \text{ orthogonal to }
W_{d, \text{norm}} \\
g_{\parallel} &= W_{d, \text{norm}} \cdot (W_{d, \text{norm}}^\top \cdot g) \\
g_{\perp} &= g - g_{\parallel} =
\text{Adjusted gradient with parallel component removed} \\
\end{align*}
$$
Args:
grad: Gradient with respect to the weights.
Returns:
Gradient with respect to the weights, with the component parallel to the dictionary
vectors removed.
"""
# Project the gradients onto the dictionary vectors. Intuitively the dictionary vectors can
# be thought of as vectors that end on the circumference of a hypersphere. The projection of
# the gradient onto the dictionary vectors is the component of the gradient that is parallel
# to the dictionary vectors, i.e. the component that moves to or from the center of the
# hypersphere.
normalized_weight: Float[
Tensor,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE),
] = self.weight / torch.norm(self.weight, dim=-2, keepdim=True)
scalar_projections = einops.einsum(
grad,
normalized_weight,
f"... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE}, \
... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE} \
-> ... {Axis.INPUT_OUTPUT_FEATURE}",
)
projection = einops.einsum(
scalar_projections,
normalized_weight,
f"... {Axis.INPUT_OUTPUT_FEATURE}, \
... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE} \
-> ... {Axis.LEARNT_FEATURE} {Axis.INPUT_OUTPUT_FEATURE}",
)
# Subtracting the parallel component from the gradient leaves only the component that is
# orthogonal to the dictionary vectors, i.e. the component that moves around the surface of
# the hypersphere.
return grad - projection
def forward(
self, x: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)]
) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]:
"""Forward pass.
Args:
x: Input tensor.
Returns:
Output of the forward pass.
"""
return einops.einsum(
x,
self.weight,
f"{Axis.BATCH} ... {Axis.LEARNT_FEATURE}, \
... {Axis.INPUT_OUTPUT_FEATURE} {Axis.LEARNT_FEATURE} \
-> {Axis.BATCH} ... {Axis.INPUT_OUTPUT_FEATURE}",
)
def extra_repr(self) -> str:
"""String extra representation of the module."""
return (
f"learnt_features={self._learnt_features}, "
f"decoded_features={self._decoded_features}, "
f"n_components={self._n_components}"
)
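# --- Illustrative sketch (not part of the original repository file) ---
# The projection performed by `_weight_backward_hook` above can be reproduced with plain
# PyTorch: subtracting, per column, the component of the gradient parallel to that
# (unit-norm) dictionary vector leaves a gradient that cannot change the column norms.
# All names below are local to this example.
import torch
def _remove_parallel_component(grad: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # Normalise each column (dictionary vector) of the (n, m) weight matrix.
    unit_columns = weight / weight.norm(dim=0, keepdim=True)
    # Scalar projection of each gradient column onto its dictionary vector.
    scalar_projection = (grad * unit_columns).sum(dim=0, keepdim=True)
    # Remove the parallel component; the remainder only moves around the hypersphere.
    return grad - scalar_projection * unit_columns
example_weight = torch.nn.functional.normalize(torch.randn(8, 16), dim=0)
example_grad = torch.randn(8, 16)
orthogonal_grad = _remove_parallel_component(example_grad, example_weight)
# Each adjusted gradient column is orthogonal to its dictionary vector (up to float error).
assert torch.allclose((orthogonal_grad * example_weight).sum(dim=0), torch.zeros(16), atol=1e-5)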
# Path: sparse_autoencoder/autoencoder/types.py
class ResetOptimizerParameterDetails(NamedTuple):
"""Reset Optimizer Parameter Details.
Details of a parameter that should be reset in the optimizer, when resetting
    its corresponding dictionary vectors.
"""
parameter: Parameter
"""Parameter to reset."""
axis: int
"""Axis of the parameter to reset."""
# Path: sparse_autoencoder/tensor_types.py
class Axis(LowercaseStrEnum):
"""Tensor axis names.
Used to annotate tensor types.
Example:
When used directly it prints a string:
>>> print(Axis.INPUT_OUTPUT_FEATURE)
input_output_feature
The primary use is to annotate tensor types:
>>> from jaxtyping import Float
>>> from torch import Tensor
>>> from typing import TypeAlias
>>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]
>>> print(batch)
<class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>
You can also join multiple axis together to represent the dimensions of a tensor:
>>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))
batch input_output_feature
"""
# Component idx
COMPONENT = auto()
"""Component index."""
COMPONENT_OPTIONAL = "*component"
"""Optional component index."""
# Batches
SOURCE_DATA_BATCH = auto()
"""Batch of prompts used to generate source model activations."""
BATCH = auto()
"""Batch of items that the SAE is being trained on."""
STORE_BATCH = auto()
"""Batch of items to be written to the store."""
ITEMS = auto()
"""Arbitrary number of items."""
# Features
INPUT_OUTPUT_FEATURE = auto()
"""Input or output feature (e.g. feature in activation vector from source model)."""
LEARNT_FEATURE = auto()
"""Learn feature (e.g. feature in learnt activation vector)."""
DEAD_FEATURE = auto()
"""Dead feature."""
ALIVE_FEATURE = auto()
"""Alive feature."""
# Feature indices
INPUT_OUTPUT_FEATURE_IDX = auto()
"""Input or output feature index."""
LEARNT_FEATURE_IDX = auto()
"""Learn feature index."""
# Other
POSITION = auto()
"""Token position."""
SINGLE_ITEM = ""
"""Single item axis."""
ANY = "..."
"""Any number of axis."""
@staticmethod
def names(*axis: "Axis") -> str:
"""Join multiple axis together, to represent the dimensions of a tensor.
Example:
>>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))
batch input_output_feature
Args:
*axis: Axis to join.
Returns:
Joined axis string.
"""
return " ".join(a.value for a in axis)
# Path: sparse_autoencoder/utils/tensor_shape.py
def shape_with_optional_dimensions(*shape: int | None) -> tuple[int, ...]:
"""Create a shape from a tuple of optional dimensions.
Motivation:
By default PyTorch tensor shapes will error if you set an axis to `None`. This allows
you to set that size and then the resulting output simply removes that axis.
Examples:
>>> shape_with_optional_dimensions(1, 2, 3)
(1, 2, 3)
>>> shape_with_optional_dimensions(1, None, 3)
(1, 3)
>>> shape_with_optional_dimensions(1, None, None)
(1,)
>>> shape_with_optional_dimensions(None, None, None)
()
Args:
*shape: Axis sizes, with `None` representing an optional axis.
Returns:
Axis sizes.
"""
return tuple(dimension for dimension in shape if dimension is not None)
# Path: sparse_autoencoder/autoencoder/model.py
from pathlib import Path
from tempfile import gettempdir
from typing import NamedTuple
from huggingface_hub import HfApi, hf_hub_download
from jaxtyping import Float
from pydantic import (
BaseModel,
DirectoryPath,
NonNegativeInt,
PositiveInt,
validate_call,
)
from torch import Tensor
from torch.nn import Module, Parameter
from torch.serialization import FILE_LIKE
from sparse_autoencoder.autoencoder.components.linear_encoder import LinearEncoder
from sparse_autoencoder.autoencoder.components.tied_bias import TiedBias, TiedBiasPosition
from sparse_autoencoder.autoencoder.components.unit_norm_decoder import UnitNormDecoder
from sparse_autoencoder.autoencoder.types import ResetOptimizerParameterDetails
from sparse_autoencoder.tensor_types import Axis
from sparse_autoencoder.utils.tensor_shape import shape_with_optional_dimensions
import torch
import wandb
"""The Sparse Autoencoder Model."""
class SparseAutoencoderConfig(BaseModel, frozen=True):
"""SAE model config."""
n_input_features: PositiveInt
"""Number of input features.
E.g. `d_mlp` if training on MLP activations from TransformerLens).
"""
n_learned_features: PositiveInt
"""Number of learned features.
The initial paper experimented with 1 to 256 times the number of input features, and primarily
used a multiple of 8."""
| n_components: PositiveInt | None = None |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: LeapLabTHU/FamO2O
# Path: jax_cql/JaxCQL/sac.py
class SAC(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.discount = 0.99
config.alpha_multiplier = 1.0
config.use_automatic_entropy_tuning = True
config.backup_entropy = False
config.target_entropy = 0.0
config.policy_lr = 3e-4
config.qf_lr = 3e-4
config.optimizer_type = 'adam'
config.soft_target_update_rate = 5e-3
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, policy, qf):
self.config = self.get_default_config(config)
self.policy = policy
self.qf = qf
self.observation_dim = policy.observation_dim
self.action_dim = policy.action_dim
self._train_states = {}
optimizer_class = {
'adam': optax.adam,
'sgd': optax.sgd,
}[self.config.optimizer_type]
policy_params = self.policy.init(
next_rng(self.policy.rng_keys()),
jnp.zeros((10, self.observation_dim))
)
self._train_states['policy'] = TrainState.create(
params=policy_params,
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
qf1_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf1'] = TrainState.create(
params=qf1_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
qf2_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf2'] = TrainState.create(
params=qf2_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})
model_keys = ['policy', 'qf1', 'qf2']
if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
self._train_states['log_alpha'] = TrainState.create(
params=self.log_alpha.init(next_rng()),
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys.append('log_alpha')
self._model_keys = tuple(model_keys)
self._total_steps = 0
def train(self, batch):
self._total_steps += 1
self._train_states, self._target_qf_params, metrics = self._train_step(
self._train_states, self._target_qf_params, next_rng(), batch
)
return metrics
@partial(jax.jit, static_argnames='self')
def _train_step(self, train_states, target_qf_params, rng, batch):
rng_generator = JaxRNG(rng)
def loss_fn(train_params, rng):
observations = batch['observations']
actions = batch['actions']
rewards = batch['rewards']
next_observations = batch['next_observations']
dones = batch['dones']
loss_collection = {}
@wrap_function_with_rng(rng_generator())
def forward_policy(rng, *args, **kwargs):
return self.policy.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.policy.rng_keys())
)
@wrap_function_with_rng(rng_generator())
def forward_qf(rng, *args, **kwargs):
return self.qf.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.qf.rng_keys())
)
new_actions, log_pi = forward_policy(train_params['policy'], observations)
if self.config.use_automatic_entropy_tuning:
alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()
loss_collection['log_alpha'] = alpha_loss
alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier
else:
alpha_loss = 0.0
alpha = self.config.alpha_multiplier
""" Policy loss """
q_new_actions = jnp.minimum(
forward_qf(train_params['qf1'], observations, new_actions),
forward_qf(train_params['qf2'], observations, new_actions),
)
policy_loss = (alpha*log_pi - q_new_actions).mean()
loss_collection['policy'] = policy_loss
""" Q function loss """
q1_pred = forward_qf(train_params['qf1'], observations, actions)
q2_pred = forward_qf(train_params['qf2'], observations, actions)
new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)
target_q_values = jnp.minimum(
forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),
forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),
)
if self.config.backup_entropy:
target_q_values = target_q_values - alpha * next_log_pi
q_target = jax.lax.stop_gradient(
rewards + (1. - dones) * self.config.discount * target_q_values
)
qf1_loss = mse_loss(q1_pred, q_target)
qf2_loss = mse_loss(q2_pred, q_target)
loss_collection['qf1'] = qf1_loss
loss_collection['qf2'] = qf2_loss
return tuple(loss_collection[key] for key in self.model_keys), locals()
train_params = {key: train_states[key].params for key in self.model_keys}
(_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)
new_train_states = {
key: train_states[key].apply_gradients(grads=grads[i][key])
for i, key in enumerate(self.model_keys)
}
new_target_qf_params = {}
new_target_qf_params['qf1'] = update_target_network(
new_train_states['qf1'].params, target_qf_params['qf1'],
self.config.soft_target_update_rate
)
new_target_qf_params['qf2'] = update_target_network(
new_train_states['qf2'].params, target_qf_params['qf2'],
self.config.soft_target_update_rate
)
metrics = collect_jax_metrics(
aux_values,
['log_pi', 'policy_loss', 'qf1_loss', 'qf2_loss', 'alpha_loss',
'alpha', 'q1_pred', 'q2_pred', 'target_q_values']
)
return new_train_states, new_target_qf_params, metrics
@property
def model_keys(self):
return self._model_keys
@property
def train_states(self):
return self._train_states
@property
def train_params(self):
return {key: self.train_states[key].params for key in self.model_keys}
@property
def total_steps(self):
return self._total_steps
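# Illustrative sketch (assumption -- `update_target_network` is imported elsewhere and is
# not shown in this excerpt): the soft target update used above is typically a Polyak /
# exponential-moving-average update applied leaf-wise over the parameter pytrees, with
# `soft_target_update_rate` as the mixing coefficient.
import jax
def update_target_network(main_params, target_params, tau):
    # new_target = tau * main + (1 - tau) * old_target, for every leaf in the pytree.
    return jax.tree_util.tree_map(
        lambda main, target: tau * main + (1.0 - tau) * target,
        main_params,
        target_params,
    )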
# Path: jax_cql/JaxCQL/replay_buffer.py
class ReplayBuffer(object):
def __init__(self, max_size, reward_scale, reward_bias, data=None):
self._max_size = max_size
self._next_idx = 0
self._size = 0
self._initialized = False
self._total_steps = 0
self.reward_scale = reward_scale
self.reward_bias = reward_bias
if data is not None:
if self._max_size < data['observations'].shape[0]:
self._max_size = data['observations'].shape[0]
self.add_batch(data)
def __len__(self):
return self._size
def clear(self):
self._size = 0
self._next_idx = 0
self._total_steps = 0
def _init_storage(self, observation_dim, action_dim):
self._observation_dim = observation_dim
self._action_dim = action_dim
self._observations = np.zeros((self._max_size, observation_dim), dtype=np.float32)
self._next_observations = np.zeros((self._max_size, observation_dim), dtype=np.float32)
self._actions = np.zeros((self._max_size, action_dim), dtype=np.float32)
self._rewards = np.zeros(self._max_size, dtype=np.float32)
self._dones = np.zeros(self._max_size, dtype=np.float32)
self._next_idx = 0
self._size = 0
self._initialized = True
def add_sample(self, observation, action, reward, next_observation, done):
if not self._initialized:
self._init_storage(observation.size, action.size)
        # newly added: apply reward scaling and bias before storing
        reward = self.reward_scale * reward + self.reward_bias
self._observations[self._next_idx, :] = np.array(observation, dtype=np.float32)
self._next_observations[self._next_idx, :] = np.array(next_observation, dtype=np.float32)
self._actions[self._next_idx, :] = np.array(action, dtype=np.float32)
self._rewards[self._next_idx] = reward
self._dones[self._next_idx] = float(done)
if self._size < self._max_size:
self._size += 1
self._next_idx = (self._next_idx + 1) % self._max_size
self._total_steps += 1
def add_traj(self, observations, actions, rewards, next_observations, dones):
for o, a, r, no, d in zip(observations, actions, rewards, next_observations, dones):
self.add_sample(o, a, r, no, d)
def add_batch(self, batch):
self.add_traj(
batch['observations'], batch['actions'], batch['rewards'],
batch['next_observations'], batch['dones']
)
def sample(self, batch_size):
indices = np.random.randint(len(self), size=batch_size)
return self.select(indices)
def select(self, indices):
return dict(
observations=self._observations[indices, ...],
actions=self._actions[indices, ...],
rewards=self._rewards[indices, ...],
next_observations=self._next_observations[indices, ...],
dones=self._dones[indices, ...],
)
def generator(self, batch_size, n_batchs=None):
i = 0
while n_batchs is None or i < n_batchs:
yield self.sample(batch_size)
i += 1
@property
def total_steps(self):
return self._total_steps
@property
def data(self):
return dict(
observations=self._observations[:self._size, ...],
actions=self._actions[:self._size, ...],
rewards=self._rewards[:self._size, ...],
next_observations=self._next_observations[:self._size, ...],
dones=self._dones[:self._size, ...]
)
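# Illustrative usage sketch (not part of the original file): the buffer grows until
# `max_size` is reached and then overwrites the oldest samples via the modular `_next_idx`.
import numpy as np
toy_buffer = ReplayBuffer(max_size=3, reward_scale=1.0, reward_bias=0.0)
for step in range(5):
    obs = np.zeros(4, dtype=np.float32)
    act = np.zeros(2, dtype=np.float32)
    toy_buffer.add_sample(obs, act, reward=float(step), next_observation=obs, done=False)
assert len(toy_buffer) == 3  # capped at max_size; the two oldest samples were overwritten
toy_batch = toy_buffer.sample(batch_size=2)
assert toy_batch['observations'].shape == (2, 4)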
# Path: jax_cql/JaxCQL/jax_utils.py
@jax.jit
def batch_to_jax(batch):
return jax.tree_util.tree_map(jax.device_put, batch)
# Path: jax_cql/JaxCQL/model.py
class TanhGaussianPolicy(nn.Module):
observation_dim: int
action_dim: int
arch: str = '256-256'
orthogonal_init: bool = False
log_std_multiplier: float = 1.0
log_std_offset: float = -1.0
def setup(self):
self.base_network = FullyConnectedNetwork(
output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init
)
self.log_std_multiplier_module = Scalar(self.log_std_multiplier)
self.log_std_offset_module = Scalar(self.log_std_offset)
def log_prob(self, observations, actions):
if actions.ndim == 3:
observations = extend_and_repeat(observations, 1, actions.shape[1])
base_network_output = self.base_network(observations)
mean, log_std = jnp.split(base_network_output, 2, axis=-1)
log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()
log_std = jnp.clip(log_std, -20.0, 2.0)
action_distribution = distrax.Transformed(
distrax.MultivariateNormalDiag(mean, jnp.exp(log_std)),
distrax.Block(distrax.Tanh(), ndims=1)
)
return action_distribution.log_prob(actions)
def __call__(self, observations, deterministic=False, repeat=None):
if repeat is not None:
observations = extend_and_repeat(observations, 1, repeat)
base_network_output = self.base_network(observations)
mean, log_std = jnp.split(base_network_output, 2, axis=-1)
log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()
log_std = jnp.clip(log_std, -20.0, 2.0)
action_distribution = distrax.Transformed(
distrax.MultivariateNormalDiag(mean, jnp.exp(log_std)),
distrax.Block(distrax.Tanh(), ndims=1)
)
if deterministic:
samples = jnp.tanh(mean)
log_prob = action_distribution.log_prob(samples)
else:
samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))
return samples, log_prob
@nn.nowrap
def rng_keys(self):
return ('params', 'noise')
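# Illustrative sketch (not part of the original file): the tanh-squashed Gaussian that
# `__call__` above builds from `mean` and `log_std`, sampled outside of Flax for clarity.
import distrax
import jax
import jax.numpy as jnp
toy_mean, toy_log_std = jnp.zeros(6), jnp.full(6, -1.0)
toy_distribution = distrax.Transformed(
    distrax.MultivariateNormalDiag(toy_mean, jnp.exp(toy_log_std)),
    distrax.Block(distrax.Tanh(), ndims=1),
)
toy_sample, toy_log_prob = toy_distribution.sample_and_log_prob(seed=jax.random.PRNGKey(0))
# The tanh bijector keeps every action dimension inside (-1, 1).
assert bool(jnp.all(jnp.abs(toy_sample) < 1.0))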
# Path: jax_cql/JaxCQL/model.py
class FullyConnectedQFunction(nn.Module):
observation_dim: int
action_dim: int
arch: str = '256-256'
orthogonal_init: bool = False
@nn.compact
@multiple_action_q_function
def __call__(self, observations, actions):
x = jnp.concatenate([observations, actions], axis=-1)
x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)
return jnp.squeeze(x, -1)
@nn.nowrap
def rng_keys(self):
return ('params',)
# Path: jax_cql/JaxCQL/model.py
class SamplerPolicy(object):
def __init__(self, policy, params):
self.policy = policy
self.params = params
def update_params(self, params):
self.params = params
return self
@partial(jax.jit, static_argnames=('self', 'deterministic'))
def act(self, params, rng, observations, deterministic):
return self.policy.apply(params, observations, deterministic, repeat=None,
rngs=JaxRNG(rng)(self.policy.rng_keys()), )
def __call__(self, observations, deterministic=False):
actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)
assert jnp.all(jnp.isfinite(actions))
return jax.device_get(actions)
# Path: jax_cql/JaxCQL/sampler.py
class StepSampler(object):
def __init__(self, env, max_traj_length=1000):
self.max_traj_length = max_traj_length
self._env = env
self._traj_steps = 0
self._current_observation = self.env.reset()
def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):
observations = []
actions = []
rewards = []
next_observations = []
dones = []
for _ in range(n_steps):
self._traj_steps += 1
observation = self._current_observation
action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)
next_observation, reward, done, _ = self.env.step(action)
observations.append(observation)
actions.append(action)
rewards.append(reward)
dones.append(done)
next_observations.append(next_observation)
if replay_buffer is not None:
replay_buffer.add_sample(
observation, action, reward, next_observation, done
)
self._current_observation = next_observation
if done or self._traj_steps >= self.max_traj_length:
self._traj_steps = 0
self._current_observation = self.env.reset()
return dict(
observations=np.array(observations, dtype=np.float32),
actions=np.array(actions, dtype=np.float32),
rewards=np.array(rewards, dtype=np.float32),
next_observations=np.array(next_observations, dtype=np.float32),
dones=np.array(dones, dtype=np.float32),
)
@property
def env(self):
return self._env
# Path: jax_cql/JaxCQL/sampler.py
class TrajSampler(object):
def __init__(self, env, max_traj_length=1000):
self.max_traj_length = max_traj_length
self._env = env
def sample(self, policy, n_trajs, deterministic=False, replay_buffer=None):
trajs = []
for _ in range(n_trajs):
observations = []
actions = []
rewards = []
next_observations = []
dones = []
observation = self.env.reset()
for _ in range(self.max_traj_length):
action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)
next_observation, reward, done, _ = self.env.step(action)
observations.append(observation)
actions.append(action)
rewards.append(reward)
dones.append(done)
next_observations.append(next_observation)
if replay_buffer is not None:
replay_buffer.add_sample(
observation, action, reward, next_observation, done
)
observation = next_observation
if done:
break
trajs.append(dict(
observations=np.array(observations, dtype=np.float32),
actions=np.array(actions, dtype=np.float32),
rewards=np.array(rewards, dtype=np.float32),
next_observations=np.array(next_observations, dtype=np.float32),
dones=np.array(dones, dtype=np.float32),
))
return trajs
@property
def env(self):
return self._env
# Path: jax_cql/JaxCQL/utils.py
class Timer(object):
def __init__(self):
self._time = None
def __enter__(self):
self._start_time = time.time()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self._time = time.time() - self._start_time
def __call__(self):
return self._time
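# Illustrative usage sketch (not part of the original file): the timer is used as a
# context manager and then *called* to read the elapsed wall-clock time in seconds.
with Timer() as rollout_timer:
    pass  # work being timed goes here
elapsed_seconds = rollout_timer()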
# Path: jax_cql/JaxCQL/utils.py
def define_flags_with_default(**kwargs):
for key, val in kwargs.items():
if isinstance(val, ConfigDict):
config_flags.DEFINE_config_dict(key, val)
elif isinstance(val, bool):
# Note that True and False are instances of int.
absl.flags.DEFINE_bool(key, val, 'automatically defined flag')
elif isinstance(val, int):
absl.flags.DEFINE_integer(key, val, 'automatically defined flag')
elif isinstance(val, float):
absl.flags.DEFINE_float(key, val, 'automatically defined flag')
elif isinstance(val, str):
absl.flags.DEFINE_string(key, val, 'automatically defined flag')
else:
raise ValueError('Incorrect value type')
return kwargs
# Path: jax_cql/JaxCQL/utils.py
def set_random_seed(seed):
np.random.seed(seed)
random.seed(seed)
init_rng(seed)
# Path: jax_cql/JaxCQL/utils.py
def print_flags(flags, flags_def):
logging.info(
'Running training with hyperparameters: \n{}'.format(
pprint.pformat(
['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]
)
)
)
# Path: jax_cql/JaxCQL/utils.py
def get_user_flags(flags, flags_def):
output = {}
for key in flags_def:
val = getattr(flags, key)
if isinstance(val, ConfigDict):
output.update(flatten_config_dict(val, prefix=key))
else:
output[key] = val
return output
# Path: jax_cql/JaxCQL/utils.py
def prefix_metrics(metrics, prefix):
return {
'{}/{}'.format(prefix, key): value for key, value in metrics.items()
}
# Path: jax_cql/JaxCQL/utils.py
class WandBLogger(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.online = False
config.prefix = 'FamilyJaxCQL'
config.project = 'sac'
config.output_dir = '/tmp/FamilyJaxCQL'
config.random_delay = 0.0
config.experiment_id = config_dict.placeholder(str)
config.anonymous = config_dict.placeholder(str)
config.notes = config_dict.placeholder(str)
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, variant):
self.config = self.get_default_config(config)
if self.config.experiment_id is None:
self.config.experiment_id = uuid.uuid4().hex
if self.config.prefix != '':
self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)
if self.config.output_dir == '':
self.config.output_dir = tempfile.mkdtemp()
else:
self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)
os.makedirs(self.config.output_dir, exist_ok=True)
self._variant = copy(variant)
if 'hostname' not in self._variant:
self._variant['hostname'] = gethostname()
if self.config.random_delay > 0:
time.sleep(np.random.uniform(0, self.config.random_delay))
self.run = wandb.init(
reinit=True,
config=self._variant,
project=self.config.project,
dir=self.config.output_dir,
id=self.config.experiment_id,
anonymous=self.config.anonymous,
notes=self.config.notes,
settings=wandb.Settings(
start_method="thread",
_disable_stats=True,
),
mode='online' if self.config.online else 'offline',
)
def log(self, *args, **kwargs):
self.run.log(*args, **kwargs)
def save_pickle(self, obj, filename):
with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:
pickle.dump(obj, fout)
@property
def experiment_id(self):
return self.config.experiment_id
@property
def variant(self):
return self.config.variant
@property
def output_dir(self):
return self.config.output_dir
# Path: jax_cql/JaxCQL/sac_main.py
import os
import time
import uuid
import numpy as np
import pprint
import gym
import jax
import jax.numpy as jnp
import flax
import absl.app
import absl.flags
from copy import deepcopy
from .sac import SAC
from .replay_buffer import ReplayBuffer
from .jax_utils import batch_to_jax
from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy
from .sampler import StepSampler, TrajSampler
from .utils import (
Timer, define_flags_with_default, set_random_seed, print_flags,
get_user_flags, prefix_metrics, WandBLogger
)
from viskit.logging import logger, setup_logger
FLAGS_DEF = define_flags_with_default(
env='HalfCheetah-v2',
max_traj_length=1000,
replay_buffer_size=1000000,
seed=42,
save_model=False,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
n_env_steps_per_epoch=1000,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
batch_size=256,
sac=SAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
| FLAGS = absl.flags.FLAGS |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: DAMO-NLP-SG/CLEX
# Path: CLEX/phi2/configuration_phi2_clex.py
class CLEXPhiConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`PhiModel`]. It is used to instantiate a Phi
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Phi
[microsoft/phi-1](https://huggingface.co/microsoft/phi-1).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 51200):
Vocabulary size of the Phi model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`PhiModel`].
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details checkout [this
paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
`num_attention_heads`.
resid_pdrop (`float`, *optional*, defaults to 0.0):
Dropout probability for mlp outputs.
embd_pdrop (`int`, *optional*, defaults to 0.0):
The dropout ratio for the embeddings.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio after computing the attention scores.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu_new"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with. Phi-1 and Phi-1.5 support up to 2048
tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
`max_position_embeddings` to the expected new maximum. See the following thread for more information on how
these scaling strategies behave:
https://www.reddit.com/r/LocalPersimmon/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This
is an experimental feature, subject to breaking API changes in future versions.
partial_rotary_factor (`float`, *optional*, defaults to 0.5):
Percentage of the query and keys which will have rotary embedding.
qk_layernorm (`bool`, *optional*, defaults to `False`):
Whether or not to normalize the Queries and Keys after projecting the hidden states.
bos_token_id (`int`, *optional*, defaults to 1):
Denotes beginning of sequences token id.
eos_token_id (`int`, *optional*, defaults to 2):
Denotes end of sequences token id.
Example:
```python
>>> from transformers import PhiModel, PhiConfig
>>> # Initializing a Phi-1 style configuration
>>> configuration = PhiConfig.from_pretrained("microsoft/phi-1")
>>> # Initializing a model from the configuration
>>> model = PhiModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "phi"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=51200,
hidden_size=2048,
intermediate_size=8192,
num_hidden_layers=24,
num_attention_heads=32,
num_key_value_heads=None,
resid_pdrop=0.0,
embd_pdrop=0.0,
attention_dropout=0.0,
hidden_act="gelu_new",
max_position_embeddings=2048,
initializer_range=0.02,
layer_norm_eps=1e-5,
use_cache=True,
tie_word_embeddings=False,
rope_theta=10000.0,
rope_scaling=None,
partial_rotary_factor=0.5,
qk_layernorm=False,
bos_token_id=1,
eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attention_dropout = attention_dropout
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.partial_rotary_factor = partial_rotary_factor
self.qk_layernorm = qk_layernorm
self._rope_scaling_validation()
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation
def _rope_scaling_validation(self):
"""
Validate the `rope_scaling` configuration.
"""
if self.rope_scaling is None:
return
# if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
# raise ValueError(
# "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
# f"got {self.rope_scaling}"
# )
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_max_factor = self.rope_scaling.get("max_factor", None)
rope_scaling_param_factor = self.rope_scaling.get("param_factor", None)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic", "clex"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
)
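# Illustrative sketch (not part of the original file; the values below are placeholders):
# `_rope_scaling_validation` above accepts a dict such as this one, and the CLEX
# rotary-embedding module shown next reads `max_factor`, `param_factor`, `act` and
# `time_dt` from the same dict.
example_config = CLEXPhiConfig(
    rope_scaling={
        "type": "clex",
        "max_factor": 16,
        "param_factor": 1,
        "act": "silu",
        "time_dt": 0.01,
    },
)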
# Path: CLEX/clex_layer.py
class LlamaCLEXScalingRotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, rope_scaling=None, base=1000000, device=None) -> None:
super().__init__()
self.max_t = rope_scaling["max_factor"]
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq)
self.proj_func = ODELinear(dim, rope_scaling["param_factor"], rope_scaling["act"])
self.rope_cached = None
self.max_t_cached = 0
self.freq_cached = None
self.time_dt = rope_scaling["time_dt"]
self.ode_args = {
"method": "rk4",
"options": {"step_size": self.time_dt},
}
def sample_random_times(self, max_t, device):
return torch.randint(1, max_t, (1,), dtype = torch.long, device=device)
def get_random_position_ids(self, n=2048, max=8192):
positions = torch.randperm(max)[:n].sort().values
return positions
def get_continuous_freq(self, time_grid, ex_positions, device):
solution = odeint(
self.proj_func, torch.log(self.inv_freq.to(device, dtype=torch.float32)), time_grid, **self.ode_args
)
if time_grid.size(0) == 2:
scale_inv_freq = torch.exp(solution[1])
freqs = torch.outer(ex_positions.float().squeeze(), scale_inv_freq)
else:
scale_inv_freq = torch.exp(solution)
return scale_inv_freq
embed = torch.cat((freqs,freqs), dim=-1)
return embed
def forward(self, input_embeds, seq_len, do_train=False):
device = self.proj_func.ode_up_proj.device
dtype = input_embeds.dtype
scale_factor = seq_len // self.max_position_embeddings
if do_train:
t_val = self.sample_random_times(self.max_t+1, device)[0]
if scale_factor < 1.0:
scale_factor = 1
sampled_position_ids = self.get_random_position_ids(n=seq_len-2, max=seq_len*t_val-2).float()
ex_positions = torch.cat([
torch.tensor([0]),
(sampled_position_ids + 1) / scale_factor,
torch.tensor([seq_len*t_val//scale_factor-1])]
).to(device, dtype=torch.float32)
else:
t_val = scale_factor if seq_len%self.max_position_embeddings == 0.0 else scale_factor + 1
t_val = t_val if t_val <= self.max_t else self.max_t
ex_positions = torch.arange(0, self.max_position_embeddings * t_val, dtype=torch.float32).to(device)
if t_val == 1.0:
scale_inv_freq = self.inv_freq.to(device)
freqs = torch.outer(ex_positions.float().squeeze(), scale_inv_freq)
embed = torch.cat((freqs,freqs), dim=-1)
cos, sin = embed.cos(), embed.sin()
elif do_train:
time_grid = torch.tensor([1.0, t_val]).float().to(device)
embed = self.get_continuous_freq(time_grid, ex_positions, device)
cos, sin = embed.cos(), embed.sin()
else:
if self.freq_cached is None:
time_grid = torch.arange(1.0, self.max_t+1.0, dtype=torch.float32).to(device)
self.freq_cached = self.get_continuous_freq(time_grid, ex_positions, device)
if t_val != self.max_t_cached:
scale_inv_freq = self.freq_cached[int(t_val-1.0)]
freqs = torch.outer(ex_positions.float().squeeze(), scale_inv_freq)
embed = torch.cat((freqs,freqs), dim=-1)
self.rope_cached = torch.cat((embed.cos()[None, :, :], embed.sin()[None, :, :]), dim=0)
self.max_t_cached = t_val
cos, sin = self.rope_cached
return torch.cat(
(cos[None, :seq_len].to(dtype=dtype),
sin[None, :seq_len].to(dtype=dtype)),
dim=0
)
# Path: CLEX/phi2/modeling_phi2_clex.py
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from typing import List, Optional, Tuple, Union
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
is_flash_attn_2_available,
is_flash_attn_greater_or_equal_2_10,
logging,
replace_return_docstrings,
)
from .configuration_phi2_clex import CLEXPhiConfig
from ..clex_layer import LlamaCLEXScalingRotaryEmbedding
from flash_attn import flash_attn_func, flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
next_cache = None
if use_cache:
next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
if not return_dict:
return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
class PhiForCausalLM(PhiPreTrainedModel):
_tied_weights_keys = ["lm_head.weight"]
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.__init__ with Llama->Phi,bias=False->bias=True
def __init__(self, config):
super().__init__(config)
self.model = PhiModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_input_embeddings
def get_input_embeddings(self):
return self.model.embed_tokens
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_input_embeddings
def set_input_embeddings(self, value):
self.model.embed_tokens = value
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_output_embeddings
def get_output_embeddings(self):
return self.lm_head
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_output_embeddings
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.set_decoder
def set_decoder(self, decoder):
self.model = decoder
# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM.get_decoder
def get_decoder(self):
return self.model
@add_start_docstrings_to_model_forward(PHI_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, PhiForCausalLM
>>> model = PhiForCausalLM.from_pretrained("microsoft/phi-1")
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1")
>>> prompt = "This is an example script ."
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'This is an example script .\n\n\n\nfrom typing import List\n\ndef find_most_common_letter(words: List[str'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
logits = logits.float()
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
| loss_fct = CrossEntropyLoss() |
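# Illustrative sketch (not part of the original file): the shift-and-flatten pattern above
# is the standard next-token (causal LM) cross-entropy, reproduced here on toy tensors.
import torch
from torch.nn import CrossEntropyLoss
toy_vocab = 11
toy_logits = torch.randn(2, 5, toy_vocab)                 # (batch, seq_len, vocab)
toy_labels = torch.randint(0, toy_vocab, (2, 5))          # (batch, seq_len)
toy_shift_logits = toy_logits[..., :-1, :].contiguous()   # predictions at positions 0..3
toy_shift_labels = toy_labels[..., 1:].contiguous()       # targets are the next tokens 1..4
toy_loss = CrossEntropyLoss()(
    toy_shift_logits.view(-1, toy_vocab), toy_shift_labels.view(-1)
)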
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: RenShuhuai-Andy/TESTA
# Path: models/timesformer/models/features.py
class FeatureListNet(FeatureDictNet):
""" Feature extractor with list return
See docstring for FeatureDictNet above, this class exists only to appease Torchscript typing constraints.
In eager Python we could have returned List[Tensor] vs Dict[id, Tensor] based on a member bool.
"""
def __init__(
self, model,
out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
super(FeatureListNet, self).__init__(
model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat,
flatten_sequential=flatten_sequential)
def forward(self, x) -> (List[torch.Tensor]):
return list(self._collect(x).values())
# Path: models/timesformer/models/features.py
class FeatureDictNet(nn.ModuleDict):
""" Feature extractor with OrderedDict return
Wrap a model and extract features as specified by the out indices, the network is
partially re-built from contained modules.
There is a strong assumption that the modules have been registered into the model in the same
order as they are used. There should be no reuse of the same nn.Module more than once, including
trivial modules like `self.relu = nn.ReLU`.
Only submodules that are directly assigned to the model class (`model.feature1`) or at most
    one Sequential container deep (`model.features.1`, with flatten_sequential=True) can be captured.
All Sequential containers that are directly assigned to the original model will have their
modules assigned to this module with the name `model.features.1` being changed to `model.features_1`
Arguments:
model (nn.Module): model from which we will extract the features
out_indices (tuple[int]): model output indices to extract features for
out_map (sequence): list or tuple specifying desired return id for each out index,
otherwise str(index) is used
feature_concat (bool): whether to concatenate intermediate features that are lists or tuples
vs select element [0]
flatten_sequential (bool): whether to flatten sequential modules assigned to model
"""
def __init__(
self, model,
out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
super(FeatureDictNet, self).__init__()
self.feature_info = _get_feature_info(model, out_indices)
self.concat = feature_concat
self.return_layers = {}
return_layers = _get_return_layers(self.feature_info, out_map)
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = set(return_layers.keys())
layers = OrderedDict()
for new_name, old_name, module in modules:
layers[new_name] = module
if old_name in remaining:
# return id has to be consistently str type for torchscript
self.return_layers[new_name] = str(return_layers[old_name])
remaining.remove(old_name)
if not remaining:
break
assert not remaining and len(self.return_layers) == len(return_layers), \
f'Return layers ({remaining}) are not present in model'
self.update(layers)
def _collect(self, x) -> (Dict[str, torch.Tensor]):
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_id = self.return_layers[name]
if isinstance(x, (tuple, list)):
# If model tap is a tuple or list, concat or select first element
# FIXME this may need to be more generic / flexible for some nets
out[out_id] = torch.cat(x, 1) if self.concat else x[0]
else:
out[out_id] = x
return out
def forward(self, x) -> Dict[str, torch.Tensor]:
return self._collect(x)
# Path: models/timesformer/models/features.py
class FeatureHookNet(nn.ModuleDict):
""" FeatureHookNet
Wrap a model and extract features specified by the out indices using forward/forward-pre hooks.
If `no_rewrite` is True, features are extracted via hooks without modifying the underlying
network in any way.
If `no_rewrite` is False, the model will be re-written as in the
FeatureList/FeatureDict case by folding first to second (Sequential only) level modules into this one.
FIXME this does not currently work with Torchscript, see FeatureHooks class
"""
def __init__(
self, model,
out_indices=(0, 1, 2, 3, 4), out_map=None, out_as_dict=False, no_rewrite=False,
feature_concat=False, flatten_sequential=False, default_hook_type='forward'):
super(FeatureHookNet, self).__init__()
assert not torch.jit.is_scripting()
self.feature_info = _get_feature_info(model, out_indices)
self.out_as_dict = out_as_dict
layers = OrderedDict()
hooks = []
if no_rewrite:
assert not flatten_sequential
if hasattr(model, 'reset_classifier'): # make sure classifier is removed?
model.reset_classifier(0)
layers['body'] = model
hooks.extend(self.feature_info.get_dicts())
else:
modules = _module_list(model, flatten_sequential=flatten_sequential)
remaining = {f['module']: f['hook_type'] if 'hook_type' in f else default_hook_type
for f in self.feature_info.get_dicts()}
for new_name, old_name, module in modules:
layers[new_name] = module
for fn, fm in module.named_modules(prefix=old_name):
if fn in remaining:
hooks.append(dict(module=fn, hook_type=remaining[fn]))
del remaining[fn]
if not remaining:
break
assert not remaining, f'Return layers ({remaining}) are not present in model'
self.update(layers)
self.hooks = FeatureHooks(hooks, model.named_modules(), out_map=out_map)
def forward(self, x):
for name, module in self.items():
x = module(x)
out = self.hooks.get_output(x.device)
return out if self.out_as_dict else list(out.values())
# Path: models/timesformer/models/conv2d_same.py
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
def forward(self, x):
return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# Path: models/timesformer/models/linear.py
class Linear(nn.Linear):
def forward(self, input: torch.Tensor) -> torch.Tensor:
if torch.jit.is_scripting():
bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None
return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias)
else:
return F.linear(input, self.weight, self.bias)
# Path: models/timesformer/models/helpers.py
import logging
import os
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from collections import OrderedDict
from copy import deepcopy
from typing import Callable
from models.timesformer.models.features import FeatureListNet, FeatureDictNet, FeatureHookNet
from models.timesformer.models.conv2d_same import Conv2dSame
from models.timesformer.models.linear import Linear
conv1_name = cfg['first_conv']
_logger.info('Converting first conv (%s) pretrained weights from 3 to 1 channel' % conv1_name)
conv1_weight = state_dict[conv1_name + '.weight']
conv1_type = conv1_weight.dtype
conv1_weight = conv1_weight.float()
O, I, J, K = conv1_weight.shape
if I > 3:
assert conv1_weight.shape[1] % 3 == 0
# For models with space2depth stems
conv1_weight = conv1_weight.reshape(O, I // 3, 3, J, K)
conv1_weight = conv1_weight.sum(dim=2, keepdim=False)
else:
conv1_weight = conv1_weight.sum(dim=1, keepdim=True)
conv1_weight = conv1_weight.to(conv1_type)
state_dict[conv1_name + '.weight'] = conv1_weight
elif in_chans != 3:
conv1_name = cfg['first_conv']
conv1_weight = state_dict[conv1_name + '.weight']
conv1_type = conv1_weight.dtype
conv1_weight = conv1_weight.float()
O, I, J, K = conv1_weight.shape
if I != 3:
_logger.warning('Deleting first conv (%s) from pretrained weights.' % conv1_name)
del state_dict[conv1_name + '.weight']
strict = False
else:
_logger.info('Repeating first conv (%s) weights in channel dim.' % conv1_name)
repeat = int(math.ceil(in_chans / 3))
conv1_weight = conv1_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
conv1_weight *= (3 / float(in_chans))
conv1_weight = conv1_weight.to(conv1_type)
state_dict[conv1_name + '.weight'] = conv1_weight
classifier_name = cfg['classifier']
if num_classes == 1000 and cfg['num_classes'] == 1001:
# special case for imagenet trained models with extra background class in pretrained weights
classifier_weight = state_dict[classifier_name + '.weight']
state_dict[classifier_name + '.weight'] = classifier_weight[1:]
classifier_bias = state_dict[classifier_name + '.bias']
state_dict[classifier_name + '.bias'] = classifier_bias[1:]
elif num_classes != state_dict[classifier_name + '.weight'].size(0):
#print('Removing the last fully connected layer due to dimensions mismatch ('+str(num_classes)+ ' != '+str(state_dict[classifier_name + '.weight'].size(0))+').', flush=True)
# completely discard fully connected for all other differences between pretrained and created model
del state_dict[classifier_name + '.weight']
del state_dict[classifier_name + '.bias']
strict = False
## Resizing the positional embeddings in case they don't match
if num_patches + 1 != state_dict['pos_embed'].size(1):
pos_embed = state_dict['pos_embed']
cls_pos_embed = pos_embed[0,0,:].unsqueeze(0).unsqueeze(1)
other_pos_embed = pos_embed[0,1:,:].unsqueeze(0).transpose(1, 2)
new_pos_embed = F.interpolate(other_pos_embed, size=(num_patches), mode='nearest')
new_pos_embed = new_pos_embed.transpose(1, 2)
new_pos_embed = torch.cat((cls_pos_embed, new_pos_embed), 1)
state_dict['pos_embed'] = new_pos_embed
## Resizing time embeddings in case they don't match
if 'time_embed' in state_dict and num_frames != state_dict['time_embed'].size(1):
time_embed = state_dict['time_embed'].transpose(1, 2)
new_time_embed = F.interpolate(time_embed, size=(num_frames), mode='nearest')
state_dict['time_embed'] = new_time_embed.transpose(1, 2)
## Initializing temporal attention
if attention_type == 'divided_space_time':
new_state_dict = state_dict.copy()
for key in state_dict:
if 'blocks' in key and 'attn' in key:
new_key = key.replace('attn','temporal_attn')
if not new_key in state_dict:
new_state_dict[new_key] = state_dict[key]
else:
new_state_dict[new_key] = state_dict[new_key]
if 'blocks' in key and 'norm1' in key:
new_key = key.replace('norm1','temporal_norm1')
if not new_key in state_dict:
new_state_dict[new_key] = state_dict[key]
else:
new_state_dict[new_key] = state_dict[new_key]
state_dict = new_state_dict
## Loading the weights
model.load_state_dict(state_dict, strict=False)
def extract_layer(model, layer):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
if not hasattr(model, 'module') and layer[0] == 'module':
layer = layer[1:]
for l in layer:
if hasattr(module, l):
if not l.isdigit():
module = getattr(module, l)
else:
module = module[int(l)]
else:
return module
return module
def set_layer(model, layer, val):
layer = layer.split('.')
module = model
if hasattr(model, 'module') and layer[0] != 'module':
module = model.module
lst_index = 0
module2 = module
for l in layer:
if hasattr(module2, l):
if not l.isdigit():
module2 = getattr(module2, l)
else:
module2 = module2[int(l)]
lst_index += 1
lst_index -= 1
| for l in layer[:lst_index]: |
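The weight-adaptation logic quoted above (summing RGB filters down to one channel, or tiling and rescaling them for more than three input channels) can be exercised on its own. The sketch below is illustrative only and is not part of any prompt or repository above; it assumes a standard 3-channel pretrained kernel and skips the space2depth branch.

import math
import torch

def adapt_first_conv(weight: torch.Tensor, in_chans: int) -> torch.Tensor:
    # weight: pretrained first-conv kernel of shape [out_ch, 3, kH, kW]
    if in_chans == 1:
        # collapse the RGB filters into a single luminance-like filter
        return weight.sum(dim=1, keepdim=True)
    # tile the RGB filters, truncate to in_chans, and rescale to keep activation magnitude
    repeat = int(math.ceil(in_chans / 3))
    tiled = weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
    return tiled * (3 / float(in_chans))

w = torch.randn(64, 3, 7, 7)
assert adapt_first_conv(w, 1).shape == (64, 1, 7, 7)
assert adapt_first_conv(w, 5).shape == (64, 5, 7, 7)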
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: microsoft/MathOctopus
# Path: utils/data/data_utils.py
def create_prompt_dataset(local_rank,
data_path,
data_split,
output_path,
train_phase,
seed,
tokenizer,
max_seq_len,
end_of_conversation_token="<|endoftext|>",
sft_only_data_path=[],
reload=True):
"""
Creates the prompt dataset
"""
os.makedirs(output_path, exist_ok=True)
fname = "_".join(data_path)
sft_cache_key = "_".join(sft_only_data_path)
tokenizer_name = tokenizer.init_kwargs["name_or_path"].replace("/", "_")
fname = f"{fname}_split{data_split}_phase{train_phase}_seed{seed}_tokenizer{tokenizer_name}_seqlen{max_seq_len}_sft{sft_cache_key}"
fname = "_".join(fname.split("/"))
fname = hashlib.sha256(fname.encode()).hexdigest(
) # hash the file name to avoid an overly long file name
train_fname = f"{output_path}/traindata_{fname}.pt"
eval_fname = f"{output_path}/evaldata_{fname}.pt"
cache_found = os.path.isfile(train_fname) and os.path.isfile(eval_fname)
buf_create_cache = torch.ByteTensor([not cache_found]).cuda()
torch.distributed.all_reduce(buf_create_cache)
if local_rank <= 0 and (buf_create_cache.item() != 0 or reload):
if len(data_path) == 1: # Single dataset.
train_dataset, eval_dataset = create_dataset(
local_rank, data_path[0], data_split, output_path, train_phase,
seed, tokenizer, end_of_conversation_token, max_seq_len)
else: # Blending datasets.
train_datasets = []
eval_datasets = []
train_size = 0
eval_size = 0
for d_path in data_path:
train_dataset, eval_dataset = create_dataset(
local_rank, d_path, data_split, output_path, train_phase,
seed, tokenizer, end_of_conversation_token, max_seq_len)
train_datasets.append(train_dataset)
eval_datasets.append(eval_dataset)
train_size += len(train_dataset)
eval_size += len(eval_dataset)
train_dataset = ConcatDataset(train_datasets)
shuffle_idx = get_shuffle_idx(seed, train_size)
train_dataset = Subset(train_dataset, shuffle_idx.tolist())
eval_dataset = ConcatDataset(eval_datasets)
shuffle_idx = get_shuffle_idx(seed, eval_size)
eval_dataset = Subset(eval_dataset, shuffle_idx.tolist())
# Append the SFT-only dataset if it exists, and current phase is 1(SFT).
if train_phase == 1 and sft_only_data_path:
sft_train_datasets = []
sft_eval_datasets = []
sft_train_size = 0
sft_eval_size = 0
for sft_path in sft_only_data_path:
sft_train_dataset, sft_eval_dataset = create_dataset(
local_rank,
sft_path,
"10,0,0",
output_path,
train_phase,
seed,
tokenizer,
end_of_conversation_token,
max_seq_len,
)
sft_train_datasets.append(sft_train_dataset)
sft_eval_datasets.append(sft_eval_dataset)
sft_train_size += len(sft_train_dataset)
sft_eval_size += len(sft_eval_dataset)
if sft_train_datasets: # Check if sft_train_datasets is not empty
sft_train_dataset = ConcatDataset(sft_train_datasets)
train_dataset = ConcatDataset(
[train_dataset, sft_train_dataset])
shuffle_idx = get_shuffle_idx(seed, len(train_dataset))
train_dataset = Subset(train_dataset, shuffle_idx.tolist())
if sft_eval_datasets: # Check if sft_eval_datasets is not empty
sft_eval_dataset = ConcatDataset(sft_eval_datasets)
eval_dataset = ConcatDataset([eval_dataset, sft_eval_dataset])
shuffle_idx = get_shuffle_idx(seed, len(eval_dataset))
eval_dataset = Subset(eval_dataset, shuffle_idx.tolist())
torch.save(train_dataset, train_fname)
torch.save(eval_dataset, eval_fname)
torch.distributed.barrier()
return torch.load(train_fname), torch.load(eval_fname)
# Path: utils/utils.py
def print_rank_0(msg, rank=0):
if rank <= 0:
print(msg)
# Path: utils/utils.py
def to_device(batch, device):
output = {}
for k, v in batch.items():
try:
output[k] = v.to(device)
except:
output[k] = v
return output
# Path: utils/utils.py
def save_hf_format(model, tokenizer, args, sub_folder=""):
# used to save huggingface format, so we can use it for hf.from_pretrained
model_to_save = model.module if hasattr(model, 'module') else model
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
output_dir = os.path.join(args.output_dir, sub_folder)
os.makedirs(output_dir, exist_ok=True)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
save_dict = model_to_save.state_dict()
for key in list(save_dict.keys()):
if "lora" in key:
del save_dict[key]
torch.save(save_dict, output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
# Path: utils/utils.py
def set_random_seed(seed):
if seed is not None:
set_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Path: utils/utils.py
def get_all_reduce_mean(tensor):
torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)
tensor = tensor / torch.distributed.get_world_size()
return tensor
# Path: utils/utils.py
def get_optimizer_grouped_parameters(model,
weight_decay,
no_decay_name_list=[
"bias", "LayerNorm.weight"
]):
optimizer_grouped_parameters = [
{
"params": [
p for n, p in model.named_parameters()
if (not any(nd in n
for nd in no_decay_name_list) and p.requires_grad)
],
"weight_decay":
weight_decay,
},
{
"params": [
p for n, p in model.named_parameters()
if (any(nd in n
for nd in no_decay_name_list) and p.requires_grad)
],
"weight_decay":
0.0,
},
]
return optimizer_grouped_parameters
# Path: utils/utils.py
def save_zero_three_model(model_ema, tokenizer, global_rank, save_dir, zero_stage=0):
zero_stage_3 = (zero_stage == 3)
os.makedirs(save_dir, exist_ok=True)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = 'config.json'
output_model_file = os.path.join(save_dir, WEIGHTS_NAME)
output_config_file = os.path.join(save_dir, CONFIG_NAME)
model_to_save = model_ema.module if hasattr(model_ema,
'module') else model_ema
if not zero_stage_3:
if global_rank == 0:
torch.save(model_to_save.state_dict(), output_model_file)
else:
output_state_dict = {}
for k, v in model_to_save.named_parameters():
if hasattr(v, 'ds_id'):
with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([v
]),
enabled=zero_stage_3):
v_p = v.data.cpu()
else:
v_p = v.cpu()
if global_rank == 0 and "lora" not in k:
output_state_dict[k] = v_p
if global_rank == 0:
torch.save(output_state_dict, output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_pretrained(save_dir)
del output_state_dict
# Path: utils/utils.py
def load_hf_tokenizer(model_name_or_path, fast_tokenizer=True):
# if os.path.exists(model_name_or_path):
# # Locally tokenizer loading has some issue, so we need to force download
# model_json = os.path.join(model_name_or_path, "config.json")
# if os.path.exists(model_json):
# model_json_file = json.load(open(model_json))
# model_name = model_json_file["_name_or_path"]
# # tokenizer = AutoTokenizer.from_pretrained(model_name,
# # fast_tokenizer=True)
# tokenizer = LlamaTokenizer.from_pretrained(model_name,
# padding_side = 'left',
# fast_tokenizer=True)
# print('i am loading here')
# else:
tokenizer = LlamaTokenizer.from_pretrained(model_name_or_path,
padding_side = 'left',
fast_tokenizer=True)
# tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,
# fast_tokenizer=True)
return tokenizer
# Path: utils/ds_utils.py
def get_train_ds_config(offload,
stage=2,
enable_hybrid_engine=False,
inference_tp_size=1,
release_inference_cache=False,
pin_parameters=True,
tp_gather_partition_size=8,
max_out_tokens=512):
device = "cpu" if offload else "none"
zero_opt_dict = {
"stage": stage,
"offload_param": {
"device": device
},
"offload_optimizer": {
"device": device
},
"stage3_param_persistence_threshold": 1e4,
"stage3_max_live_parameters": 3e7,
"stage3_prefetch_bucket_size": 3e7,
"memory_efficient_linear": False
}
return {
"train_batch_size": GLOBAL_BATCH_SIZE,
"train_micro_batch_size_per_gpu": MICRO_BATCH_SIZE,
"steps_per_print": 10,
"zero_optimization": zero_opt_dict,
"bf16": {
"enabled": True,
"loss_scale_window": 50,
"min_loss_scale": 1e-10,
},
"gradient_clipping": 1.0,
"prescale_gradients": False,
"wall_clock_breakdown": False,
"hybrid_engine": {
"enabled": enable_hybrid_engine,
"max_out_tokens": max_out_tokens,
"inference_tp_size": inference_tp_size,
"release_inference_cache": release_inference_cache,
"pin_parameters": pin_parameters,
"tp_gather_partition_size": tp_gather_partition_size,
}
}
# Path: utils/module/lora.py
def convert_linear_layer_to_lora(model,
part_module_name,
lora_dim=0,
lora_scaling=1,
lora_droppout=0):
repalce_name = []
for name, module in model.named_modules():
if isinstance(module, nn.Linear) and part_module_name in name:
repalce_name.append(name)
for name in repalce_name:
module = recursive_getattr(model, name)
tmp = LinearLayer_LoRA(
module.weight, lora_dim, lora_scaling, lora_droppout,
module.bias).to(module.weight.device).to(module.weight.dtype)
recursive_setattr(model, name, tmp)
return model
# Path: utils/module/lora.py
def convert_lora_to_linear_layer(model):
repalce_name = []
for name, module in model.named_modules():
if isinstance(module, LinearLayer_LoRA):
repalce_name.append(name)
for name in repalce_name:
module = recursive_getattr(model, name)
zero_stage_3 = hasattr(module.weight, 'ds_id')
with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([
module.weight, module.bias, module.lora_left_weight,
module.lora_right_weight
]),
modifier_rank=0,
enabled=zero_stage_3):
module.fuse_lora_weight()
return model
# Path: utils/module/lora.py
def only_optimize_lora_parameters(model):
# turn off the gradient of all the parameters except the LoRA parameters
for name, param in model.named_parameters():
if "lora_right_weight" in name or "lora_left_weight" in name:
param.requires_grad = True
else:
param.requires_grad = False
return model
# Path: utils/model/model_utils.py
def create_hf_model(model_class,
model_name_or_path,
tokenizer,
ds_config=None,
rlhf_training=False,
disable_dropout=False,
bf16 = True):
model_config = AutoConfig.from_pretrained(model_name_or_path)
if disable_dropout:
model_config.dropout = 0.0
# Note: dschf is defined in function scope to avoid global effects
# https://huggingface.co/docs/transformers/main_classes/deepspeed#nontrainer-deepspeed-integration
if ds_config is not None and ds_config["zero_optimization"]["stage"] == 3:
dschf = HfDeepSpeedConfig(ds_config)
else:
dschf = None
if rlhf_training:
# the weight loading is handled by create critic model
model = model_class.from_config(model_config)
else:
if not bf16:
model = model_class.from_pretrained(
model_name_or_path,
from_tf=bool(".ckpt" in model_name_or_path),
config=model_config)
else:
model = model_class.from_pretrained(
model_name_or_path,
from_tf=bool(".ckpt" in model_name_or_path),
config=model_config,
torch_dtype=torch.bfloat16)
model.config.end_token_id = tokenizer.eos_token_id
model.config.pad_token_id = model.config.eos_token_id
# model.resize_token_embeddings(len(tokenizer))
model.resize_token_embeddings(int(
8 *
math.ceil(len(tokenizer) / 8.0))) # make the vocab size a multiple of 8
return model
# Path: step1_supervised_finetuning/main.py
import argparse
import os
import math
import sys
import torch
import transformers
import deepspeed
from torch.utils import tensorboard
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from typing import Optional, Dict, Sequence
from transformers import (
AutoModelForCausalLM,
SchedulerType,
default_data_collator,
get_scheduler,
LlamaTokenizer
)
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
from utils.data.data_utils import create_prompt_dataset
from utils.utils import print_rank_0, to_device, save_hf_format, set_random_seed, get_all_reduce_mean, get_optimizer_grouped_parameters, save_zero_three_model, load_hf_tokenizer
from utils.ds_utils import get_train_ds_config
from utils.module.lora import convert_linear_layer_to_lora, convert_lora_to_linear_layer, only_optimize_lora_parameters
from utils.model.model_utils import create_hf_model
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# import matplotlib.pyplot as plt
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
def parse_args():
parser = argparse.ArgumentParser(
description=
| "Finetune a transformers model on a causal language modeling task") |
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: OATML-Markslab/ProteinNPT
# Path: utils/esm/modules.py
class AxialTransformerLayer(nn.Module):
"""Implements an Axial MSA Transformer block."""
def __init__(
self,
embedding_dim: int = 768,
ffn_embedding_dim: int = 3072,
num_attention_heads: int = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
max_tokens_per_msa: int = 2**14,
deactivate_col_attention: bool = False,
tranception_attention: bool = False,
num_targets: int = 1,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout_prob = dropout
self.deactivate_col_attention = deactivate_col_attention
row_self_attention = RowSelfAttention(
embedding_dim,
num_attention_heads,
dropout=dropout,
max_tokens_per_msa=max_tokens_per_msa,
tranception_attention=tranception_attention,
num_targets=num_targets,
)
if not self.deactivate_col_attention:
column_self_attention = ColumnSelfAttention(
embedding_dim,
num_attention_heads,
dropout=dropout,
max_tokens_per_msa=max_tokens_per_msa,
)
else:
print("No column attention in the underlying axial transformer module")
feed_forward_layer = FeedForwardNetwork(
embedding_dim,
ffn_embedding_dim,
activation_dropout=activation_dropout,
max_tokens_per_msa=max_tokens_per_msa,
)
self.row_self_attention = self.build_residual(row_self_attention)
if not self.deactivate_col_attention: self.column_self_attention = self.build_residual(column_self_attention)
self.feed_forward_layer = self.build_residual(feed_forward_layer)
def build_residual(self, layer: nn.Module):
return NormalizedResidualBlock(
layer,
self.embedding_dim,
self.dropout_prob,
)
def forward(
self,
x: torch.Tensor,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_head_weights: bool = False,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer implementation.
"""
x, row_attn = self.row_self_attention(
x,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
)
if not self.deactivate_col_attention:
x, column_attn = self.column_self_attention(
x,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
)
else:
column_attn = None
x = self.feed_forward_layer(x)
if need_head_weights:
return x, column_attn, row_attn
else:
return x
# Path: utils/esm/modules.py
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
if padding_idx is not None:
num_embeddings_ = num_embeddings + padding_idx + 1
else:
num_embeddings_ = num_embeddings
super().__init__(num_embeddings_, embedding_dim, padding_idx)
self.max_positions = num_embeddings
def forward(self, input: torch.Tensor):
"""Input is expected to be of size [bsz x seqlen]."""
if input.size(1) > self.max_positions:
raise ValueError(
f"Sequence length {input.size(1)} above maximum "
f" sequence length of {self.max_positions}"
)
mask = input.ne(self.padding_idx).int()
positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
# Path: utils/esm/modules.py
class RobertaLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, weight):
super().__init__()
self.dense = nn.Linear(embed_dim, embed_dim)
self.layer_norm = ESM1bLayerNorm(embed_dim)
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight) + self.bias
return x
# Path: utils/esm/modules.py
class ESM1bLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
# Path: utils/esm/axial_attention.py
class RowSelfAttention(nn.Module):
"""Compute self-attention over rows of a 2D input."""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
max_tokens_per_msa: int = 2 ** 16,
tranception_attention: bool = False,
num_targets: int = 1,
):
super().__init__()
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim ** -0.5
self.max_tokens_per_msa = max_tokens_per_msa
self.attn_shape = "hnij"
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.out_proj = nn.Linear(embed_dim, embed_dim)
self.dropout_module = nn.Dropout(dropout)
self.tranception_attention = tranception_attention
self.num_targets = num_targets
if self.tranception_attention:
assert self.num_heads%4==0, "Invalid number of heads. Tranception requires the number of heads to be a multiple of 4."
self.num_heads_per_kernel_size = self.num_heads // 4
self.query_depthwiseconv = nn.ModuleDict()
self.key_depthwiseconv = nn.ModuleDict()
self.value_depthwiseconv = nn.ModuleDict()
for kernel_idx, kernel in enumerate([3,5,7]):
self.query_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel,self.num_targets)
self.key_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel,self.num_targets)
self.value_depthwiseconv[str(kernel_idx)] = SpatialDepthWiseConvolution(self.head_dim,kernel,self.num_targets)
def align_scaling(self, q):
num_rows = q.size(0)
return self.scaling / math.sqrt(num_rows)
def _batched_forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
max_rows = max(1, self.max_tokens_per_msa // num_cols)
attns = 0
scaling = self.align_scaling(x)
for start in range(0, num_rows, max_rows):
attn_weights = self.compute_attention_weights(
x[start : start + max_rows],
scaling,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask[:, start : start + max_rows]
if self_attn_padding_mask is not None
else None,
)
attns += attn_weights
attn_probs = attns.softmax(-1)
attn_probs = self.dropout_module(attn_probs)
outputs = []
for start in range(0, num_rows, max_rows):
output = self.compute_attention_update(x[start : start + max_rows], attn_probs)
outputs.append(output)
output = torch.cat(outputs, 0)
return output, attn_probs
def compute_attention_weights(
self,
x,
scaling: float,
self_attn_mask=None,
self_attn_padding_mask=None,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)
k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)
q *= scaling
if self_attn_padding_mask is not None:
# Zero out any padded aligned positions - this is important since
# we take a sum across the alignment axis.
q *= 1 - self_attn_padding_mask.permute(1, 2, 0).unsqueeze(3).unsqueeze(4).to(q)
if self.tranception_attention:
# We do not do anything on the first self.num_heads_per_kernel_size heads (kernel =1)
query_list=[q[:,:,:,:self.num_heads_per_kernel_size,:]]
key_list=[k[:,:,:,:self.num_heads_per_kernel_size,:]]
for kernel_idx in range(3):
query_list.append(self.query_depthwiseconv[str(kernel_idx)](q[:,:,:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:]))
key_list.append(self.key_depthwiseconv[str(kernel_idx)](k[:,:,:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:]))
q=torch.cat(query_list, dim=1)
k=torch.cat(key_list, dim=1)
attn_weights = torch.einsum(f"rinhd,rjnhd->{self.attn_shape}", q, k)
if self_attn_mask is not None:
raise NotImplementedError
# Mask Size: [B x R x C], Weights Size: [H x B x C x C]
if self_attn_padding_mask is not None:
attn_weights = attn_weights.masked_fill(
self_attn_padding_mask[:, 0].unsqueeze(0).unsqueeze(2),
-10000,
)
return attn_weights
def compute_attention_update(
self,
x,
attn_probs,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)
if self.tranception_attention:
value_list=[v[:,:,:,:self.num_heads_per_kernel_size,:]]
for kernel_idx in range(3):
value_list.append(self.value_depthwiseconv[str(kernel_idx)](v[:,:,:,(kernel_idx+1)*self.num_heads_per_kernel_size:(kernel_idx+2)*self.num_heads_per_kernel_size,:]))
v=torch.cat(value_list, dim=1)
context = torch.einsum(f"{self.attn_shape},rjnhd->rinhd", attn_probs, v)
context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)
output = self.out_proj(context)
return output
def forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
if (num_rows * num_cols > self.max_tokens_per_msa) and not torch.is_grad_enabled():
return self._batched_forward(x, self_attn_mask, self_attn_padding_mask)
else:
scaling = self.align_scaling(x)
attn_weights = self.compute_attention_weights(
x, scaling, self_attn_mask, self_attn_padding_mask
)
attn_probs = attn_weights.softmax(-1)
attn_probs = self.dropout_module(attn_probs)
output = self.compute_attention_update(x, attn_probs)
return output, attn_probs
# Path: utils/esm/axial_attention.py
class ColumnSelfAttention(nn.Module):
"""Compute self-attention over columns of a 2D input."""
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
max_tokens_per_msa: int = 2 ** 16,
):
super().__init__()
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim ** -0.5
self.max_tokens_per_msa = max_tokens_per_msa
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.out_proj = nn.Linear(embed_dim, embed_dim)
self.dropout_module = nn.Dropout(dropout)
def _batched_forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
max_cols = max(1, self.max_tokens_per_msa // num_rows)
outputs = []
attns = []
for start in range(0, num_cols, max_cols):
output, attn = self(
x[:, start : start + max_cols],
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask[:, :, start : start + max_cols]
if self_attn_padding_mask is not None
else None,
)
outputs.append(output)
attns.append(attn)
output = torch.cat(outputs, 1)
attns = torch.cat(attns, 1)
return output, attns
def compute_attention_update(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
if num_rows == 1:
# if there is only 1 position, this is equivalent and doesn't break with padding
attn_probs = torch.ones(
self.num_heads,
num_cols,
batch_size,
num_rows,
num_rows,
device=x.device,
dtype=x.dtype,
)
output = self.out_proj(self.v_proj(x))
else:
q = self.q_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)
k = self.k_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)
v = self.v_proj(x).view(num_rows, num_cols, batch_size, self.num_heads, self.head_dim)
q *= self.scaling
attn_weights = torch.einsum("icnhd,jcnhd->hcnij", q, k)
if self_attn_mask is not None:
raise NotImplementedError
if self_attn_padding_mask is not None:
attn_weights = attn_weights.masked_fill(
self_attn_padding_mask.permute(2, 0, 1).unsqueeze(0).unsqueeze(3),
-10000,
)
attn_probs = attn_weights.softmax(-1)
attn_probs = self.dropout_module(attn_probs)
context = torch.einsum("hcnij,jcnhd->icnhd", attn_probs, v)
context = context.contiguous().view(num_rows, num_cols, batch_size, embed_dim)
output = self.out_proj(context)
return output, attn_probs
def forward(
self,
x,
self_attn_mask=None,
self_attn_padding_mask=None,
):
num_rows, num_cols, batch_size, embed_dim = x.size()
# if False and num_rows * num_cols > 2 ** 14 and not torch.is_grad_enabled():
if (num_rows * num_cols) > self.max_tokens_per_msa and not torch.is_grad_enabled():
return self._batched_forward(
x,
self_attn_mask,
self_attn_padding_mask,
)
else:
return self.compute_attention_update(x, self_attn_mask, self_attn_padding_mask)
# Path: proteinnpt/model.py
import sys,os
import json
import torch
import torch.nn as nn
import utils
from collections import defaultdict
from torch.optim import AdamW
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import ConvBertConfig, ConvBertLayer
from utils.esm.modules import (
AxialTransformerLayer,
LearnedPositionalEmbedding,
RobertaLMHead,
ESM1bLayerNorm,
)
from utils.esm.axial_attention import RowSelfAttention, ColumnSelfAttention
class ProteinNPTModel(nn.Module):
def __init__(self, args, alphabet):
super().__init__()
self.args = args
self.alphabet = alphabet
self.alphabet_size = len(alphabet)
self.padding_idx = alphabet.padding_idx
self.mask_idx = alphabet.mask_idx
self.cls_idx = alphabet.cls_idx
self.eos_idx = alphabet.eos_idx
self.prepend_bos = alphabet.prepend_bos
self.append_eos = alphabet.append_eos
self.target_names_input = self.args.target_config.keys()
self.target_names = [x for x in self.args.target_config.keys() if self.args.target_config[x]["in_NPT_loss"]]
self.num_targets_input = len(self.target_names_input) #Includes all targets, incl. zero-shot fitness predictions
self.num_targets = len(self.target_names) #Number of actual targets we want to predict
self.MSA_sample_sequences = None
self.training_sample_sequences_indices = None
self.device = None
self.optimizer = None
self.model_type = args.model_type
self.PNPT_ensemble_test_num_seeds = -1
self.PNPT_no_reconstruction_error = False
self.deactivate_col_attention = False
self.tranception_attention = False
assert self.args.embed_dim % self.args.attention_heads ==0, "Embedding size {} needs to be a multiple of number of heads {}".format(self.args.embed_dim, self.args.attention_heads)
if self.args.aa_embeddings in ["MSA_Transformer","ESM1v"]:
model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)
self.aa_embedding = model
self.aa_embedding_dim = self.aa_embedding.embed_tokens.weight.shape[-1]
elif self.args.aa_embeddings == "Tranception":
self.aa_embedding_dim = 1280
config = json.load(open(args.embedding_model_location+os.sep+'config.json'))
config = utils.tranception.config.TranceptionConfig(**config)
config.tokenizer = self.alphabet
config.inference_time_retrieval_type = None
config.retrieval_aggregation_mode = None
self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)
elif self.args.aa_embeddings == "Linear_embedding":
self.aa_embedding = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.aa_positions_embedding = LearnedPositionalEmbedding(
self.args.max_positions,
self.args.embed_dim,
self.padding_idx,
)
self.aa_embedding_dim = self.args.embed_dim
if self.aa_embedding_dim != self.args.embed_dim: #Need to project internally
self.token_embedding_projection = nn.Linear(
self.aa_embedding_dim,
self.args.embed_dim
)
self.token_embedding_expansion = nn.Linear(
self.args.embed_dim,
self.aa_embedding_dim
)
self.target_embedding = nn.ModuleDict(
{
target_name:
nn.Linear(
self.args.target_config[target_name]["dim"] + 1, #Need to add one as we append the mask flag to each input target
self.args.embed_dim
)
if self.args.target_config[target_name]["type"]=="continuous"
else
nn.Embedding(
self.args.target_config[target_name]["dim"],
self.args.embed_dim
)
for target_name in self.target_names_input
}
)
self.dropout_module = nn.Dropout(self.args.dropout)
self.layers = nn.ModuleList(
[
AxialTransformerLayer(
self.args.embed_dim,
self.args.ffn_embed_dim,
self.args.attention_heads,
self.args.dropout,
self.args.attention_dropout,
self.args.activation_dropout,
getattr(self.args, "max_tokens_per_msa", self.args.max_tokens_per_msa),
self.deactivate_col_attention,
self.tranception_attention,
self.num_targets_input,
)
for _ in range(self.args.num_protein_npt_layers)
]
)
self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)
self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)
if self.args.aa_embeddings in ["MSA_Transformer","ESM1v"]:
| weight = self.aa_embedding.embed_tokens.weight |
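A quick shape check helps when reading the row attention above: the einsum "rinhd,rjnhd->hnij" sums the attention logits over the alignment rows, so all rows share a single column-to-column attention map per head, and "hnij,rjnhd->rinhd" maps it back. The toy tensors below only illustrate that contract and are not taken from the repository.

import torch

R, C, N, H, D = 4, 6, 2, 3, 8                      # rows, cols, batch, heads, head_dim
q = torch.randn(R, C, N, H, D)
k = torch.randn(R, C, N, H, D)
v = torch.randn(R, C, N, H, D)

attn = torch.einsum("rinhd,rjnhd->hnij", q, k)     # [H, N, C, C], tied across rows
probs = attn.softmax(dim=-1)
out = torch.einsum("hnij,rjnhd->rinhd", probs, v)  # back to [R, C, N, H, D]
assert attn.shape == (H, N, C, C) and out.shape == (R, C, N, H, D)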
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: CVHub520/yolov5_obb
# Path: DOTA_devkit/DOTA2JSON.py
def generate_json_labels(src_path, out_path, trainval=True):
"""Generate .json labels which is similar to coco format
Args:
src_path: dataset path containing images and labelTxt folders.
out_path: output json file path
trainval: trainval or test?
"""
img_path = os.path.join(src_path, 'images')
label_path = os.path.join(src_path, 'labelTxt')
img_lists = os.listdir(img_path)
data_dict = []
with open(out_path, 'w') as f:
for id, img in enumerate(img_lists):
img_info = {}
img_name = osp.splitext(img)[0]
label = os.path.join(label_path, img_name+'.txt')
img = Image.open(osp.join(img_path, img))
img_info['filename'] = img_name+'.png'
img_info['height'] = img.height
img_info['width'] = img.width
img_info['id'] = id
if(trainval == True):
if(os.path.exists(label) == False):
print('Label:'+img_name+'.txt'+' Not Exist')
else:
bboxes, labels, bboxes_ignore, labels_ignore = parse_ann_info(
img_path, label_path, img_name)
ann = {}
ann['bboxes'] = bboxes
ann['labels'] = labels
ann['bboxes_ignore'] = bboxes_ignore
ann['labels_ignore'] = labels_ignore
img_info['annotations'] = ann
data_dict.append(img_info)
json.dump(data_dict, f)
# Path: DOTA_devkit/ImgSplit_multi_process.py
class splitbase():
def __init__(self,
basepath,
outpath,
code='utf-8',
gap=512,
subsize=1024,
thresh=0.7,
choosebestpoint=True,
ext='.png',
padding=True,
num_process=8
):
"""
:param basepath: base path for dota data
:param outpath: output base path for dota data,
the basepath and outpath have the same subdirectories, 'images' and 'labelTxt'
:param code: encoding format of txt file
:param gap: overlap between two patches
:param subsize: subsize of patch
:param thresh: threshold that determines whether to keep an instance when it is cut during splitting
:param choosebestpoint: whether to reorder the split polygon's points to best match the original polygon
:param ext: file extension of the image format
:param padding: whether to pad the images so that they all have the same size
"""
self.basepath = basepath
self.outpath = outpath
self.code = code
self.gap = gap
self.subsize = subsize
self.slide = self.subsize - self.gap
self.thresh = thresh
self.imagepath = os.path.join(self.basepath, 'images')
self.labelpath = os.path.join(self.basepath, 'labelTxt')
self.outimagepath = os.path.join(self.outpath, 'images')
self.outlabelpath = os.path.join(self.outpath, 'labelTxt')
self.choosebestpoint = choosebestpoint
self.ext = ext
self.padding = padding
self.num_process = num_process
self.pool = Pool(num_process)
print('padding:', padding)
# pdb.set_trace()
if not os.path.isdir(self.outpath):
os.mkdir(self.outpath)
if not os.path.isdir(self.outimagepath):
# pdb.set_trace()
os.mkdir(self.outimagepath)
if not os.path.isdir(self.outlabelpath):
os.mkdir(self.outlabelpath)
# pdb.set_trace()
# point: (x, y), rec: (xmin, ymin, xmax, ymax)
# def __del__(self):
# self.f_sub.close()
# grid --> (x, y) position of grids
def polyorig2sub(self, left, up, poly):
polyInsub = np.zeros(len(poly))
for i in range(int(len(poly)/2)):
polyInsub[i * 2] = int(poly[i * 2] - left)
polyInsub[i * 2 + 1] = int(poly[i * 2 + 1] - up)
return polyInsub
def calchalf_iou(self, poly1, poly2):
"""
This is not the usual IoU; it is the intersection area divided by the area of poly1
"""
inter_poly = poly1.intersection(poly2)
inter_area = inter_poly.area
poly1_area = poly1.area
half_iou = inter_area / poly1_area
return inter_poly, half_iou
def saveimagepatches(self, img, subimgname, left, up):
subimg = copy.deepcopy(
img[up: (up + self.subsize), left: (left + self.subsize)])
outdir = os.path.join(self.outimagepath, subimgname + self.ext)
h, w, c = np.shape(subimg)
if (self.padding):
outimg = np.zeros((self.subsize, self.subsize, 3))
outimg[0:h, 0:w, :] = subimg
cv2.imwrite(outdir, outimg)
else:
cv2.imwrite(outdir, subimg)
def GetPoly4FromPoly5(self, poly):
distances = [cal_line_length((poly[i * 2], poly[i * 2 + 1]), (poly[(
i + 1) * 2], poly[(i + 1) * 2 + 1])) for i in range(int(len(poly)/2 - 1))]
distances.append(cal_line_length(
(poly[0], poly[1]), (poly[8], poly[9])))
pos = np.array(distances).argsort()[0]
count = 0
outpoly = []
while count < 5:
#print('count:', count)
if (count == pos):
outpoly.append(
(poly[count * 2] + poly[(count * 2 + 2) % 10])/2)
outpoly.append(
(poly[(count * 2 + 1) % 10] + poly[(count * 2 + 3) % 10])/2)
count = count + 1
elif (count == (pos + 1) % 5):
count = count + 1
continue
else:
outpoly.append(poly[count * 2])
outpoly.append(poly[count * 2 + 1])
count = count + 1
return outpoly
def savepatches(self, resizeimg, objects, subimgname, left, up, right, down):
outdir = os.path.join(self.outlabelpath, subimgname + '.txt')
mask_poly = []
imgpoly = shgeo.Polygon([(left, up), (right, up), (right, down),
(left, down)])
with codecs.open(outdir, 'w', self.code) as f_out:
for obj in objects:
gtpoly = shgeo.Polygon([(obj['poly'][0], obj['poly'][1]),
(obj['poly'][2], obj['poly'][3]),
(obj['poly'][4], obj['poly'][5]),
(obj['poly'][6], obj['poly'][7])])
if (gtpoly.area <= 0):
continue
inter_poly, half_iou = self.calchalf_iou(gtpoly, imgpoly)
# print('writing...')
if (half_iou == 1):
polyInsub = self.polyorig2sub(left, up, obj['poly'])
outline = ' '.join(list(map(str, polyInsub)))
outline = outline + ' ' + \
obj['name'] + ' ' + str(obj['difficult'])
f_out.write(outline + '\n')
elif (half_iou > 0):
# elif (half_iou > self.thresh):
# print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
inter_poly = shgeo.polygon.orient(inter_poly, sign=1)
out_poly = list(inter_poly.exterior.coords)[0: -1]
if len(out_poly) < 4:
continue
out_poly2 = []
for i in range(len(out_poly)):
out_poly2.append(out_poly[i][0])
out_poly2.append(out_poly[i][1])
if (len(out_poly) == 5):
# print('==========================')
out_poly2 = self.GetPoly4FromPoly5(out_poly2)
elif (len(out_poly) > 5):
"""
if the cut instance is a polygon with more than 5 points, we do not handle it currently
"""
continue
if (self.choosebestpoint):
out_poly2 = choose_best_pointorder_fit_another(
out_poly2, obj['poly'])
polyInsub = self.polyorig2sub(left, up, out_poly2)
for index, item in enumerate(polyInsub):
if (item <= 1):
polyInsub[index] = 1
elif (item >= self.subsize):
polyInsub[index] = self.subsize
outline = ' '.join(list(map(str, polyInsub)))
if (half_iou > self.thresh):
outline = outline + ' ' + \
obj['name'] + ' ' + str(obj['difficult'])
else:
# if the left part is too small, label as '2'
outline = outline + ' ' + obj['name'] + ' ' + '2'
f_out.write(outline + '\n')
# else:
# mask_poly.append(inter_poly)
self.saveimagepatches(resizeimg, subimgname, left, up)
def SplitSingle(self, name, rate, extent):
"""
split a single image and ground truth
:param name: image name
:param rate: the resize scale for the image
:param extent: the image format
:return:
"""
img = cv2.imread(os.path.join(self.imagepath, name + extent))
if np.shape(img) == ():
return
fullname = os.path.join(self.labelpath, name + '.txt')
objects = util.parse_dota_poly2(fullname)
for obj in objects:
obj['poly'] = list(map(lambda x: rate*x, obj['poly']))
#obj['poly'] = list(map(lambda x: ([2 * y for y in x]), obj['poly']))
if (rate != 1):
resizeimg = cv2.resize(
img, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
else:
resizeimg = img
outbasename = name + '__' + str(rate) + '__'
weight = np.shape(resizeimg)[1]
height = np.shape(resizeimg)[0]
left, up = 0, 0
while (left < weight):
if (left + self.subsize >= weight):
left = max(weight - self.subsize, 0)
up = 0
while (up < height):
if (up + self.subsize >= height):
up = max(height - self.subsize, 0)
right = min(left + self.subsize, weight - 1)
down = min(up + self.subsize, height - 1)
subimgname = outbasename + str(left) + '___' + str(up)
# self.f_sub.write(name + ' ' + subimgname + ' ' + str(left) + ' ' + str(up) + '\n')
self.savepatches(resizeimg, objects,
subimgname, left, up, right, down)
if (up + self.subsize >= height):
break
else:
up = up + self.slide
if (left + self.subsize >= weight):
break
else:
left = left + self.slide
def splitdata(self, rate):
"""
:param rate: resize rate before cut
"""
imagelist = GetFileFromThisRootDir(self.imagepath)
imagenames = [util.custombasename(x) for x in imagelist if (
util.custombasename(x) != 'Thumbs')]
if self.num_process == 1:
for name in imagenames:
self.SplitSingle(name, rate, self.ext)
else:
# worker = partial(self.SplitSingle, rate=rate, extent=self.ext)
worker = partial(split_single_warp, split_base=self,
rate=rate, extent=self.ext)
self.pool.map(worker, imagenames)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def __setstate__(self, state):
self.__dict__.update(state)
# Path: DOTA_devkit/SplitOnlyImage_multi_process.py
class splitbase():
def __init__(self,
srcpath,
dstpath,
gap=100,
subsize=1024,
ext='.png',
padding=True,
num_process=32):
self.srcpath = srcpath
self.outpath = dstpath
self.gap = gap
self.subsize = subsize
self.slide = self.subsize - self.gap
self.srcpath = srcpath
self.dstpath = dstpath
self.ext = ext
self.padding = padding
self.pool = Pool(num_process)
if not os.path.isdir(self.outpath):
os.mkdir(self.outpath)
def saveimagepatches(self, img, subimgname, left, up, ext='.png'):
subimg = copy.deepcopy(
img[up: (up + self.subsize), left: (left + self.subsize)])
outdir = os.path.join(self.dstpath, subimgname + ext)
h, w, c = np.shape(subimg)
if (self.padding):
outimg = np.zeros((self.subsize, self.subsize, 3))
outimg[0:h, 0:w, :] = subimg
cv2.imwrite(outdir, outimg)
else:
cv2.imwrite(outdir, subimg)
def SplitSingle(self, name, rate, extent):
img = cv2.imread(os.path.join(self.srcpath, name + extent))
assert np.shape(img) != ()
if (rate != 1):
resizeimg = cv2.resize(
img, None, fx=rate, fy=rate, interpolation=cv2.INTER_CUBIC)
else:
resizeimg = img
outbasename = name + '__' + str(rate) + '__'
weight = np.shape(resizeimg)[1]
height = np.shape(resizeimg)[0]
# if (max(weight, height) < self.subsize/2):
# return
left, up = 0, 0
while (left < weight):
if (left + self.subsize >= weight):
left = max(weight - self.subsize, 0)
up = 0
while (up < height):
if (up + self.subsize >= height):
up = max(height - self.subsize, 0)
subimgname = outbasename + str(left) + '___' + str(up)
self.saveimagepatches(resizeimg, subimgname, left, up)
if (up + self.subsize >= height):
break
else:
up = up + self.slide
if (left + self.subsize >= weight):
break
else:
left = left + self.slide
def splitdata(self, rate):
imagelist = util.GetFileFromThisRootDir(self.srcpath)
imagenames = [util.custombasename(x) for x in imagelist if (
util.custombasename(x) != 'Thumbs')]
# worker = partial(self.SplitSingle, rate=rate, extent=self.ext)
worker = partial(split_single_warp, split_base=self,
rate=rate, extent=self.ext)
self.pool.map(worker, imagenames)
#
# for name in imagenames:
# self.SplitSingle(name, rate, self.ext)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def __setstate__(self, state):
self.__dict__.update(state)
# Path: DOTA_devkit/prepare_dota1_ms.py
import os
import os.path as osp
from DOTA_devkit.DOTA2JSON import generate_json_labels
from DOTA_devkit.DOTA2COCO_poly import DOTA2COCOTrain, DOTA2COCOTest, wordname_15
from DOTA_devkit.ImgSplit_multi_process import splitbase as splitbase_trainval
from DOTA_devkit.SplitOnlyImage_multi_process import \
splitbase as splitbase_test
def mkdir_if_not_exists(path):
if not osp.exists(path):
os.mkdir(path)
| def prepare_multi_scale_data(src_path, dst_path, gap=200, subsize=1024, scales=[0.5, 1.0, 1.5], num_process=32): |
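Both split classes above walk the image with a stride of subsize - gap and clamp the final window to the image border. The helper below is a hypothetical, standalone restatement of that origin schedule along a single axis; it is not part of DOTA_devkit.

def window_origins(length: int, subsize: int = 1024, gap: int = 200):
    # left/top coordinates of the patches covering one image axis
    slide = subsize - gap
    origins, pos = [], 0
    while pos < length:
        if pos + subsize >= length:          # clamp the last window to the border
            pos = max(length - subsize, 0)
        origins.append(pos)
        if pos + subsize >= length:
            break
        pos += slide
    return origins

print(window_origins(2500))                  # [0, 824, 1476]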
You will be given python files from a code repository, with the current file being shown last. Your task is to predict the next line of code in the current file.
NOTE: You should only predict the next line in the current file. Do not produce more than one line, and do not provide any explanation.
====REPOSITORY====
# Repo Name: hyw-dev/AFI-ForwardDeduplicate
# Path: models/gmflow/backbone.py
class CNNEncoder(nn.Module):
def __init__(self, output_dim=128,
norm_layer=nn.InstanceNorm2d,
num_output_scales=1,
**kwargs,
):
super(CNNEncoder, self).__init__()
self.num_branch = num_output_scales
feature_dims = [64, 96, 128]
self.conv1 = nn.Conv2d(3, feature_dims[0], kernel_size=7, stride=2, padding=3, bias=False) # 1/2
self.norm1 = norm_layer(feature_dims[0])
self.relu1 = nn.ReLU(inplace=True)
self.in_planes = feature_dims[0]
self.layer1 = self._make_layer(feature_dims[0], stride=1, norm_layer=norm_layer) # 1/2
self.layer2 = self._make_layer(feature_dims[1], stride=2, norm_layer=norm_layer) # 1/4
# highest resolution 1/4 or 1/8
stride = 2 if num_output_scales == 1 else 1
self.layer3 = self._make_layer(feature_dims[2], stride=stride,
norm_layer=norm_layer,
) # 1/4 or 1/8
self.conv2 = nn.Conv2d(feature_dims[2], output_dim, 1, 1, 0)
if self.num_branch > 1:
if self.num_branch == 4:
strides = (1, 2, 4, 8)
elif self.num_branch == 3:
strides = (1, 2, 4)
elif self.num_branch == 2:
strides = (1, 2)
else:
raise ValueError
self.trident_conv = MultiScaleTridentConv(output_dim, output_dim,
kernel_size=3,
strides=strides,
paddings=1,
num_branch=self.num_branch,
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _make_layer(self, dim, stride=1, dilation=1, norm_layer=nn.InstanceNorm2d):
layer1 = ResidualBlock(self.in_planes, dim, norm_layer=norm_layer, stride=stride, dilation=dilation)
layer2 = ResidualBlock(dim, dim, norm_layer=norm_layer, stride=1, dilation=dilation)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.layer1(x) # 1/2
x = self.layer2(x) # 1/4
x = self.layer3(x) # 1/8 or 1/4
x = self.conv2(x)
if self.num_branch > 1:
out = self.trident_conv([x] * self.num_branch) # high to low res
else:
out = [x]
return out
# Path: models/gmflow/transformer.py
class FeatureTransformer(nn.Module):
def __init__(self,
num_layers=6,
d_model=128,
nhead=1,
attention_type='swin',
ffn_dim_expansion=4,
**kwargs,
):
super(FeatureTransformer, self).__init__()
self.attention_type = attention_type
self.d_model = d_model
self.nhead = nhead
self.layers = nn.ModuleList([
TransformerBlock(d_model=d_model,
nhead=nhead,
attention_type=attention_type,
ffn_dim_expansion=ffn_dim_expansion,
with_shift=True if attention_type == 'swin' and i % 2 == 1 else False,
)
for i in range(num_layers)])
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, feature0, feature1,
attn_num_splits=None,
**kwargs,
):
b, c, h, w = feature0.shape
assert self.d_model == c
feature0 = feature0.flatten(-2).permute(0, 2, 1) # [B, H*W, C]
feature1 = feature1.flatten(-2).permute(0, 2, 1) # [B, H*W, C]
if self.attention_type == 'swin' and attn_num_splits > 1:
# global and refine use different number of splits
window_size_h = h // attn_num_splits
window_size_w = w // attn_num_splits
# compute attn mask once
shifted_window_attn_mask = generate_shift_window_attn_mask(
input_resolution=(h, w),
window_size_h=window_size_h,
window_size_w=window_size_w,
shift_size_h=window_size_h // 2,
shift_size_w=window_size_w // 2,
device=feature0.device,
) # [K*K, H/K*W/K, H/K*W/K]
else:
shifted_window_attn_mask = None
# concat feature0 and feature1 in batch dimension to compute in parallel
concat0 = torch.cat((feature0, feature1), dim=0) # [2B, H*W, C]
concat1 = torch.cat((feature1, feature0), dim=0) # [2B, H*W, C]
for layer in self.layers:
concat0 = layer(concat0, concat1,
height=h,
width=w,
shifted_window_attn_mask=shifted_window_attn_mask,
attn_num_splits=attn_num_splits,
)
# update feature1
concat1 = torch.cat(concat0.chunk(chunks=2, dim=0)[::-1], dim=0)
feature0, feature1 = concat0.chunk(chunks=2, dim=0) # [B, H*W, C]
# reshape back
feature0 = feature0.view(b, h, w, c).permute(0, 3, 1, 2).contiguous() # [B, C, H, W]
feature1 = feature1.view(b, h, w, c).permute(0, 3, 1, 2).contiguous() # [B, C, H, W]
return feature0, feature1
# Path: models/gmflow/transformer.py
class FeatureFlowAttention(nn.Module):
"""
flow propagation with self-attention on feature
query: feature0, key: feature0, value: flow
"""
def __init__(self, in_channels,
**kwargs,
):
super(FeatureFlowAttention, self).__init__()
self.q_proj = nn.Linear(in_channels, in_channels)
self.k_proj = nn.Linear(in_channels, in_channels)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, feature0, flow,
local_window_attn=False,
local_window_radius=1,
**kwargs,
):
# q, k: feature [B, C, H, W], v: flow [B, 2, H, W]
if local_window_attn:
return self.forward_local_window_attn(feature0, flow,
local_window_radius=local_window_radius)
b, c, h, w = feature0.size()
query = feature0.view(b, c, h * w).permute(0, 2, 1) # [B, H*W, C]
# a note: the ``correct'' implementation should be:
# ``query = self.q_proj(query), key = self.k_proj(query)''
# this problem is observed while cleaning up the code
# however, this doesn't affect the performance since the projection is a linear operation,
# thus the two projection matrices for key can be merged
# so I just leave it as is in order to not re-train all models :)
query = self.q_proj(query) # [B, H*W, C]
key = self.k_proj(query) # [B, H*W, C]
value = flow.view(b, flow.size(1), h * w).permute(0, 2, 1) # [B, H*W, 2]
scores = torch.matmul(query, key.permute(0, 2, 1)) / (c ** 0.5) # [B, H*W, H*W]
prob = torch.softmax(scores, dim=-1)
out = torch.matmul(prob, value) # [B, H*W, 2]
out = out.view(b, h, w, value.size(-1)).permute(0, 3, 1, 2) # [B, 2, H, W]
return out
def forward_local_window_attn(self, feature0, flow,
local_window_radius=1,
):
# assert flow.size(1) == 2
assert local_window_radius > 0
b, c, h, w = feature0.size()
feature0_reshape = self.q_proj(feature0.view(b, c, -1).permute(0, 2, 1)
).reshape(b * h * w, 1, c) # [B*H*W, 1, C]
kernel_size = 2 * local_window_radius + 1
feature0_proj = self.k_proj(feature0.view(b, c, -1).permute(0, 2, 1)).permute(0, 2, 1).reshape(b, c, h, w)
feature0_window = F.unfold(feature0_proj, kernel_size=kernel_size,
padding=local_window_radius) # [B, C*(2R+1)^2), H*W]
feature0_window = feature0_window.view(b, c, kernel_size ** 2, h, w).permute(
0, 3, 4, 1, 2).reshape(b * h * w, c, kernel_size ** 2) # [B*H*W, C, (2R+1)^2]
flow_window = F.unfold(flow, kernel_size=kernel_size,
padding=local_window_radius) # [B, 2*(2R+1)^2), H*W]
flow_window = flow_window.view(b, 2, kernel_size ** 2, h, w).permute(
0, 3, 4, 2, 1).reshape(b * h * w, kernel_size ** 2, 2) # [B*H*W, (2R+1)^2, 2]
scores = torch.matmul(feature0_reshape, feature0_window) / (c ** 0.5) # [B*H*W, 1, (2R+1)^2]
prob = torch.softmax(scores, dim=-1)
out = torch.matmul(prob, flow_window).view(b, h, w, 2).permute(0, 3, 1, 2).contiguous() # [B, 2, H, W]
return out
# Path: models/gmflow/matching.py
def global_correlation_softmax(feature0, feature1,
pred_bidir_flow=False,
):
# global correlation
b, c, h, w = feature0.shape
feature0 = feature0.view(b, c, -1).permute(0, 2, 1) # [B, H*W, C]
feature1 = feature1.view(b, c, -1) # [B, C, H*W]
correlation = torch.matmul(feature0, feature1).view(b, h, w, h, w) / (c ** 0.5) # [B, H, W, H, W]
# flow from softmax
init_grid = coords_grid(b, h, w, device=correlation.device, dtype=feature0.dtype) # [B, 2, H, W]
grid = init_grid.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2]
correlation = correlation.view(b, h * w, h * w) # [B, H*W, H*W]
if pred_bidir_flow:
correlation = torch.cat((correlation, correlation.permute(0, 2, 1)), dim=0) # [2*B, H*W, H*W]
init_grid = init_grid.repeat(2, 1, 1, 1) # [2*B, 2, H, W]
grid = grid.repeat(2, 1, 1) # [2*B, H*W, 2]
b = b * 2
prob = F.softmax(correlation, dim=-1) # [B, H*W, H*W]
correspondence = torch.matmul(prob, grid).view(b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W]
# when predicting bidirectional flow, flow is the concatenation of forward flow and backward flow
flow = correspondence - init_grid
return flow, prob
# Path: models/gmflow/matching.py
def local_correlation_softmax(feature0, feature1, local_radius,
padding_mode='zeros',
):
b, c, h, w = feature0.size()
coords_init = coords_grid(b, h, w, device=feature0.device, dtype=feature0.dtype) # [B, 2, H, W]
coords = coords_init.view(b, 2, -1).permute(0, 2, 1) # [B, H*W, 2]
local_h = 2 * local_radius + 1
local_w = 2 * local_radius + 1
window_grid = generate_window_grid(-local_radius, local_radius,
-local_radius, local_radius,
local_h, local_w, device=feature0.device, dtype=feature0.dtype) # [2R+1, 2R+1, 2]
window_grid = window_grid.reshape(-1, 2).repeat(b, 1, 1, 1) # [B, 1, (2R+1)^2, 2]
sample_coords = coords.unsqueeze(-2) + window_grid # [B, H*W, (2R+1)^2, 2]
sample_coords_softmax = sample_coords
# exclude coords that are out of image space
valid_x = (sample_coords[:, :, :, 0] >= 0) & (sample_coords[:, :, :, 0] < w) # [B, H*W, (2R+1)^2]
valid_y = (sample_coords[:, :, :, 1] >= 0) & (sample_coords[:, :, :, 1] < h) # [B, H*W, (2R+1)^2]
valid = valid_x & valid_y # [B, H*W, (2R+1)^2], used to mask out invalid values when softmax
# normalize coordinates to [-1, 1]
sample_coords_norm = normalize_coords(sample_coords, h, w) # [-1, 1]
window_feature = F.grid_sample(feature1.contiguous(), sample_coords_norm.contiguous(),
padding_mode=padding_mode, align_corners=True
).permute(0, 2, 1, 3) # [B, H*W, C, (2R+1)^2]
feature0_view = feature0.permute(0, 2, 3, 1).view(b, h * w, 1, c) # [B, H*W, 1, C]
corr = torch.matmul(feature0_view, window_feature).view(b, h * w, -1) / (c ** 0.5) # [B, H*W, (2R+1)^2]
# mask invalid locations
corr[~valid] = -1e9 if feature0.dtype == torch.float else -1e4
prob = F.softmax(corr, -1) # [B, H*W, (2R+1)^2]
correspondence = torch.matmul(prob.unsqueeze(-2), sample_coords_softmax).squeeze(-2).view(
b, h, w, 2).permute(0, 3, 1, 2) # [B, 2, H, W]
flow = correspondence - coords_init
match_prob = prob
return flow, match_prob
# Path: models/gmflow/geometry.py
def flow_warp(feature, flow, mask=False, padding_mode='zeros'):
b, c, h, w = feature.size()
assert flow.size(1) == 2
grid = coords_grid(b, h, w, device=flow.device, dtype=flow.dtype) + flow # [B, 2, H, W]
return bilinear_sample(feature, grid, padding_mode=padding_mode,
return_mask=mask)
# Path: models/gmflow/utils.py
def normalize_img(img0, img1):
# loaded images are in [0, 255]
# normalize by ImageNet mean and std
global mean, std, is_mean_std_loaded
if not is_mean_std_loaded:
mean = mean.to(img0)
std = std.to(img0)
is_mean_std_loaded = True
img0 = (img0 - mean) / std
img1 = (img1 - mean) / std
return img0, img1
# Path: models/gmflow/utils.py
def feature_add_position(feature0, feature1, attn_splits, feature_channels):
pos_enc = PositionEmbeddingSine(num_pos_feats=feature_channels // 2)
if attn_splits > 1: # add position within each split window
feature0_splits = split_feature(feature0, num_splits=attn_splits)
feature1_splits = split_feature(feature1, num_splits=attn_splits)
position = pos_enc(feature0_splits)
feature0_splits = feature0_splits + position
feature1_splits = feature1_splits + position
feature0 = merge_splits(feature0_splits, num_splits=attn_splits)
feature1 = merge_splits(feature1_splits, num_splits=attn_splits)
else:
position = pos_enc(feature0)
feature0 = feature0 + position
feature1 = feature1 + position
return feature0, feature1
# Path: models/gmflow/gmflow.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.gmflow.backbone import CNNEncoder
from models.gmflow.transformer import FeatureTransformer, FeatureFlowAttention
from models.gmflow.matching import global_correlation_softmax, local_correlation_softmax
from models.gmflow.geometry import flow_warp
from models.gmflow.utils import normalize_img, feature_add_position
class GMFlow(nn.Module):
def __init__(self,
num_scales=2,
upsample_factor=4,
feature_channels=128,
attention_type='swin',
num_transformer_layers=6,
ffn_dim_expansion=4,
num_head=1,
**kwargs,
):
super(GMFlow, self).__init__()
self.num_scales = num_scales
self.feature_channels = feature_channels
| self.upsample_factor = upsample_factor |
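To close, a compact illustration of the global-matching idea behind global_correlation_softmax above: correlate every source pixel with every target pixel, softmax over target positions, and read the flow off as the expected target coordinate minus the source coordinate. This is a simplified sketch for intuition only; it omits the bidirectional-flow path and is not the repository implementation.

import torch
import torch.nn.functional as F

def toy_global_matching(f0: torch.Tensor, f1: torch.Tensor) -> torch.Tensor:
    # f0, f1: [B, C, H, W] feature maps; returns a dense flow field [B, 2, H, W]
    b, c, h, w = f0.shape
    corr = torch.einsum("bchw,bcuv->bhwuv", f0, f1).reshape(b, h * w, h * w) / c ** 0.5
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    grid = torch.stack((xs, ys), dim=-1).float().reshape(1, h * w, 2)   # (x, y) per pixel
    match = F.softmax(corr, dim=-1) @ grid                              # expected target coords
    return (match - grid).reshape(b, h, w, 2).permute(0, 3, 1, 2)

flow = toy_global_matching(torch.randn(2, 16, 8, 8), torch.randn(2, 16, 8, 8))
assert flow.shape == (2, 2, 8, 8)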