source (string, 4.8k-15.8k chars) | file_name (string, 9 chars) | cwe (sequence, length 1)
---|---|---|
"""
Implementation of the SHA1 hash function, with utilities to hash a string or the
contents of a file. Also contains a test function to verify that the generated
hash matches what is returned by the hashlib library.
Usage: python sha1.py --string "Hello World!!"
python sha1.py --file "hello_world.txt"
When run without any arguments, it prints the hash of the string "Hello World!!
Welcome to Cryptography"
The SHA1 hash (or SHA1 sum) of a string is produced by a cryptographic hash
function, which means it is easy to compute forwards but extremely difficult to
invert: you can easily calculate the hash of a string, but it is extremely
difficult to recover the original string from its hash. This property is useful
for secure communication, message authentication, payment systems, blockchains,
cryptocurrencies and so on. Note, however, that SHA-1 is no longer considered
collision-resistant, which is why this file carries a CWE-327 annotation.
The algorithm as described in the reference:
First we start with a message. The message is padded and the length of the message
is added to the end. It is then split into blocks of 512 bits or 64 bytes. The blocks
are then processed one at a time. Each block must be expanded and compressed.
The value after each compression is added to a 160-bit buffer called the current hash
state. After the last block is processed, the current hash state is returned as
the final hash.
Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/
"""
import argparse
import hashlib  # hashlib is only used in the test function below
import struct
class SHA1Hash:
"""
Class to contain the entire pipeline for SHA1 hashing algorithm
>>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash()
'872af2d8ac3d8695387e7c804bf0e02c18df9e6e'
"""
def __init__(self, data):
"""
Initializes the variables data and h. h is a list of five 32-bit words (written
as 8-digit hexadecimal numbers) whose decimal values are
(1732584193, 4023233417, 2562383102, 271733878, 3285377520)
respectively; this is the standard SHA-1 initial message digest. The 0x prefix
is Python's notation for hexadecimal literals.
"""
self.data = data
self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def rotate(n, b):
"""
Static method to be used inside other methods. Left rotates n by b.
>>> SHA1Hash('').rotate(12,2)
48
"""
return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
def padding(self):
"""
Pads the input message with a 0x80 byte, zero bytes and the 64-bit big-endian
bit length so that the padded data is a multiple of 64 bytes (512 bits) long
"""
padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
return padded_data
def split_blocks(self):
"""
Returns a list of bytestrings each of length 64
"""
return [
self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
]
# @staticmethod
def expand_block(self, block):
"""
Takes a bytestring-block of length 64, unpacks it to a list of integers and
returns a list of 80 integers after some bit operations
"""
w = list(struct.unpack(">16L", block)) + [0] * 64
for i in range(16, 80):
w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
return w
def final_hash(self):
"""
Calls all the other methods to process the input. Pads the data, then splits
into blocks and then does a series of operations for each block (including
expansion).
For each block, the variable h that was initialized is copied to a, b, c, d, e,
and these five variables undergo several rounds of mixing. After each block is
processed they are added pairwise into h (a to h[0], b to h[1], and so on).
After the last block, h is formatted as the final hash and returned.
"""
self.padded_data = self.padding()
self.blocks = self.split_blocks()
for block in self.blocks:
expanded_block = self.expand_block(block)
a, b, c, d, e = self.h
for i in range(80):
if 0 <= i < 20:
f = (b & c) | ((~b) & d)
k = 0x5A827999
elif 20 <= i < 40:
f = b ^ c ^ d
k = 0x6ED9EBA1
elif 40 <= i < 60:
f = (b & c) | (b & d) | (c & d)
k = 0x8F1BBCDC
elif 60 <= i < 80:
f = b ^ c ^ d
k = 0xCA62C1D6
a, b, c, d, e = (
self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
a,
self.rotate(b, 30),
c,
d,
)
self.h = (
self.h[0] + a & 0xFFFFFFFF,
self.h[1] + b & 0xFFFFFFFF,
self.h[2] + c & 0xFFFFFFFF,
self.h[3] + d & 0xFFFFFFFF,
self.h[4] + e & 0xFFFFFFFF,
)
return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
msg = b"Test String"
assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest() # noqa: S324
def main():
"""
Provides option 'string' or 'file' to take input and prints the calculated SHA1
hash. The correctness check lives in test_sha1_hash() so that it is not run on
every invocation.
"""
parser = argparse.ArgumentParser(description="Process some strings or files")
parser.add_argument(
"--string",
dest="input_string",
default="Hello World!! Welcome to Cryptography",
help="Hash the string",
)
parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
args = parser.parse_args()
input_string = args.input_string
# In either case the hash input must be a bytestring
if args.input_file:
with open(args.input_file, "rb") as f:
hash_input = f.read()
else:
hash_input = bytes(input_string, "utf-8")
print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
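# A minimal cross-check sketch (hashlib is already imported above): the
# pure-Python digest should agree with hashlib's SHA1 for any byte string.
def _crosscheck(data: bytes = b"Hello World!! Welcome to Cryptography") -> bool:
    """Return True when SHA1Hash and hashlib.sha1 produce the same hex digest."""
    return SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324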
| 916728.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
import os
import gc
import time
import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
import safetensors.torch
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
from modules import shared, sd_hijack, devices
cached_ldsr_model: torch.nn.Module = None
# Create LDSR Class
class LDSR:
def load_model_from_config(self, half_attention):
global cached_ldsr_model
if shared.opts.ldsr_cached and cached_ldsr_model is not None:
print("Loading model from cache")
model: torch.nn.Module = cached_ldsr_model
else:
print(f"Loading model from {self.modelPath}")
_, extension = os.path.splitext(self.modelPath)
if extension.lower() == ".safetensors":
pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
else:
pl_sd = torch.load(self.modelPath, map_location="cpu")
sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
config = OmegaConf.load(self.yamlPath)
config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
model: torch.nn.Module = instantiate_from_config(config.model)
model.load_state_dict(sd, strict=False)
model = model.to(shared.device)
if half_attention:
model = model.half()
if shared.cmd_opts.opt_channelslast:
model = model.to(memory_format=torch.channels_last)
sd_hijack.model_hijack.hijack(model) # apply optimization
model.eval()
if shared.opts.ldsr_cached:
cached_ldsr_model = model
return {"model": model}
def __init__(self, model_path, yaml_path):
self.modelPath = model_path
self.yamlPath = yaml_path
@staticmethod
def run(model, selected_path, custom_steps, eta):
example = get_cond(selected_path)
n_runs = 1
guider = None
ckwargs = None
ddim_use_x0_pred = False
temperature = 1.
eta = eta
custom_shape = None
height, width = example["image"].shape[1:3]
split_input = height >= 128 and width >= 128
if split_input:
ks = 128
stride = 64
vqf = 4 #
model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
"vqf": vqf,
"patch_distributed_vq": True,
"tie_braker": False,
"clip_max_weight": 0.5,
"clip_min_weight": 0.01,
"clip_max_tie_weight": 0.5,
"clip_min_tie_weight": 0.01}
else:
if hasattr(model, "split_input_params"):
delattr(model, "split_input_params")
x_t = None
logs = None
for _ in range(n_runs):
if custom_shape is not None:
x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
logs = make_convolutional_sample(example, model,
custom_steps=custom_steps,
eta=eta, quantize_x0=False,
custom_shape=custom_shape,
temperature=temperature, noise_dropout=0.,
corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
ddim_use_x0_pred=ddim_use_x0_pred
)
return logs
def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
model = self.load_model_from_config(half_attention)
# Run settings
diffusion_steps = int(steps)
eta = 1.0
gc.collect()
devices.torch_gc()
im_og = image
width_og, height_og = im_og.size
# The maximum upscale factor is fixed at 4; if it ever becomes configurable, the 4 below should be that variable
down_sample_rate = target_scale / 4
wd = width_og * down_sample_rate
hd = height_og * down_sample_rate
width_downsampled_pre = int(np.ceil(wd))
height_downsampled_pre = int(np.ceil(hd))
if down_sample_rate != 1:
print(
f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
else:
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
# pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
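# Worked example of the padding above (a sketch): a 600x400 crop gives
# ceil(600/64) = 10 and ceil(400/64) = 7 blocks, so pad_w = 10*64 - 600 = 40 and
# pad_h = 7*64 - 400 = 48, i.e. the image is edge-padded to 640x448 before being
# handed to the diffusion model.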
logs = self.run(model["model"], im_padded, diffusion_steps, eta)
sample = logs["sample"]
sample = sample.detach().cpu()
sample = torch.clamp(sample, -1., 1.)
sample = (sample + 1.) / 2. * 255
sample = sample.numpy().astype(np.uint8)
sample = np.transpose(sample, (0, 2, 3, 1))
a = Image.fromarray(sample[0])
# remove padding
a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
del model
gc.collect()
devices.torch_gc()
return a
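# A hedged alternative to the torch.load(...) call in load_model_from_config
# above (assumes PyTorch >= 1.13, which added the weights_only flag): limiting
# unpickling to plain tensors and containers avoids executing arbitrary code
# from an untrusted checkpoint, which is the CWE-502 concern attached to this
# file. A minimal sketch, not the upscaler's actual loading path:
def load_ckpt_weights_only(path):
    """Load a .ckpt state dict without allowing arbitrary pickled objects."""
    pl_sd = torch.load(path, map_location="cpu", weights_only=True)
    return pl_sd.get("state_dict", pl_sd)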
def get_cond(selected_path):
example = {}
up_f = 4
c = selected_path.convert('RGB')
c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
antialias=True)
c_up = rearrange(c_up, '1 c h w -> 1 h w c')
c = rearrange(c, '1 c h w -> 1 h w c')
c = 2. * c - 1.
c = c.to(shared.device)
example["LR_image"] = c
example["image"] = c_up
return example
@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
corrector_kwargs=None, x_t=None
):
ddim = DDIMSampler(model)
bs = shape[0]
shape = shape[1:]
print(f"Sampling with eta = {eta}; steps: {steps}")
samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
mask=mask, x0=x0, temperature=temperature, verbose=False,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs, x_t=x_t)
return samples, intermediates
@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
log = {}
z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=not (hasattr(model, 'split_input_params')
and model.cond_stage_key == 'coordinates_bbox'),
return_original_cond=True)
if custom_shape is not None:
z = torch.randn(custom_shape)
print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
z0 = None
log["input"] = x
log["reconstruction"] = xrec
if ismap(xc):
log["original_conditioning"] = model.to_rgb(xc)
if hasattr(model, 'cond_stage_key'):
log[model.cond_stage_key] = model.to_rgb(xc)
else:
log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
if model.cond_stage_model:
log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
if model.cond_stage_key == 'class_label':
log[model.cond_stage_key] = xc[model.cond_stage_key]
with model.ema_scope("Plotting"):
t0 = time.time()
sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
eta=eta,
quantize_x0=quantize_x0, mask=None, x0=z0,
temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
x_t=x_T)
t1 = time.time()
if ddim_use_x0_pred:
sample = intermediates['pred_x0'][-1]
x_sample = model.decode_first_stage(sample)
try:
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
log["sample_noquant"] = x_sample_noquant
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
except Exception:
pass
log["sample"] = x_sample
log["time"] = t1 - t0
return log
| 177699.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
import numpy as np
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
from torch.optim.lr_scheduler import LambdaLR
from ldm.modules.ema import LitEma
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config
import ldm.models.autoencoder
from packaging import version
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=None,
image_key="image",
colorize_nlabels=None,
monitor=None,
batch_resize_range=None,
scheduler_config=None,
lr_g_factor=1.0,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
use_ema=False
):
super().__init__()
self.embed_dim = embed_dim
self.n_embed = n_embed
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
remap=remap,
sane_index_shape=sane_index_shape)
self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
self.batch_resize_range = batch_resize_range
if self.batch_resize_range is not None:
print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
self.scheduler_config = scheduler_config
self.lr_g_factor = lr_g_factor
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.parameters())
self.model_ema.copy_to(self)
if context is not None:
print(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def init_from_ckpt(self, path, ignore_keys=None):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys or []:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if missing:
print(f"Missing Keys: {missing}")
if unexpected:
print(f"Unexpected Keys: {unexpected}")
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self)
def encode(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
quant, emb_loss, info = self.quantize(h)
return quant, emb_loss, info
def encode_to_prequant(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
return h
def decode(self, quant):
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
return dec
def decode_code(self, code_b):
quant_b = self.quantize.embed_code(code_b)
dec = self.decode(quant_b)
return dec
def forward(self, input, return_pred_indices=False):
quant, diff, (_,_,ind) = self.encode(input)
dec = self.decode(quant)
if return_pred_indices:
return dec, diff, ind
return dec, diff
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
if self.batch_resize_range is not None:
lower_size = self.batch_resize_range[0]
upper_size = self.batch_resize_range[1]
if self.global_step <= 4:
# do the first few batches with max size to avoid later oom
new_resize = upper_size
else:
new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
if new_resize != x.shape[2]:
x = F.interpolate(x, size=new_resize, mode="bicubic")
x = x.detach()
return x
def training_step(self, batch, batch_idx, optimizer_idx):
# https://github.com/pytorch/pytorch/issues/37142
# try not to fool the heuristics
x = self.get_input(batch, self.image_key)
xrec, qloss, ind = self(x, return_pred_indices=True)
if optimizer_idx == 0:
# autoencode
aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train",
predicted_indices=ind)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return aeloss
if optimizer_idx == 1:
# discriminator
discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return discloss
def validation_step(self, batch, batch_idx):
log_dict = self._validation_step(batch, batch_idx)
with self.ema_scope():
self._validation_step(batch, batch_idx, suffix="_ema")
return log_dict
def _validation_step(self, batch, batch_idx, suffix=""):
x = self.get_input(batch, self.image_key)
xrec, qloss, ind = self(x, return_pred_indices=True)
aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
self.global_step,
last_layer=self.get_last_layer(),
split="val"+suffix,
predicted_indices=ind
)
discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
self.global_step,
last_layer=self.get_last_layer(),
split="val"+suffix,
predicted_indices=ind
)
rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
self.log(f"val{suffix}/rec_loss", rec_loss,
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log(f"val{suffix}/aeloss", aeloss,
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
if version.parse(pl.__version__) >= version.parse('1.4.0'):
del log_dict_ae[f"val{suffix}/rec_loss"]
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr_d = self.learning_rate
lr_g = self.lr_g_factor*self.learning_rate
print("lr_d", lr_d)
print("lr_g", lr_g)
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
list(self.decoder.parameters())+
list(self.quantize.parameters())+
list(self.quant_conv.parameters())+
list(self.post_quant_conv.parameters()),
lr=lr_g, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr_d, betas=(0.5, 0.9))
if self.scheduler_config is not None:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
},
{
'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
},
]
return [opt_ae, opt_disc], scheduler
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
log = {}
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if only_inputs:
log["inputs"] = x
return log
xrec, _ = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["inputs"] = x
log["reconstructions"] = xrec
if plot_ema:
with self.ema_scope():
xrec_ema, _ = self(x)
if x.shape[1] > 3:
xrec_ema = self.to_rgb(xrec_ema)
log["reconstructions_ema"] = xrec_ema
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
class VQModelInterface(VQModel):
def __init__(self, embed_dim, *args, **kwargs):
super().__init__(*args, embed_dim=embed_dim, **kwargs)
self.embed_dim = embed_dim
def encode(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
return h
def decode(self, h, force_not_quantize=False):
# also go through quantization layer
if not force_not_quantize:
quant, emb_loss, info = self.quantize(h)
else:
quant = h
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
return dec
ldm.models.autoencoder.VQModel = VQModel
ldm.models.autoencoder.VQModelInterface = VQModelInterface
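# Usage note (a sketch): once this module has been imported, code elsewhere that
# does `from ldm.models.autoencoder import VQModelInterface` resolves to the
# classes re-registered above, which is what the LDSR configs expect (see the
# header comment at the top of this file).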
| 932523.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
class VectorQuantizer2(nn.Module):
"""
Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
avoids costly matrix multiplications and allows for post-hoc remapping of indices.
"""
# NOTE: due to a bug the beta term was applied to the wrong term. for
# backwards compatibility we use the buggy version by default, but you can
# specify legacy=False to fix it.
def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
sane_index_shape=False, legacy=True):
super().__init__()
self.n_e = n_e
self.e_dim = e_dim
self.beta = beta
self.legacy = legacy
self.embedding = nn.Embedding(self.n_e, self.e_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
self.remap = remap
if self.remap is not None:
self.register_buffer("used", torch.tensor(np.load(self.remap)))
self.re_embed = self.used.shape[0]
self.unknown_index = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
self.unknown_index = self.re_embed
self.re_embed = self.re_embed + 1
print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices.")
else:
self.re_embed = n_e
self.sane_index_shape = sane_index_shape
def remap_to_used(self, inds):
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
match = (inds[:, :, None] == used[None, None, ...]).long()
new = match.argmax(-1)
unknown = match.sum(2) < 1
if self.unknown_index == "random":
new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
else:
new[unknown] = self.unknown_index
return new.reshape(ishape)
def unmap_to_all(self, inds):
ishape = inds.shape
assert len(ishape) > 1
inds = inds.reshape(ishape[0], -1)
used = self.used.to(inds)
if self.re_embed > self.used.shape[0]: # extra token
inds[inds >= self.used.shape[0]] = 0 # simply set to zero
back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
return back.reshape(ishape)
def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
assert rescale_logits is False, "Only for interface compatible with Gumbel"
assert return_logits is False, "Only for interface compatible with Gumbel"
# reshape z -> (batch, height, width, channel) and flatten
z = rearrange(z, 'b c h w -> b h w c').contiguous()
z_flattened = z.view(-1, self.e_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
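# The einsum term is the cross product z_flattened @ embedding.weight.T, so d
# holds the squared distances ||z - e_j||^2 expanded as z^2 + e^2 - 2*z.e
# without materializing a full pairwise difference tensor.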
min_encoding_indices = torch.argmin(d, dim=1)
z_q = self.embedding(min_encoding_indices).view(z.shape)
perplexity = None
min_encodings = None
# compute loss for embedding
if not self.legacy:
loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
torch.mean((z_q - z.detach()) ** 2)
else:
loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
z_q = z + (z_q - z).detach()
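# Straight-through estimator: the forward pass outputs the quantized z_q, but
# because (z_q - z).detach() carries no gradient, the backward pass routes
# gradients to z as if the quantization step were the identity.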
# reshape back to match original input shape
z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
if self.remap is not None:
min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
min_encoding_indices = self.remap_to_used(min_encoding_indices)
min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
if self.sane_index_shape:
min_encoding_indices = min_encoding_indices.reshape(
z_q.shape[0], z_q.shape[2], z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def get_codebook_entry(self, indices, shape):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
indices = indices.reshape(shape[0], -1) # add batch axis
indices = self.unmap_to_all(indices)
indices = indices.reshape(-1) # flatten again
# get quantized latent vectors
z_q = self.embedding(indices)
if shape is not None:
z_q = z_q.view(shape)
# reshape back to match original input shape
z_q = z_q.permute(0, 3, 1, 2).contiguous()
return z_q
| 570756.py | [
"Unknown"
] |
#!/usr/bin/python3
import argparse
import ctypes
import functools
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import os.path
sys.path.insert(0, os.path.dirname(os.path.dirname((os.path.abspath(__file__)))))
from youtube_dl.compat import (
compat_input,
compat_http_server,
compat_str,
compat_urlparse,
)
# These are not used outside of buildserver.py thus not in compat.py
try:
import winreg as compat_winreg
except ImportError: # Python 2
import _winreg as compat_winreg
try:
import socketserver as compat_socketserver
except ImportError: # Python 2
import SocketServer as compat_socketserver
class BuildHTTPServer(compat_socketserver.ThreadingMixIn, compat_http_server.HTTPServer):
allow_reuse_address = True
advapi32 = ctypes.windll.advapi32
SC_MANAGER_ALL_ACCESS = 0xf003f
SC_MANAGER_CREATE_SERVICE = 0x02
SERVICE_WIN32_OWN_PROCESS = 0x10
SERVICE_AUTO_START = 0x2
SERVICE_ERROR_NORMAL = 0x1
DELETE = 0x00010000
SERVICE_STATUS_START_PENDING = 0x00000002
SERVICE_STATUS_RUNNING = 0x00000004
SERVICE_ACCEPT_STOP = 0x1
SVCNAME = 'youtubedl_builder'
LPTSTR = ctypes.c_wchar_p
START_CALLBACK = ctypes.WINFUNCTYPE(None, ctypes.c_int, ctypes.POINTER(LPTSTR))
class SERVICE_TABLE_ENTRY(ctypes.Structure):
_fields_ = [
('lpServiceName', LPTSTR),
('lpServiceProc', START_CALLBACK)
]
HandlerEx = ctypes.WINFUNCTYPE(
ctypes.c_int, # return
ctypes.c_int, # dwControl
ctypes.c_int, # dwEventType
ctypes.c_void_p, # lpEventData,
ctypes.c_void_p, # lpContext,
)
def _ctypes_array(c_type, py_array):
ar = (c_type * len(py_array))()
ar[:] = py_array
return ar
def win_OpenSCManager():
res = advapi32.OpenSCManagerW(None, None, SC_MANAGER_ALL_ACCESS)
if not res:
raise Exception('Opening service manager failed - '
'are you running this as administrator?')
return res
def win_install_service(service_name, cmdline):
manager = win_OpenSCManager()
try:
h = advapi32.CreateServiceW(
manager, service_name, None,
SC_MANAGER_CREATE_SERVICE, SERVICE_WIN32_OWN_PROCESS,
SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
cmdline, None, None, None, None, None)
if not h:
raise OSError('Service creation failed: %s' % ctypes.FormatError())
advapi32.CloseServiceHandle(h)
finally:
advapi32.CloseServiceHandle(manager)
def win_uninstall_service(service_name):
manager = win_OpenSCManager()
try:
h = advapi32.OpenServiceW(manager, service_name, DELETE)
if not h:
raise OSError('Could not find service %s: %s' % (
service_name, ctypes.FormatError()))
try:
if not advapi32.DeleteService(h):
raise OSError('Deletion failed: %s' % ctypes.FormatError())
finally:
advapi32.CloseServiceHandle(h)
finally:
advapi32.CloseServiceHandle(manager)
def win_service_report_event(service_name, msg, is_error=True):
with open('C:/sshkeys/log', 'a', encoding='utf-8') as f:
f.write(msg + '\n')
event_log = advapi32.RegisterEventSourceW(None, service_name)
if not event_log:
raise OSError('Could not report event: %s' % ctypes.FormatError())
try:
type_id = 0x0001 if is_error else 0x0004
event_id = 0xc0000000 if is_error else 0x40000000
lines = _ctypes_array(LPTSTR, [msg])
if not advapi32.ReportEventW(
event_log, type_id, 0, event_id, None, len(lines), 0,
lines, None):
raise OSError('Event reporting failed: %s' % ctypes.FormatError())
finally:
advapi32.DeregisterEventSource(event_log)
def win_service_handler(stop_event, *args):
try:
raise ValueError('Handler called with args ' + repr(args))
# TODO: implement the actual service control handling here
except Exception as e:
tb = traceback.format_exc()
msg = str(e) + '\n' + tb
win_service_report_event(service_name, msg, is_error=True)
raise
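# The functions below reference SERVICE_STATUS without defining it; this is a
# minimal ctypes mirror of the Win32 SERVICE_STATUS structure (field layout per
# the Windows service API) so that win_service_set_status() can build one.
class SERVICE_STATUS(ctypes.Structure):
    _fields_ = [
        ('dwServiceType', ctypes.c_uint32),
        ('dwCurrentState', ctypes.c_uint32),
        ('dwControlsAccepted', ctypes.c_uint32),
        ('dwWin32ExitCode', ctypes.c_uint32),
        ('dwServiceSpecificExitCode', ctypes.c_uint32),
        ('dwCheckPoint', ctypes.c_uint32),
        ('dwWaitHint', ctypes.c_uint32),
    ]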
def win_service_set_status(handle, status_code):
svcStatus = SERVICE_STATUS()
svcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
svcStatus.dwCurrentState = status_code
svcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP
svcStatus.dwServiceSpecificExitCode = 0
if not advapi32.SetServiceStatus(handle, ctypes.byref(svcStatus)):
raise OSError('SetServiceStatus failed: %r' % ctypes.FormatError())
def win_service_main(service_name, real_main, argc, argv_raw):
try:
# args = [argv_raw[i].value for i in range(argc)]
stop_event = threading.Event()
handler = HandlerEx(functools.partial(win_service_handler, stop_event))
h = advapi32.RegisterServiceCtrlHandlerExW(service_name, handler, None)
if not h:
raise OSError('Handler registration failed: %s' %
ctypes.FormatError())
# TODO: run the actual service body here and report SERVICE_STATUS_RUNNING
except Exception as e:
tb = traceback.format_exc()
msg = str(e) + '\n' + tb
win_service_report_event(service_name, msg, is_error=True)
raise
def win_service_start(service_name, real_main):
try:
cb = START_CALLBACK(
functools.partial(win_service_main, service_name, real_main))
dispatch_table = _ctypes_array(SERVICE_TABLE_ENTRY, [
SERVICE_TABLE_ENTRY(
service_name,
cb
),
SERVICE_TABLE_ENTRY(None, ctypes.cast(None, START_CALLBACK))
])
if not advapi32.StartServiceCtrlDispatcherW(dispatch_table):
raise OSError('ctypes start failed: %s' % ctypes.FormatError())
except Exception as e:
tb = traceback.format_exc()
msg = str(e) + '\n' + tb
win_service_report_event(service_name, msg, is_error=True)
raise
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--install',
action='store_const', dest='action', const='install',
help='Launch at Windows startup')
parser.add_argument('-u', '--uninstall',
action='store_const', dest='action', const='uninstall',
help='Remove Windows service')
parser.add_argument('-s', '--service',
action='store_const', dest='action', const='service',
help='Run as a Windows service')
parser.add_argument('-b', '--bind', metavar='<host:port>',
action='store', default='0.0.0.0:8142',
help='Bind to host:port (default %default)')
options = parser.parse_args(args=args)
if options.action == 'install':
fn = os.path.abspath(__file__).replace('v:', '\\\\vboxsrv\\vbox')
cmdline = '%s %s -s -b %s' % (sys.executable, fn, options.bind)
win_install_service(SVCNAME, cmdline)
return
if options.action == 'uninstall':
win_uninstall_service(SVCNAME)
return
if options.action == 'service':
win_service_start(SVCNAME, main)
return
host, port_str = options.bind.split(':')
port = int(port_str)
print('Listening on %s:%d' % (host, port))
srv = BuildHTTPServer((host, port), BuildHTTPRequestHandler)
thr = threading.Thread(target=srv.serve_forever)
thr.start()
compat_input('Press ENTER to shut down')
srv.shutdown()
thr.join()
def rmtree(path):
for name in os.listdir(path):
fname = os.path.join(path, name)
if os.path.isdir(fname):
rmtree(fname)
else:
os.chmod(fname, 0o666)
os.remove(fname)
os.rmdir(path)
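# Note on the chmod above: 0o666 marks every file world-writable before removal.
# On Windows, where this build server runs, os.chmod only toggles the read-only
# flag, so the owner-write bit (stat.S_IWRITE / 0o200) would suffice and is less
# permissive; this is likely what the CWE-276 annotation on this file refers to.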
class BuildError(Exception):
def __init__(self, output, code=500):
self.output = output
self.code = code
def __str__(self):
return self.output
class HTTPError(BuildError):
pass
class PythonBuilder(object):
def __init__(self, **kwargs):
python_version = kwargs.pop('python', '3.4')
python_path = None
for node in ('Wow6432Node\\', ''):
try:
key = compat_winreg.OpenKey(
compat_winreg.HKEY_LOCAL_MACHINE,
r'SOFTWARE\%sPython\PythonCore\%s\InstallPath' % (node, python_version))
try:
python_path, _ = compat_winreg.QueryValueEx(key, '')
finally:
compat_winreg.CloseKey(key)
break
except Exception:
pass
if not python_path:
raise BuildError('No such Python version: %s' % python_version)
self.pythonPath = python_path
super(PythonBuilder, self).__init__(**kwargs)
class GITInfoBuilder(object):
def __init__(self, **kwargs):
try:
self.user, self.repoName = kwargs['path'][:2]
self.rev = kwargs.pop('rev')
except ValueError:
raise BuildError('Invalid path')
except KeyError as e:
raise BuildError('Missing mandatory parameter "%s"' % e.args[0])
path = os.path.join(os.environ['APPDATA'], 'Build archive', self.repoName, self.user)
if not os.path.exists(path):
os.makedirs(path)
self.basePath = tempfile.mkdtemp(dir=path)
self.buildPath = os.path.join(self.basePath, 'build')
super(GITInfoBuilder, self).__init__(**kwargs)
class GITBuilder(GITInfoBuilder):
def build(self):
try:
subprocess.check_output(['git', 'clone', 'git://github.com/%s/%s.git' % (self.user, self.repoName), self.buildPath])
subprocess.check_output(['git', 'checkout', self.rev], cwd=self.buildPath)
except subprocess.CalledProcessError as e:
raise BuildError(e.output)
super(GITBuilder, self).build()
class YoutubeDLBuilder(object):
authorizedUsers = ['fraca7', 'phihag', 'rg3', 'FiloSottile', 'ytdl-org']
def __init__(self, **kwargs):
if self.repoName != 'youtube-dl':
raise BuildError('Invalid repository "%s"' % self.repoName)
if self.user not in self.authorizedUsers:
raise HTTPError('Unauthorized user "%s"' % self.user, 401)
super(YoutubeDLBuilder, self).__init__(**kwargs)
def build(self):
try:
proc = subprocess.Popen([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'], stdin=subprocess.PIPE, cwd=self.buildPath)
proc.wait()
#subprocess.check_output([os.path.join(self.pythonPath, 'python.exe'), 'setup.py', 'py2exe'],
# cwd=self.buildPath)
except subprocess.CalledProcessError as e:
raise BuildError(e.output)
super(YoutubeDLBuilder, self).build()
class DownloadBuilder(object):
def __init__(self, **kwargs):
self.handler = kwargs.pop('handler')
self.srcPath = os.path.join(self.buildPath, *tuple(kwargs['path'][2:]))
self.srcPath = os.path.abspath(os.path.normpath(self.srcPath))
if not self.srcPath.startswith(self.buildPath):
raise HTTPError(self.srcPath, 401)
super(DownloadBuilder, self).__init__(**kwargs)
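# Example of the guard above (a sketch): a request whose path components end in
# ['..', '..', 'secret'] would normalize to a location outside self.buildPath
# and be rejected with a 401 before any file is served.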
def build(self):
if not os.path.exists(self.srcPath):
raise HTTPError('No such file', 404)
if os.path.isdir(self.srcPath):
raise HTTPError('Is a directory: %s' % self.srcPath, 401)
self.handler.send_response(200)
self.handler.send_header('Content-Type', 'application/octet-stream')
self.handler.send_header('Content-Disposition', 'attachment; filename=%s' % os.path.split(self.srcPath)[-1])
self.handler.send_header('Content-Length', str(os.stat(self.srcPath).st_size))
self.handler.end_headers()
with open(self.srcPath, 'rb') as src:
shutil.copyfileobj(src, self.handler.wfile)
super(DownloadBuilder, self).build()
class CleanupTempDir(object):
def build(self):
try:
rmtree(self.basePath)
except Exception as e:
print('WARNING deleting "%s": %s' % (self.basePath, e))
super(CleanupTempDir, self).build()
class Null(object):
def __init__(self, **kwargs):
pass
def start(self):
pass
def close(self):
pass
def build(self):
pass
class Builder(PythonBuilder, GITBuilder, YoutubeDLBuilder, DownloadBuilder, CleanupTempDir, Null):
pass
class BuildHTTPRequestHandler(compat_http_server.BaseHTTPRequestHandler):
actionDict = {'build': Builder, 'download': Builder} # They're the same, no more caching.
def do_GET(self):
path = compat_urlparse.urlparse(self.path)
paramDict = dict([(key, value[0]) for key, value in compat_urlparse.parse_qs(path.query).items()])
action, _, path = path.path.strip('/').partition('/')
if path:
path = path.split('/')
if action in self.actionDict:
try:
builder = self.actionDict[action](path=path, handler=self, **paramDict)
builder.start()
try:
builder.build()
finally:
builder.close()
except BuildError as e:
self.send_response(e.code)
msg = compat_str(e).encode('UTF-8')
self.send_header('Content-Type', 'text/plain; charset=UTF-8')
self.send_header('Content-Length', len(msg))
self.end_headers()
self.wfile.write(msg)
else:
self.send_response(500, 'Unknown build method "%s"' % action)
else:
self.send_response(500, 'Malformed URL')
if __name__ == '__main__':
main()
| 093118.py | [
"CWE-276: Incorrect Default Permissions"
] |
from __future__ import unicode_literals
import errno
import hashlib
import json
import os.path
import re
import ssl
import sys
import types
import unittest
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
compat_open as open,
compat_os_name,
compat_str,
)
from youtube_dl.utils import (
IDENTITY,
preferredencoding,
write_string,
)
def get_params(override=None):
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"parameters.json")
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"local_parameters.json")
with open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
if os.path.exists(LOCAL_PARAMETERS_FILE):
with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
parameters.update(json.load(pf))
if override:
parameters.update(override)
return parameters
def try_rm(filename):
""" Remove a file if it exists """
try:
os.remove(filename)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
def report_warning(message):
'''
Print the message to stderr; it will be prefixed with 'WARNING:'.
If stderr is a tty, the 'WARNING:' prefix will be colored.
'''
if sys.stderr.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
class FakeYDL(YoutubeDL):
def __init__(self, override=None):
# Different instances of the downloader can't share the same dictionary:
# some tests set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
print(s)
def trouble(self, *args, **kwargs):
s = args[0] if len(args) > 0 else kwargs.get('message', 'Missing message')
raise Exception(s)
def download(self, x):
self.result.append(x)
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
def report_warning(self, message):
if re.match(regex, message):
return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
class FakeLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
pass
def gettestcases(include_onlymatching=False):
for ie in youtube_dl.extractor.gen_extractors():
for tc in ie.get_testcases(include_onlymatching):
yield tc
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
def expect_value(self, got, expected, field):
if isinstance(expected, compat_str) and expected.startswith('re:'):
match_str = expected[len('re:'):]
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
match_rex.match(got),
'field %s (value: %r) should match %r' % (field, got, match_str))
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
start_str = expected[len('startswith:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
got.startswith(start_str),
'field %s (value: %r) should start with %r' % (field, got, start_str))
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
contains_str = expected[len('contains:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
contains_str in got,
'field %s (value: %r) should contain %r' % (field, got, contains_str))
elif isinstance(expected, compat_str) and re.match(r'lambda \w+:', expected):
fn = eval(expected)
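# Note: eval() runs whatever 'lambda ...' string the test definition supplies;
# test cases are trusted input here, but this pattern is what the CWE-95
# (eval injection) annotation on this file refers to.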
suite = expected.split(':', 1)[1].strip()
self.assertTrue(
fn(got),
'Expected field %s to meet condition %s, but value %r failed ' % (field, suite, got))
elif isinstance(expected, type):
self.assertTrue(
isinstance(got, expected),
'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
elif isinstance(expected, dict) and isinstance(got, dict):
expect_dict(self, got, expected)
elif isinstance(expected, list) and isinstance(got, list):
self.assertEqual(
len(expected), len(got),
'Expected a list of length %d, but got a list of length %d for field %s' % (
len(expected), len(got), field))
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
type_got = type(item_got)
type_expected = type(item_expected)
self.assertEqual(
type_expected, type_got,
'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
index, field, type_expected, type_got))
expect_value(self, item_got, item_expected, field)
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
self.assertTrue(
isinstance(got, compat_str),
'Expected field %s to be a unicode object, but got value %r of type %r' % (field, got, type(got)))
got = 'md5:' + md5(got)
elif isinstance(expected, compat_str) and re.match(r'^(?:min|max)?count:\d+', expected):
self.assertTrue(
isinstance(got, (list, dict)),
'Expected field %s to be a list or a dict, but it is of type %s' % (
field, type(got).__name__))
op, _, expected_num = expected.partition(':')
expected_num = int(expected_num)
if op == 'mincount':
assert_func = self.assertGreaterEqual
msg_tmpl = 'Expected %d items in field %s, but only got %d'
elif op == 'maxcount':
assert_func = self.assertLessEqual
msg_tmpl = 'Expected maximum %d items in field %s, but got %d'
elif op == 'count':
assert_func = self.assertEqual
msg_tmpl = 'Expected exactly %d items in field %s, but got %d'
else:
assert False
assert_func(
len(got), expected_num,
msg_tmpl % (expected_num, field, len(got)))
return
self.assertEqual(
expected, got,
'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
def expect_dict(self, got_dict, expected_dict):
for info_field, expected in expected_dict.items():
got = got_dict.get(info_field)
expect_value(self, got, expected, info_field)
def expect_info_dict(self, got_dict, expected_dict):
expect_dict(self, got_dict, expected_dict)
# Check for the presence of mandatory fields
if got_dict.get('_type') not in ('playlist', 'multi_video'):
for key in ('id', 'url', 'title', 'ext'):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
else:
return repr(v)
info_dict_str = ''
if len(missing_keys) != len(expected_dict):
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items() if k not in missing_keys)
if info_dict_str:
info_dict_str += '\n'
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
for k in missing_keys)
write_string(
'\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
', '.join(sorted(missing_keys))))
def assertRegexpMatches(self, text, regexp, msg=None):
if hasattr(self, 'assertRegexp'):
return self.assertRegexp(text, regexp, msg)
else:
m = re.match(regexp, text)
if not m:
note = 'Regexp didn\'t match: %r not found' % (regexp)
if len(text) < 1000:
note += ' in %r' % text
if msg is None:
msg = note
else:
msg = note + ', ' + msg
self.assertTrue(m, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(w):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
ydl.report_warning = _report_warning
def http_server_port(httpd):
if os.name == 'java' and isinstance(httpd.socket, ssl.SSLSocket):
# In Jython SSLSocket is not a subclass of socket.socket
sock = httpd.socket.sock
else:
sock = httpd.socket
return sock.getsockname()[1]
def expectedFailureIf(cond):
return unittest.expectedFailure if cond else IDENTITY
| 717170.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
try_get,
unified_strdate,
unified_timestamp,
)
class AmericasTestKitchenIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?:cooks(?:country|illustrated)/)?(?P<resource_type>episode|videos)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers',
'md5': 'b861c3e365ac38ad319cfd509c30577f',
'info_dict': {
'id': '5b400b9ee338f922cb06450c',
'title': 'Japanese Suppers',
'ext': 'mp4',
'display_id': 'weeknight-japanese-suppers',
'description': 'md5:64e606bfee910627efc4b5f050de92b3',
'timestamp': 1523304000,
'upload_date': '20180409',
'release_date': '20180409',
'series': "America's Test Kitchen",
'season': 'Season 18',
'season_number': 18,
'episode': 'Japanese Suppers',
'episode_number': 15,
'duration': 1376,
'thumbnail': r're:^https?://',
'average_rating': 0,
'view_count': int,
},
'params': {
'skip_download': True,
},
}, {
# Metadata parsing behaves differently for newer episodes (705) as opposed to older episodes (582 above)
'url': 'https://www.americastestkitchen.com/episode/705-simple-chicken-dinner',
'md5': '06451608c57651e985a498e69cec17e5',
'info_dict': {
'id': '5fbe8c61bda2010001c6763b',
'title': 'Simple Chicken Dinner',
'ext': 'mp4',
'display_id': 'atktv_2103_simple-chicken-dinner_full-episode_web-mp4',
'description': 'md5:eb68737cc2fd4c26ca7db30139d109e7',
'timestamp': 1610737200,
'upload_date': '20210115',
'release_date': '20210115',
'series': "America's Test Kitchen",
'season': 'Season 21',
'season_number': 21,
'episode': 'Simple Chicken Dinner',
'episode_number': 3,
'duration': 1397,
'thumbnail': r're:^https?://',
'view_count': int,
'average_rating': 0,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon',
'only_matching': True,
}, {
'url': 'https://www.americastestkitchen.com/cookscountry/episode/564-when-only-chocolate-will-do',
'only_matching': True,
}, {
'url': 'https://www.americastestkitchen.com/cooksillustrated/videos/4478-beef-wellington',
'only_matching': True,
}, {
'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do',
'only_matching': True,
}, {
'url': 'https://www.cooksillustrated.com/videos/4478-beef-wellington',
'only_matching': True,
}]
def _real_extract(self, url):
resource_type, video_id = re.match(self._VALID_URL, url).groups()
is_episode = resource_type == 'episode'
if is_episode:
resource_type = 'episodes'
resource = self._download_json(
'https://www.americastestkitchen.com/api/v6/%s/%s' % (resource_type, video_id), video_id)
video = resource['video'] if is_episode else resource
episode = resource if is_episode else resource.get('episode') or {}
return {
'_type': 'url_transparent',
'url': 'https://player.zype.com/embed/%s.js?api_key=jZ9GUhRmxcPvX7M3SlfejB6Hle9jyHTdk2jVxG7wOHPLODgncEKVdPYBhuz9iWXQ' % video['zypeId'],
'ie_key': 'Zype',
'description': clean_html(video.get('description')),
'timestamp': unified_timestamp(video.get('publishDate')),
'release_date': unified_strdate(video.get('publishDate')),
'episode_number': int_or_none(episode.get('number')),
'season_number': int_or_none(episode.get('season')),
'series': try_get(episode, lambda x: x['show']['title']),
'episode': episode.get('title'),
}
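# Note: the Zype api_key embedded in the player URL above (and the Algolia
# X-Algolia-API-Key used further down) appear to be public client-side keys
# taken from the site's frontend; shipping them in source is what the CWE-798
# (hard-coded credentials) annotation on this file flags.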
class AmericasTestKitchenSeasonIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|(?P<cooks>cooks(?:country|illustrated)))\.com(?:(?:/(?P<show2>cooks(?:country|illustrated)))?(?:/?$|(?<!ated)(?<!ated\.com)/episodes/browse/season_(?P<season>\d+)))'
_TESTS = [{
# ATK Season
'url': 'https://www.americastestkitchen.com/episodes/browse/season_1',
'info_dict': {
'id': 'season_1',
'title': 'Season 1',
},
'playlist_count': 13,
}, {
# Cooks Country Season
'url': 'https://www.americastestkitchen.com/cookscountry/episodes/browse/season_12',
'info_dict': {
'id': 'season_12',
'title': 'Season 12',
},
'playlist_count': 13,
}, {
# America's Test Kitchen Series
'url': 'https://www.americastestkitchen.com/',
'info_dict': {
'id': 'americastestkitchen',
'title': 'America\'s Test Kitchen',
},
'playlist_count': 558,
}, {
# Cooks Country Series
'url': 'https://www.americastestkitchen.com/cookscountry',
'info_dict': {
'id': 'cookscountry',
'title': 'Cook\'s Country',
},
'playlist_count': 199,
}, {
'url': 'https://www.americastestkitchen.com/cookscountry/',
'only_matching': True,
}, {
'url': 'https://www.cookscountry.com/episodes/browse/season_12',
'only_matching': True,
}, {
'url': 'https://www.cookscountry.com',
'only_matching': True,
}, {
'url': 'https://www.americastestkitchen.com/cooksillustrated/',
'only_matching': True,
}, {
'url': 'https://www.cooksillustrated.com',
'only_matching': True,
}]
def _real_extract(self, url):
match = re.match(self._VALID_URL, url).groupdict()
show = match.get('show2')
show_path = ('/' + show) if show else ''
show = show or match['show']
season_number = int_or_none(match.get('season'))
slug, title = {
'americastestkitchen': ('atk', 'America\'s Test Kitchen'),
'cookscountry': ('cco', 'Cook\'s Country'),
'cooksillustrated': ('cio', 'Cook\'s Illustrated'),
}[show]
facet_filters = [
'search_document_klass:episode',
'search_show_slug:' + slug,
]
if season_number:
playlist_id = 'season_%d' % season_number
playlist_title = 'Season %d' % season_number
facet_filters.append('search_season_list:' + playlist_title)
else:
playlist_id = show
playlist_title = title
season_search = self._download_json(
'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
playlist_id, headers={
'Origin': 'https://www.americastestkitchen.com',
'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
'X-Algolia-Application-Id': 'Y1FNZXUI30',
}, query={
'facetFilters': json.dumps(facet_filters),
'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title,search_atk_episode_season' % slug,
'attributesToHighlight': '',
'hitsPerPage': 1000,
})
def entries():
for episode in (season_search.get('hits') or []):
search_url = episode.get('search_url') # always formatted like '/episode/123-title-of-episode'
if not search_url:
continue
yield {
'_type': 'url',
'url': 'https://www.americastestkitchen.com%s%s' % (show_path, search_url),
'id': try_get(episode, lambda e: e['objectID'].rsplit('_', 1)[-1]),
'title': episode.get('title'),
'description': episode.get('description'),
'timestamp': unified_timestamp(episode.get('search_document_date')),
'season_number': season_number,
'episode_number': int_or_none(episode.get('search_%s_episode_number' % slug)),
'ie_key': AmericasTestKitchenIE.ie_key(),
}
return self.playlist_result(
entries(), playlist_id, playlist_title)
| 773378.py | [
"CWE-798: Use of Hard-coded Credentials"
] |
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
data_dir,
save_dir: str,
model_name: str,
bs: int = 8,
max_source_length: int = 1024,
type_path="val",
n_obs=None,
fp16=False,
task="summarization",
local_rank=None,
num_return_sequences=1,
dataset_kwargs: Dict = None,
prefix="",
**generate_kwargs,
) -> Dict:
"""Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
model_name = str(model_name)
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl", rank=local_rank)
save_dir = Path(save_dir)
save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
torch.cuda.set_device(local_rank)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
if fp16:
model = model.half()
# determine if we need to increase num_beams
use_task_specific_params(model, task) # update config with task specific params
num_beams = generate_kwargs.pop("num_beams", model.config.num_beams) # AttributeError risk?
if num_return_sequences > num_beams:
num_beams = num_return_sequences
tokenizer = AutoTokenizer.from_pretrained(model_name)
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type.
if max_source_length is None:
max_source_length = tokenizer.model_max_length
if prefix is None:
prefix = prefix or getattr(model.config, "prefix", "") or ""
ds = Seq2SeqDataset(
tokenizer,
data_dir,
max_source_length,
max_target_length=1024,
type_path=type_path,
n_obs=n_obs,
prefix=prefix,
**dataset_kwargs,
)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
results = []
for batch in tqdm(data_loader):
summaries = model.generate(
input_ids=batch["input_ids"].to(model.device),
attention_mask=batch["attention_mask"].to(model.device),
num_return_sequences=num_return_sequences,
num_beams=num_beams,
**generate_kwargs,
)
preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
ids = batch["ids"]
if num_return_sequences > 1:
preds = chunks(preds, num_return_sequences) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(preds):
results.append({"pred": pred, "id": ids[i].item()})
save_json(results, save_path)
return results, sampler.num_replicas
def run_generate():
parser = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
)
parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
parser.add_argument(
"--model_name",
type=str,
help="like facebook/bart-large-cnn,google-t5/t5-base, etc.",
default="sshleifer/distilbart-xsum-12-3",
)
parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
parser.add_argument("--max_source_length", type=int, default=None)
parser.add_argument(
"--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
)
parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
parser.add_argument(
"--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
)
parser.add_argument(
"--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
)
parser.add_argument(
"--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
)
parser.add_argument(
"--sync_timeout",
type=int,
default=600,
required=False,
help="How long should master process wait for other processes to finish.",
)
parser.add_argument("--src_lang", type=str, default=None, required=False)
parser.add_argument("--tgt_lang", type=str, default=None, required=False)
parser.add_argument(
"--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
)
parser.add_argument("--fp16", action="store_true")
parser.add_argument("--debug", action="store_true")
start_time = time.time()
args, rest = parser.parse_known_args()
generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}")
json_save_dir = Path(args.save_dir + "_tmp")
Path(json_save_dir).mkdir(exist_ok=True) # this handles locking.
intermediate_files = list(json_save_dir.glob("rank_*.json"))
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
dataset_kwargs = {}
if args.src_lang is not None:
dataset_kwargs["src_lang"] = args.src_lang
if args.tgt_lang is not None:
dataset_kwargs["tgt_lang"] = args.tgt_lang
Path(args.save_dir).mkdir(exist_ok=True)
results, num_replicas = eval_data_dir(
args.data_dir,
json_save_dir,
args.model_name,
type_path=args.type_path,
bs=args.bs,
fp16=args.fp16,
task=args.task,
local_rank=args.local_rank,
n_obs=args.n_obs,
max_source_length=args.max_source_length,
num_return_sequences=args.num_return_sequences,
prefix=args.prefix,
dataset_kwargs=dataset_kwargs,
**generate_kwargs,
)
if args.local_rank <= 0:
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True)
partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
preds = combine_partial_results(partial_results)
if args.num_return_sequences > 1:
save_path = save_dir.joinpath("pseudolabel_results.json")
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
save_json(preds, save_path)
return
tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
with open(tgt_file) as f:
labels = [x.rstrip() for x in f.readlines()][: len(preds)]
# Calculate metrics, save metrics, and save _generations.txt
calc_bleu = "translation" in args.task
score_fn = calculate_bleu if calc_bleu else calculate_rouge
metric_name = "bleu" if calc_bleu else "rouge"
metrics: Dict = score_fn(preds, labels)
metrics["n_obs"] = len(preds)
runtime = time.time() - start_time
metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
metrics["n_gpus"] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
save_json(metrics, metrics_save_path, indent=None)
print(metrics)
write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
if args.debug:
write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
else:
shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
"""Concatenate partial results into one file, then sort it by id."""
records = []
for partial_result in partial_results:
records.extend(partial_result)
records = sorted(records, key=lambda x: x["id"])
preds = [x["pred"] for x in records]
return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
# WAIT FOR lots of .json files
start_wait = time.time()
logger.info("waiting for all nodes to finish")
json_data = None
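    # Poll the shared save_dir until every rank's JSON file appears; this busy-waits without sleeping between checks.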
while (time.time() - start_wait) < timeout:
json_files = list(save_dir.glob("rank_*.json"))
if len(json_files) < num_replicas:
continue
try:
# make sure all json files are fully saved
json_data = lmap(load_json, json_files)
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 627547.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BertExtAbs's checkpoints.
The script looks like it is doing something trivial but it is not. The "weights"
proposed by the authors are actually the entire model pickled. We need to load
the model within the original codebase to be able to only save its `state_dict`.
"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
"""Copy/paste and tweak the pre-trained weights provided by the creators
of BertAbs for the internal architecture.
"""
# Instantiate the authors' model with the pre-trained weights
config = BertAbsConfig(
temp_dir=".",
finetune_bert=False,
large=False,
share_emb=True,
use_bert_emb=False,
encoder="bert",
max_pos=512,
enc_layers=6,
enc_hidden_size=512,
enc_heads=8,
enc_ff_size=512,
enc_dropout=0.2,
dec_layers=6,
dec_hidden_size=768,
dec_heads=8,
dec_ff_size=2048,
dec_dropout=0.2,
)
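    # NOTE: torch.load unpickles arbitrary Python objects, so only run this conversion on
    # checkpoints from a trusted source. (weights_only=True, available in newer torch versions,
    # would not help here because the checkpoint is a fully pickled model, not a plain state_dict.)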
checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
original.eval()
new_model = BertAbsSummarizer(config, torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
# prepare the model inputs
encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
src = encoder_input_ids
tgt = decoder_input_ids
segs = token_type_ids = None
clss = None
mask_src = encoder_attention_mask = None
mask_tgt = decoder_attention_mask = None
mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
output_original_generator = original.generator(output_original_model)
output_converted_model = new_model(
encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
)[0]
output_converted_generator = new_model.generator(output_converted_model)
maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(maximum_absolute_difference))
are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 624453.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
#! /usr/bin/python3
import argparse
import logging
import os
import sys
from collections import namedtuple
import torch
from modeling_bertabs import BertAbs, build_predictor
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm
from transformers import BertTokenizer
from .utils_summarization import (
CNNDMDataset,
build_mask,
compute_token_type_ids,
encode_for_summarization,
truncate_or_pad,
)
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"])
def evaluate(args):
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", do_lower_case=True)
model = BertAbs.from_pretrained("remi/bertabs-finetuned-extractive-abstractive-summarization")
model.to(args.device)
model.eval()
symbols = {
"BOS": tokenizer.vocab["[unused0]"],
"EOS": tokenizer.vocab["[unused1]"],
"PAD": tokenizer.vocab["[PAD]"],
}
if args.compute_rouge:
reference_summaries = []
generated_summaries = []
import nltk
import rouge
nltk.download("punkt")
rouge_evaluator = rouge.Rouge(
metrics=["rouge-n", "rouge-l"],
max_n=2,
limit_length=True,
length_limit=args.beam_size,
length_limit_type="words",
apply_avg=True,
apply_best=False,
alpha=0.5, # Default F1_score
weight_factor=1.2,
stemming=True,
)
# these (unused) arguments are defined to keep the compatibility
    # with the legacy code and will be deleted in a future iteration.
args.result_path = ""
args.temp_dir = ""
data_iterator = build_data_iterator(args, tokenizer)
predictor = build_predictor(args, tokenizer, symbols, model)
logger.info("***** Running evaluation *****")
logger.info(" Number examples = %d", len(data_iterator.dataset))
logger.info(" Batch size = %d", args.batch_size)
logger.info("")
logger.info("***** Beam Search parameters *****")
logger.info(" Beam size = %d", args.beam_size)
logger.info(" Minimum length = %d", args.min_length)
logger.info(" Maximum length = %d", args.max_length)
logger.info(" Alpha (length penalty) = %.2f", args.alpha)
logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT"))
for batch in tqdm(data_iterator):
batch_data = predictor.translate_batch(batch)
translations = predictor.from_batch(batch_data)
summaries = [format_summary(t) for t in translations]
save_summaries(summaries, args.summaries_output_dir, batch.document_names)
if args.compute_rouge:
reference_summaries += batch.tgt_str
generated_summaries += summaries
if args.compute_rouge:
scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries)
str_scores = format_rouge_scores(scores)
save_rouge_scores(str_scores)
print(str_scores)
def save_summaries(summaries, path, original_document_name):
"""Write the summaries in fies that are prefixed by the original
files' name with the `_summary` appended.
Attributes:
original_document_names: List[string]
Name of the document that was summarized.
path: string
Path were the summaries will be written
summaries: List[string]
The summaries that we produced.
"""
for summary, document_name in zip(summaries, original_document_name):
# Prepare the summary file's name
if "." in document_name:
bare_document_name = ".".join(document_name.split(".")[:-1])
extension = document_name.split(".")[-1]
name = bare_document_name + "_summary." + extension
else:
name = document_name + "_summary"
file_path = os.path.join(path, name)
with open(file_path, "w") as output:
output.write(summary)
def format_summary(translation):
"""Transforms the output of the `from_batch` function
into nicely formatted summaries.
"""
raw_summary, _, _ = translation
summary = (
raw_summary.replace("[unused0]", "")
.replace("[unused3]", "")
.replace("[PAD]", "")
.replace("[unused1]", "")
.replace(r" +", " ")
.replace(" [unused2] ", ". ")
.replace("[unused2]", "")
.strip()
)
return summary
def format_rouge_scores(scores):
return """\n
****** ROUGE SCORES ******
** ROUGE 1
F1 >> {:.3f}
Precision >> {:.3f}
Recall >> {:.3f}
** ROUGE 2
F1 >> {:.3f}
Precision >> {:.3f}
Recall >> {:.3f}
** ROUGE L
F1 >> {:.3f}
Precision >> {:.3f}
Recall >> {:.3f}""".format(
scores["rouge-1"]["f"],
scores["rouge-1"]["p"],
scores["rouge-1"]["r"],
scores["rouge-2"]["f"],
scores["rouge-2"]["p"],
scores["rouge-2"]["r"],
scores["rouge-l"]["f"],
scores["rouge-l"]["p"],
scores["rouge-l"]["r"],
)
def save_rouge_scores(str_scores):
with open("rouge_scores.txt", "w") as output:
output.write(str_scores)
#
# LOAD the dataset
#
def build_data_iterator(args, tokenizer):
dataset = load_and_cache_examples(args, tokenizer)
sampler = SequentialSampler(dataset)
def collate_fn(data):
return collate(data, tokenizer, block_size=512, device=args.device)
iterator = DataLoader(
dataset,
sampler=sampler,
batch_size=args.batch_size,
collate_fn=collate_fn,
)
return iterator
def load_and_cache_examples(args, tokenizer):
dataset = CNNDMDataset(args.documents_dir)
return dataset
def collate(data, tokenizer, block_size, device):
"""Collate formats the data passed to the data loader.
In particular we tokenize the data batch after batch to avoid keeping them
all in memory. We output the data as a namedtuple to fit the original BertAbs's
API.
"""
data = [x for x in data if not len(x[1]) == 0] # remove empty_files
names = [name for name, _, _ in data]
summaries = [" ".join(summary_list) for _, _, summary_list in data]
encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data]
encoded_stories = torch.tensor(
[truncate_or_pad(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text]
)
encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id)
encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id)
batch = Batch(
document_names=names,
batch_size=len(encoded_stories),
src=encoded_stories.to(device),
segs=encoder_token_type_ids.to(device),
mask_src=encoder_mask.to(device),
tgt_str=summaries,
)
return batch
def decode_summary(summary_tokens, tokenizer):
"""Decode the summary and return it in a format
suitable for evaluation.
"""
summary_tokens = summary_tokens.to("cpu").numpy()
summary = tokenizer.decode(summary_tokens)
sentences = summary.split(".")
sentences = [s + "." for s in sentences]
return sentences
def main():
"""The main function defines the interface with the users."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--documents_dir",
default=None,
type=str,
required=True,
help="The folder where the documents to summarize are located.",
)
parser.add_argument(
"--summaries_output_dir",
default=None,
type=str,
required=False,
help="The folder in wich the summaries should be written. Defaults to the folder where the documents are",
)
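    # NOTE: argparse's type=bool turns any non-empty string into True, so e.g. "--compute_rouge false" still enables the flag.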
parser.add_argument(
"--compute_rouge",
default=False,
type=bool,
required=False,
help="Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.",
)
# EVALUATION options
parser.add_argument(
"--no_cuda",
default=False,
type=bool,
help="Whether to force the execution on CPU.",
)
parser.add_argument(
"--batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
# BEAM SEARCH arguments
parser.add_argument(
"--min_length",
default=50,
type=int,
help="Minimum number of tokens for the summaries.",
)
parser.add_argument(
"--max_length",
default=200,
type=int,
help="Maixmum number of tokens for the summaries.",
)
parser.add_argument(
"--beam_size",
default=5,
type=int,
help="The number of beams to start with for each example.",
)
parser.add_argument(
"--alpha",
default=0.95,
type=float,
help="The value of alpha for the length penalty in the beam search.",
)
parser.add_argument(
"--block_trigram",
default=True,
type=bool,
help="Whether to block the existence of repeating trigrams in the text generated by beam search.",
)
args = parser.parse_args()
    # Select device (distributed mode not available)
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
# Check the existence of directories
if not args.summaries_output_dir:
args.summaries_output_dir = args.documents_dir
if not documents_dir_is_valid(args.documents_dir):
raise FileNotFoundError(
"We could not find the directory you specified for the documents to summarize, or it was empty. Please"
" specify a valid path."
)
os.makedirs(args.summaries_output_dir, exist_ok=True)
evaluate(args)
def documents_dir_is_valid(path):
if not os.path.exists(path):
return False
file_list = os.listdir(path)
if len(file_list) == 0:
return False
return True
if __name__ == "__main__":
main()
| 884804.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
#!/usr/bin/env python3
import os
import shutil
import sys
from pathlib import Path
from subprocess import check_call
from tempfile import TemporaryDirectory
from typing import Optional
SCRIPT_DIR = Path(__file__).parent
REPO_DIR = SCRIPT_DIR.parent.parent
def read_triton_pin(device: str = "cuda") -> str:
triton_file = "triton.txt"
if device == "rocm":
triton_file = "triton-rocm.txt"
elif device == "xpu":
triton_file = "triton-xpu.txt"
with open(REPO_DIR / ".ci" / "docker" / "ci_commit_pins" / triton_file) as f:
return f.read().strip()
def read_triton_version() -> str:
with open(REPO_DIR / ".ci" / "docker" / "triton_version.txt") as f:
return f.read().strip()
def check_and_replace(inp: str, src: str, dst: str) -> str:
"""Checks that `src` can be found in `input` and replaces it with `dst`"""
if src not in inp:
raise RuntimeError(f"Can't find ${src} in the input")
return inp.replace(src, dst)
def patch_init_py(
path: Path, *, version: str, expected_version: Optional[str] = None
) -> None:
if not expected_version:
expected_version = read_triton_version()
with open(path) as f:
orig = f.read()
# Replace version
orig = check_and_replace(
orig, f"__version__ = '{expected_version}'", f'__version__ = "{version}"'
)
with open(path, "w") as f:
f.write(orig)
# TODO: remove patch_setup_py() once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
def patch_setup_py(path: Path) -> None:
with open(path) as f:
orig = f.read()
orig = check_and_replace(
orig,
"https://tritonlang.blob.core.windows.net/llvm-builds/",
"https://oaitriton.blob.core.windows.net/public/llvm-builds/",
)
with open(path, "w") as f:
f.write(orig)
def build_triton(
*,
version: str,
commit_hash: str,
build_conda: bool = False,
device: str = "cuda",
py_version: Optional[str] = None,
release: bool = False,
) -> Path:
env = os.environ.copy()
if "MAX_JOBS" not in env:
max_jobs = os.cpu_count() or 1
env["MAX_JOBS"] = str(max_jobs)
version_suffix = ""
if not release:
# Nightly binaries include the triton commit hash, i.e. 2.1.0+e6216047b8
# while release build should only include the version, i.e. 2.1.0
version_suffix = f"+{commit_hash[:10]}"
version += version_suffix
with TemporaryDirectory() as tmpdir:
triton_basedir = Path(tmpdir) / "triton"
triton_pythondir = triton_basedir / "python"
triton_repo = "https://github.com/openai/triton"
if device == "rocm":
triton_pkg_name = "pytorch-triton-rocm"
elif device == "xpu":
triton_pkg_name = "pytorch-triton-xpu"
triton_repo = "https://github.com/intel/intel-xpu-backend-for-triton"
else:
triton_pkg_name = "pytorch-triton"
check_call(["git", "clone", triton_repo, "triton"], cwd=tmpdir)
if release:
ver, rev, patch = version.split(".")
check_call(
["git", "checkout", f"release/{ver}.{rev}.x"], cwd=triton_basedir
)
else:
check_call(["git", "checkout", commit_hash], cwd=triton_basedir)
# TODO: remove this and patch_setup_py() once we have a proper fix for https://github.com/triton-lang/triton/issues/4527
patch_setup_py(triton_pythondir / "setup.py")
if build_conda:
with open(triton_basedir / "meta.yaml", "w") as meta:
print(
f"package:\n name: torchtriton\n version: {version}\n",
file=meta,
)
print("source:\n path: .\n", file=meta)
print(
"build:\n string: py{{py}}\n number: 1\n script: cd python; "
"python setup.py install --record=record.txt\n",
" script_env:\n - MAX_JOBS\n",
file=meta,
)
print(
"requirements:\n host:\n - python\n - setuptools\n run:\n - python\n"
" - filelock\n - pytorch\n",
file=meta,
)
print(
"about:\n home: https://github.com/openai/triton\n license: MIT\n summary:"
" 'A language and compiler for custom Deep Learning operation'",
file=meta,
)
patch_init_py(
triton_pythondir / "triton" / "__init__.py",
version=f"{version}",
)
if py_version is None:
py_version = f"{sys.version_info.major}.{sys.version_info.minor}"
check_call(
[
"conda",
"build",
"--python",
py_version,
"-c",
"pytorch-nightly",
"--output-folder",
tmpdir,
".",
],
cwd=triton_basedir,
env=env,
)
conda_path = next(iter(Path(tmpdir).glob("linux-64/torchtriton*.bz2")))
shutil.copy(conda_path, Path.cwd())
return Path.cwd() / conda_path.name
# change built wheel name and version
env["TRITON_WHEEL_NAME"] = triton_pkg_name
env["TRITON_WHEEL_VERSION_SUFFIX"] = version_suffix
patch_init_py(
triton_pythondir / "triton" / "__init__.py",
version=f"{version}",
expected_version=None,
)
if device == "rocm":
check_call(
[f"{SCRIPT_DIR}/amd/package_triton_wheel.sh"],
cwd=triton_basedir,
shell=True,
)
print("ROCm libraries setup for triton installation...")
check_call(
[sys.executable, "setup.py", "bdist_wheel"], cwd=triton_pythondir, env=env
)
whl_path = next(iter((triton_pythondir / "dist").glob("*.whl")))
shutil.copy(whl_path, Path.cwd())
if device == "rocm":
check_call(
[f"{SCRIPT_DIR}/amd/patch_triton_wheel.sh", Path.cwd()],
cwd=triton_basedir,
)
return Path.cwd() / whl_path.name
def main() -> None:
from argparse import ArgumentParser
parser = ArgumentParser("Build Triton binaries")
parser.add_argument("--release", action="store_true")
parser.add_argument("--build-conda", action="store_true")
parser.add_argument(
"--device", type=str, default="cuda", choices=["cuda", "rocm", "xpu"]
)
parser.add_argument("--py-version", type=str)
parser.add_argument("--commit-hash", type=str)
parser.add_argument("--triton-version", type=str, default=read_triton_version())
args = parser.parse_args()
build_triton(
device=args.device,
commit_hash=args.commit_hash
if args.commit_hash
else read_triton_pin(args.device),
version=args.triton_version,
build_conda=args.build_conda,
py_version=args.py_version,
release=args.release,
)
if __name__ == "__main__":
main()
| 879024.py | [
"CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"
] |
#!/usr/bin/env python3
import os
import sys
from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import Dict, Iterable, List, Literal, Set
from typing_extensions import TypedDict # Python 3.11+
import generate_binary_build_matrix # type: ignore[import]
import jinja2
Arch = Literal["windows", "linux", "macos"]
GITHUB_DIR = Path(__file__).resolve().parent.parent
LABEL_CIFLOW_TRUNK = "ciflow/trunk"
LABEL_CIFLOW_UNSTABLE = "ciflow/unstable"
LABEL_CIFLOW_BINARIES = "ciflow/binaries"
LABEL_CIFLOW_PERIODIC = "ciflow/periodic"
LABEL_CIFLOW_BINARIES_LIBTORCH = "ciflow/binaries_libtorch"
LABEL_CIFLOW_BINARIES_CONDA = "ciflow/binaries_conda"
LABEL_CIFLOW_BINARIES_WHEEL = "ciflow/binaries_wheel"
@dataclass
class CIFlowConfig:
# For use to enable workflows to run on pytorch/pytorch-canary
run_on_canary: bool = False
labels: Set[str] = field(default_factory=set)
# Certain jobs might not want to be part of the ciflow/[all,trunk] workflow
isolated_workflow: bool = False
unstable: bool = False
def __post_init__(self) -> None:
if not self.isolated_workflow:
if LABEL_CIFLOW_PERIODIC not in self.labels:
self.labels.add(
LABEL_CIFLOW_TRUNK if not self.unstable else LABEL_CIFLOW_UNSTABLE
)
class Config(TypedDict):
num_shards: int
runner: str
@dataclass
class BinaryBuildWorkflow:
os: str
build_configs: List[Dict[str, str]]
package_type: str
# Optional fields
build_environment: str = ""
abi_version: str = ""
ciflow_config: CIFlowConfig = field(default_factory=CIFlowConfig)
is_scheduled: str = ""
branches: str = "nightly"
# Mainly for macos
cross_compile_arm64: bool = False
macos_runner: str = "macos-14-xlarge"
def __post_init__(self) -> None:
if self.abi_version:
self.build_environment = (
f"{self.os}-binary-{self.package_type}-{self.abi_version}"
)
else:
self.build_environment = f"{self.os}-binary-{self.package_type}"
def generate_workflow_file(self, workflow_template: jinja2.Template) -> None:
output_file_path = (
GITHUB_DIR
/ f"workflows/generated-{self.build_environment}-{self.branches}.yml"
)
with open(output_file_path, "w") as output_file:
GENERATED = "generated" # Note that please keep the variable GENERATED otherwise phabricator will hide the whole file
output_file.writelines([f"# @{GENERATED} DO NOT EDIT MANUALLY\n"])
try:
content = workflow_template.render(asdict(self))
except Exception as e:
print(f"Failed on template: {workflow_template}", file=sys.stderr)
raise e
output_file.write(content)
if content[-1] != "\n":
output_file.write("\n")
print(output_file_path)
class OperatingSystem:
LINUX = "linux"
WINDOWS = "windows"
MACOS = "macos"
MACOS_ARM64 = "macos-arm64"
LINUX_AARCH64 = "linux-aarch64"
LINUX_S390X = "linux-s390x"
LINUX_BINARY_BUILD_WORFKLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="conda",
build_configs=generate_binary_build_matrix.generate_conda_matrix(
OperatingSystem.LINUX
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX,
generate_binary_build_matrix.CXX11_ABI,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.PRE_CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX,
generate_binary_build_matrix.PRE_CXX11_ABI,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
]
LINUX_BINARY_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX,
arches=["11.8", "12.1", "12.4"],
python_versions=["3.9"],
),
branches="main",
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX,
generate_binary_build_matrix.CXX11_ABI,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
),
BinaryBuildWorkflow(
os=OperatingSystem.LINUX,
package_type="libtorch",
abi_version=generate_binary_build_matrix.PRE_CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.LINUX,
generate_binary_build_matrix.PRE_CXX11_ABI,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
),
]
WINDOWS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.WINDOWS
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="conda",
build_configs=generate_binary_build_matrix.generate_conda_matrix(
OperatingSystem.WINDOWS
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.RELEASE,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.DEBUG,
libtorch_variants=["shared-with-deps"],
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
]
WINDOWS_BINARY_SMOKE_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.RELEASE,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.RELEASE,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
ciflow_config=CIFlowConfig(
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.WINDOWS,
package_type="libtorch",
abi_version=generate_binary_build_matrix.DEBUG,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.WINDOWS,
generate_binary_build_matrix.DEBUG,
arches=["cpu"],
libtorch_variants=["shared-with-deps"],
),
branches="main",
ciflow_config=CIFlowConfig(
isolated_workflow=True,
),
),
]
MACOS_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="libtorch",
abi_version=generate_binary_build_matrix.CXX11_ABI,
build_configs=generate_binary_build_matrix.generate_libtorch_matrix(
OperatingSystem.MACOS,
generate_binary_build_matrix.CXX11_ABI,
libtorch_variants=["shared-with-deps"],
),
cross_compile_arm64=False,
macos_runner="macos-14-xlarge",
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_LIBTORCH},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="wheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.MACOS_ARM64
),
cross_compile_arm64=False,
macos_runner="macos-14-xlarge",
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
BinaryBuildWorkflow(
os=OperatingSystem.MACOS_ARM64,
package_type="conda",
cross_compile_arm64=False,
macos_runner="macos-14-xlarge",
build_configs=generate_binary_build_matrix.generate_conda_matrix(
OperatingSystem.MACOS_ARM64
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_CONDA},
isolated_workflow=True,
),
),
]
AARCH64_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX_AARCH64,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX_AARCH64
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
]
S390X_BINARY_BUILD_WORKFLOWS = [
BinaryBuildWorkflow(
os=OperatingSystem.LINUX_S390X,
package_type="manywheel",
build_configs=generate_binary_build_matrix.generate_wheels_matrix(
OperatingSystem.LINUX_S390X
),
ciflow_config=CIFlowConfig(
labels={LABEL_CIFLOW_BINARIES, LABEL_CIFLOW_BINARIES_WHEEL},
isolated_workflow=True,
),
),
]
def main() -> None:
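    # Use "!{{" as the Jinja variable delimiter, presumably so template expressions don't clash with GitHub Actions' own ${{ ... }} syntax.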
jinja_env = jinja2.Environment(
variable_start_string="!{{",
loader=jinja2.FileSystemLoader(str(GITHUB_DIR.joinpath("templates"))),
undefined=jinja2.StrictUndefined,
)
# not ported yet
template_and_workflows = [
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
LINUX_BINARY_BUILD_WORFKLOWS,
),
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
AARCH64_BINARY_BUILD_WORKFLOWS,
),
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
S390X_BINARY_BUILD_WORKFLOWS,
),
(
jinja_env.get_template("linux_binary_build_workflow.yml.j2"),
LINUX_BINARY_SMOKE_WORKFLOWS,
),
(
jinja_env.get_template("windows_binary_build_workflow.yml.j2"),
WINDOWS_BINARY_BUILD_WORKFLOWS,
),
(
jinja_env.get_template("windows_binary_build_workflow.yml.j2"),
WINDOWS_BINARY_SMOKE_WORKFLOWS,
),
(
jinja_env.get_template("macos_binary_build_workflow.yml.j2"),
MACOS_BINARY_BUILD_WORKFLOWS,
),
]
# Delete the existing generated files first, this should align with .gitattributes file description.
existing_workflows = GITHUB_DIR.glob("workflows/generated-*")
for w in existing_workflows:
try:
os.remove(w)
except Exception as e:
print(f"Error occurred when deleting file {w}: {e}")
for template, workflows in template_and_workflows:
# added Iterable check to appease the mypy gods
if not isinstance(workflows, Iterable):
raise Exception( # noqa: TRY002
f"How is workflows not iterable? {workflows}"
) # noqa: TRY002
for workflow in workflows:
workflow.generate_workflow_file(workflow_template=template)
if __name__ == "__main__":
main()
| 938702.py | [
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')"
] |
# Helper to get the id of the currently running job in a GitHub Actions
# workflow. GitHub does not provide this information to workflow runs, so we
# need to figure it out based on what they *do* provide.
import argparse
import json
import operator
import os
import re
import sys
import time
import urllib
import urllib.parse
from typing import Any, Callable, Dict, List, Optional, Tuple
from urllib.request import Request, urlopen
def parse_json_and_links(conn: Any) -> Tuple[Any, Dict[str, Dict[str, str]]]:
links = {}
# Extract links which GH uses for pagination
# see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link
if "Link" in conn.headers:
for elem in re.split(", *<", conn.headers["Link"]):
try:
url, params_ = elem.split(";", 1)
except ValueError:
continue
url = urllib.parse.unquote(url.strip("<> "))
qparams = urllib.parse.parse_qs(params_.strip(), separator=";")
params = {
k: v[0].strip('"')
for k, v in qparams.items()
if type(v) is list and len(v) > 0
}
params["url"] = url
if "rel" in params:
links[params["rel"]] = params
return json.load(conn), links
def fetch_url(
url: str,
*,
headers: Optional[Dict[str, str]] = None,
reader: Callable[[Any], Any] = lambda x: x.read(),
retries: Optional[int] = 3,
backoff_timeout: float = 0.5,
) -> Any:
if headers is None:
headers = {}
try:
with urlopen(Request(url, headers=headers)) as conn:
return reader(conn)
except urllib.error.HTTPError as err:
if isinstance(retries, (int, float)) and retries > 0:
time.sleep(backoff_timeout)
return fetch_url(
url,
headers=headers,
reader=reader,
retries=retries - 1,
backoff_timeout=backoff_timeout,
)
exception_message = (
"Is github alright?",
f"Recieved status code '{err.code}' when attempting to retrieve {url}:\n",
f"{err.reason}\n\nheaders={err.headers}",
)
raise RuntimeError(exception_message) from err
def parse_args() -> Any:
parser = argparse.ArgumentParser()
parser.add_argument(
"workflow_run_id", help="The id of the workflow run, should be GITHUB_RUN_ID"
)
parser.add_argument(
"runner_name",
help="The name of the runner to retrieve the job id, should be RUNNER_NAME",
)
return parser.parse_args()
def fetch_jobs(url: str, headers: Dict[str, str]) -> List[Dict[str, str]]:
response, links = fetch_url(url, headers=headers, reader=parse_json_and_links)
jobs = response["jobs"]
assert type(jobs) is list
while "next" in links.keys():
response, links = fetch_url(
links["next"]["url"], headers=headers, reader=parse_json_and_links
)
jobs.extend(response["jobs"])
return jobs
# Our strategy is to retrieve the parent workflow run, then filter its jobs on
# RUNNER_NAME to figure out which job we're currently running.
#
# Why RUNNER_NAME? Because it's the only thing that uniquely identifies a job within a workflow.
# GITHUB_JOB doesn't work, as it corresponds to the job yaml id
# (https://bit.ly/37e78oI), which has two problems:
# 1. It's not present in the workflow job JSON object, so we can't use it as a filter.
# 2. It isn't unique; for matrix jobs the job yaml id is the same for all jobs in the matrix.
#
# RUNNER_NAME on the other hand is unique across the pool of runners. Also,
# since only one job can be scheduled on a runner at a time, we know that
# looking for RUNNER_NAME will uniquely identify the job we're currently
# running.
def find_job_id_name(args: Any) -> Tuple[str, str]:
# From https://docs.github.com/en/actions/learn-github-actions/environment-variables
PYTORCH_REPO = os.environ.get("GITHUB_REPOSITORY", "pytorch/pytorch")
PYTORCH_GITHUB_API = f"https://api.github.com/repos/{PYTORCH_REPO}"
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
REQUEST_HEADERS = {
"Accept": "application/vnd.github.v3+json",
"Authorization": "token " + GITHUB_TOKEN,
}
url = f"{PYTORCH_GITHUB_API}/actions/runs/{args.workflow_run_id}/jobs?per_page=100"
jobs = fetch_jobs(url, REQUEST_HEADERS)
# Sort the jobs list by start time, in descending order. We want to get the most
# recently scheduled job on the runner.
jobs.sort(key=operator.itemgetter("started_at"), reverse=True)
for job in jobs:
if job["runner_name"] == args.runner_name:
return (job["id"], job["name"])
raise RuntimeError(f"Can't find job id for runner {args.runner_name}")
def set_output(name: str, val: Any) -> None:
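    # Prefer writing to the GITHUB_OUTPUT file; fall back to the deprecated ::set-output workflow command.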
if os.getenv("GITHUB_OUTPUT"):
with open(str(os.getenv("GITHUB_OUTPUT")), "a") as env:
print(f"{name}={val}", file=env)
print(f"setting {name}={val}")
else:
print(f"::set-output name={name}::{val}")
def main() -> None:
args = parse_args()
try:
# Get both the job ID and job name because we have already spent a request
# here to get the job info
job_id, job_name = find_job_id_name(args)
set_output("job-id", job_id)
set_output("job-name", job_name)
except Exception as e:
print(repr(e), file=sys.stderr)
print(f"workflow-{args.workflow_run_id}")
if __name__ == "__main__":
main()
| 948858.py | [
"CWE-939: Improper Authorization in Handler for Custom URL Scheme"
] |
import hashlib
import time
import urllib
import uuid
from .common import InfoExtractor
from .openload import PhantomJSwrapper
from ..utils import (
ExtractorError,
UserNotLive,
determine_ext,
int_or_none,
js_to_json,
parse_resolution,
str_or_none,
traverse_obj,
unescapeHTML,
url_or_none,
urlencode_postdata,
urljoin,
)
class DouyuBaseIE(InfoExtractor):
def _download_cryptojs_md5(self, video_id):
for url in [
# XXX: Do NOT use cdn.bootcdn.net; ref: https://sansec.io/research/polyfill-supply-chain-attack
'https://cdnjs.cloudflare.com/ajax/libs/crypto-js/3.1.2/rollups/md5.js',
'https://unpkg.com/[email protected]/rollups/md5.js',
]:
js_code = self._download_webpage(
url, video_id, note='Downloading signing dependency', fatal=False)
if js_code:
self.cache.store('douyu', 'crypto-js-md5', js_code)
return js_code
raise ExtractorError('Unable to download JS dependency (crypto-js/md5)')
def _get_cryptojs_md5(self, video_id):
return self.cache.load(
'douyu', 'crypto-js-md5', min_ver='2024.07.04') or self._download_cryptojs_md5(video_id)
def _calc_sign(self, sign_func, video_id, a):
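        # Runs the site's obfuscated signing function (ub98484234) in PhantomJS on top of a CryptoJS MD5 shim
        # and parses the query string it prints.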
b = uuid.uuid4().hex
c = round(time.time())
js_script = f'{self._get_cryptojs_md5(video_id)};{sign_func};console.log(ub98484234("{a}","{b}","{c}"))'
phantom = PhantomJSwrapper(self)
result = phantom.execute(js_script, video_id,
note='Executing JS signing script').strip()
return {i: v[0] for i, v in urllib.parse.parse_qs(result).items()}
def _search_js_sign_func(self, webpage, fatal=True):
        # The greedy optional prefix ensures the last possible script tag is matched
return self._search_regex(
r'(?:<script.*)?<script[^>]*>(.*?ub98484234.*?)</script>', webpage, 'JS sign func', fatal=fatal)
class DouyuTVIE(DouyuBaseIE):
IE_DESC = '斗鱼直播'
_VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(topic/\w+\?rid=|(?:[^/]+/))*(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'https://www.douyu.com/pigff',
'info_dict': {
'id': '24422',
'display_id': 'pigff',
'ext': 'mp4',
'title': 're:^【PIGFF】.* [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r'≥15级牌子看鱼吧置顶帖进粉丝vx群',
'thumbnail': str,
'uploader': 'pigff',
'is_live': True,
'live_status': 'is_live',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyutv.com/85982',
'info_dict': {
'id': '85982',
'display_id': '85982',
'ext': 'flv',
'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:746a2f7a253966a06755a912f0acc0d2',
'thumbnail': r're:^https?://.*\.png',
'uploader': 'douyu小漠',
'is_live': True,
},
'params': {
'skip_download': True,
},
'skip': 'Room not found',
}, {
'url': 'http://www.douyutv.com/17732',
'info_dict': {
'id': '17732',
'display_id': '17732',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.png',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.douyu.com/topic/ydxc?rid=6560603',
'info_dict': {
'id': '6560603',
'display_id': '6560603',
'ext': 'flv',
'title': 're:^阿余:新年快乐恭喜发财! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 're:.*直播时间.*',
'thumbnail': r're:^https?://.*\.png',
'uploader': '阿涛皎月Carry',
'live_status': 'is_live',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyu.com/xiaocang',
'only_matching': True,
}, {
# \"room_id\"
'url': 'http://www.douyu.com/t/lpl',
'only_matching': True,
}]
def _get_sign_func(self, room_id, video_id):
return self._download_json(
f'https://www.douyu.com/swf_api/homeH5Enc?rids={room_id}', video_id,
note='Getting signing script')['data'][f'room{room_id}']
def _extract_stream_formats(self, stream_formats):
formats = []
for stream_info in traverse_obj(stream_formats, (..., 'data')):
stream_url = urljoin(
traverse_obj(stream_info, 'rtmp_url'), traverse_obj(stream_info, 'rtmp_live'))
if stream_url:
rate_id = traverse_obj(stream_info, ('rate', {int_or_none}))
rate_info = traverse_obj(stream_info, ('multirates', lambda _, v: v['rate'] == rate_id), get_all=False)
ext = determine_ext(stream_url)
formats.append({
'url': stream_url,
'format_id': str_or_none(rate_id),
'ext': 'mp4' if ext == 'm3u8' else ext,
'protocol': 'm3u8_native' if ext == 'm3u8' else 'https',
'quality': rate_id % -10000 if rate_id is not None else None,
**traverse_obj(rate_info, {
'format': ('name', {str_or_none}),
'tbr': ('bit', {int_or_none}),
}),
})
return formats
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
room_id = self._search_regex(r'\$ROOM\.room_id\s*=\s*(\d+)', webpage, 'room id')
if self._search_regex(r'"videoLoop"\s*:\s*(\d+)', webpage, 'loop', default='') == '1':
raise UserNotLive('The channel is auto-playing VODs', video_id=video_id)
if self._search_regex(r'\$ROOM\.show_status\s*=\s*(\d+)', webpage, 'status', default='') == '2':
raise UserNotLive(video_id=video_id)
# Grab metadata from API
params = {
'aid': 'wp',
'client_sys': 'wp',
'time': int(time.time()),
}
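        # The API expects an "auth" parameter: an MD5 hex digest of the request path, the query string and a fixed salt.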
params['auth'] = hashlib.md5(
f'room/{room_id}?{urllib.parse.urlencode(params)}zNzMV1y4EMxOHS6I5WKm'.encode()).hexdigest()
room = traverse_obj(self._download_json(
f'http://www.douyutv.com/api/v1/room/{room_id}', video_id,
note='Downloading room info', query=params, fatal=False), 'data')
# 1 = live, 2 = offline
if traverse_obj(room, 'show_status') == '2':
raise UserNotLive(video_id=video_id)
js_sign_func = self._search_js_sign_func(webpage, fatal=False) or self._get_sign_func(room_id, video_id)
form_data = {
'rate': 0,
**self._calc_sign(js_sign_func, video_id, room_id),
}
stream_formats = [self._download_json(
f'https://www.douyu.com/lapi/live/getH5Play/{room_id}',
video_id, note='Downloading livestream format',
data=urlencode_postdata(form_data))]
for rate_id in traverse_obj(stream_formats[0], ('data', 'multirates', ..., 'rate')):
if rate_id != traverse_obj(stream_formats[0], ('data', 'rate')):
form_data['rate'] = rate_id
stream_formats.append(self._download_json(
f'https://www.douyu.com/lapi/live/getH5Play/{room_id}',
video_id, note=f'Downloading livestream format {rate_id}',
data=urlencode_postdata(form_data)))
return {
'id': room_id,
'formats': self._extract_stream_formats(stream_formats),
'is_live': True,
**traverse_obj(room, {
'display_id': ('url', {str}, {lambda i: i[1:]}),
'title': ('room_name', {unescapeHTML}),
'description': ('show_details', {str}),
'uploader': ('nickname', {str}),
'thumbnail': ('room_src', {url_or_none}),
}),
}
class DouyuShowIE(DouyuBaseIE):
_VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)'
_TESTS = [{
'url': 'https://v.douyu.com/show/mPyq7oVNe5Yv1gLY',
'info_dict': {
'id': 'mPyq7oVNe5Yv1gLY',
'ext': 'mp4',
'title': '四川人小时候的味道“蒜苗回锅肉”,传统菜不能丢,要常做来吃',
'duration': 633,
'thumbnail': str,
'uploader': '美食作家王刚V',
'uploader_id': 'OVAO4NVx1m7Q',
'timestamp': 1661850002,
'upload_date': '20220830',
'view_count': int,
'tags': ['美食', '美食综合'],
},
}, {
'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw',
'only_matching': True,
}]
_FORMATS = {
'super': '原画',
'high': '超清',
'normal': '高清',
}
_QUALITIES = {
'super': -1,
'high': -2,
'normal': -3,
}
_RESOLUTIONS = {
'super': '1920x1080',
'high': '1280x720',
'normal': '852x480',
}
def _real_extract(self, url):
url = url.replace('vmobile.', 'v.')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_info = self._search_json(
r'<script>\s*window\.\$DATA\s*=', webpage,
'video info', video_id, transform_source=js_to_json)
js_sign_func = self._search_js_sign_func(webpage)
form_data = {
'vid': video_id,
**self._calc_sign(js_sign_func, video_id, video_info['ROOM']['point_id']),
}
url_info = self._download_json(
'https://v.douyu.com/api/stream/getStreamUrl', video_id,
data=urlencode_postdata(form_data), note='Downloading video formats')
formats = []
for name, url in traverse_obj(url_info, ('data', 'thumb_video', {dict.items}, ...)):
video_url = traverse_obj(url, ('url', {url_or_none}))
if video_url:
ext = determine_ext(video_url)
formats.append({
'format': self._FORMATS.get(name),
'format_id': name,
'url': video_url,
'quality': self._QUALITIES.get(name),
'ext': 'mp4' if ext == 'm3u8' else ext,
'protocol': 'm3u8_native' if ext == 'm3u8' else 'https',
**parse_resolution(self._RESOLUTIONS.get(name)),
})
else:
self.to_screen(
f'"{self._FORMATS.get(name, name)}" format may require logging in. {self._login_hint()}')
return {
'id': video_id,
'formats': formats,
**traverse_obj(video_info, ('DATA', {
'title': ('content', 'title', {str}),
'uploader': ('content', 'author', {str}),
'uploader_id': ('content', 'up_id', {str_or_none}),
'duration': ('content', 'video_duration', {int_or_none}),
'thumbnail': ('content', 'video_pic', {url_or_none}),
'timestamp': ('content', 'create_time', {int_or_none}),
'view_count': ('content', 'view_num', {int_or_none}),
'tags': ('videoTag', ..., 'tagName', {str}),
})),
}
| 758317.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
import functools
import hashlib
import json
import time
import urllib.parse
from .common import InfoExtractor
from ..utils import (
ExtractorError,
OnDemandPagedList,
int_or_none,
jwt_decode_hs256,
mimetype2ext,
qualities,
traverse_obj,
try_call,
unified_timestamp,
)
class IwaraBaseIE(InfoExtractor):
_NETRC_MACHINE = 'iwara'
_USERTOKEN = None
_MEDIATOKEN = None
def _is_token_expired(self, token, token_type):
# User token TTL == ~3 weeks, Media token TTL == ~1 hour
if (try_call(lambda: jwt_decode_hs256(token)['exp']) or 0) <= int(time.time() - 120):
self.to_screen(f'{token_type} token has expired')
return True
def _get_user_token(self):
username, password = self._get_login_info()
if not username or not password:
return
user_token = IwaraBaseIE._USERTOKEN or self.cache.load(self._NETRC_MACHINE, username)
if not user_token or self._is_token_expired(user_token, 'User'):
response = self._download_json(
'https://api.iwara.tv/user/login', None, note='Logging in',
headers={'Content-Type': 'application/json'}, data=json.dumps({
'email': username,
'password': password,
}).encode(), expected_status=lambda x: True)
user_token = traverse_obj(response, ('token', {str}))
if not user_token:
error = traverse_obj(response, ('message', {str}))
if 'invalidLogin' in error:
raise ExtractorError('Invalid login credentials', expected=True)
else:
raise ExtractorError(f'Iwara API said: {error or "nothing"}')
self.cache.store(self._NETRC_MACHINE, username, user_token)
IwaraBaseIE._USERTOKEN = user_token
def _get_media_token(self):
self._get_user_token()
if not IwaraBaseIE._USERTOKEN:
return # user has not passed credentials
if not IwaraBaseIE._MEDIATOKEN or self._is_token_expired(IwaraBaseIE._MEDIATOKEN, 'Media'):
IwaraBaseIE._MEDIATOKEN = self._download_json(
'https://api.iwara.tv/user/token', None, note='Fetching media token',
data=b'', headers={
'Authorization': f'Bearer {IwaraBaseIE._USERTOKEN}',
'Content-Type': 'application/json',
})['accessToken']
return {'Authorization': f'Bearer {IwaraBaseIE._MEDIATOKEN}'}
def _perform_login(self, username, password):
self._get_media_token()
class IwaraIE(IwaraBaseIE):
IE_NAME = 'iwara'
_VALID_URL = r'https?://(?:www\.|ecchi\.)?iwara\.tv/videos?/(?P<id>[a-zA-Z0-9]+)'
_TESTS = [{
'url': 'https://www.iwara.tv/video/k2ayoueezfkx6gvq',
'info_dict': {
'id': 'k2ayoueezfkx6gvq',
'ext': 'mp4',
'age_limit': 18,
'title': 'Defeat of Irybelda - アイリベルダの敗北',
'description': 'md5:70278abebe706647a8b4cb04cf23e0d3',
'uploader': 'Inwerwm',
'uploader_id': 'inwerwm',
'tags': 'count:1',
'like_count': 6133,
'view_count': 1050343,
'comment_count': 1,
'timestamp': 1677843869,
'modified_timestamp': 1679056362,
},
'skip': 'this video cannot be played because of migration',
}, {
'url': 'https://iwara.tv/video/1ywe1sbkqwumpdxz5/',
'md5': '7645f966f069b8ec9210efd9130c9aad',
'info_dict': {
'id': '1ywe1sbkqwumpdxz5',
'ext': 'mp4',
'age_limit': 18,
'title': 'Aponia アポニア SEX Party Tonight 手の脱衣 巨乳 ',
'description': 'md5:3f60016fff22060eef1ef26d430b1f67',
'uploader': 'Lyu ya',
'uploader_id': 'user792540',
'tags': [
'uncategorized',
],
'like_count': int,
'view_count': int,
'comment_count': int,
'timestamp': 1678732213,
'modified_timestamp': int,
'thumbnail': 'https://files.iwara.tv/image/thumbnail/581d12b5-46f4-4f15-beb2-cfe2cde5d13d/thumbnail-00.jpg',
'modified_date': '20230614',
'upload_date': '20230313',
},
}, {
'url': 'https://iwara.tv/video/blggmfno8ghl725bg',
'info_dict': {
'id': 'blggmfno8ghl725bg',
'ext': 'mp4',
'age_limit': 18,
'title': 'お外でおしっこしちゃう猫耳ロリメイド',
'description': 'md5:0342ba9bf6db09edbbb28729657c3611',
'uploader': 'Fe_Kurosabi',
'uploader_id': 'fekurosabi',
'tags': [
'pee',
],
'like_count': int,
'view_count': int,
'comment_count': int,
'timestamp': 1598880567,
'modified_timestamp': int,
'upload_date': '20200831',
'modified_date': '20230605',
'thumbnail': 'https://files.iwara.tv/image/thumbnail/7693e881-d302-42a4-a780-f16d66b5dadd/thumbnail-00.jpg',
# 'availability': 'needs_auth',
},
}]
def _extract_formats(self, video_id, fileurl):
up = urllib.parse.urlparse(fileurl)
q = urllib.parse.parse_qs(up.query)
paths = up.path.rstrip('/').split('/')
# https://github.com/yt-dlp/yt-dlp/issues/6549#issuecomment-1473771047
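        # The X-Version header is a SHA-1 hex digest of the file id, the "expires" query value and a fixed salt, joined by underscores.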
x_version = hashlib.sha1('_'.join((paths[-1], q['expires'][0], '5nFp9kmbNnHdAFhaqMvt')).encode()).hexdigest()
preference = qualities(['preview', '360', '540', 'Source'])
files = self._download_json(fileurl, video_id, headers={'X-Version': x_version})
for fmt in files:
yield traverse_obj(fmt, {
'format_id': 'name',
'url': ('src', ('view', 'download'), {self._proto_relative_url}),
'ext': ('type', {mimetype2ext}),
'quality': ('name', {preference}),
'height': ('name', {int_or_none}),
}, get_all=False)
def _real_extract(self, url):
video_id = self._match_id(url)
username, _ = self._get_login_info()
video_data = self._download_json(
f'https://api.iwara.tv/video/{video_id}', video_id,
expected_status=lambda x: True, headers=self._get_media_token())
errmsg = video_data.get('message')
# at this point we can actually get uploaded user info, but do we need it?
if errmsg == 'errors.privateVideo':
self.raise_login_required('Private video. Login if you have permissions to watch', method='password')
elif errmsg == 'errors.notFound' and not username:
self.raise_login_required('Video may need login to view', method='password')
elif errmsg: # None if success
raise ExtractorError(f'Iwara says: {errmsg}')
if not video_data.get('fileUrl'):
if video_data.get('embedUrl'):
return self.url_result(video_data.get('embedUrl'))
raise ExtractorError('This video is unplayable', expected=True)
return {
'id': video_id,
'age_limit': 18 if video_data.get('rating') == 'ecchi' else 0, # ecchi is 'sexy' in Japanese
**traverse_obj(video_data, {
'title': 'title',
'description': 'body',
'uploader': ('user', 'name'),
'uploader_id': ('user', 'username'),
'tags': ('tags', ..., 'id'),
'like_count': 'numLikes',
'view_count': 'numViews',
'comment_count': 'numComments',
'timestamp': ('createdAt', {unified_timestamp}),
'modified_timestamp': ('updatedAt', {unified_timestamp}),
'thumbnail': ('file', 'id', {str}, {
lambda x: f'https://files.iwara.tv/image/thumbnail/{x}/thumbnail-00.jpg'}),
}),
'formats': list(self._extract_formats(video_id, video_data.get('fileUrl'))),
}
class IwaraUserIE(IwaraBaseIE):
_VALID_URL = r'https?://(?:www\.)?iwara\.tv/profile/(?P<id>[^/?#&]+)'
IE_NAME = 'iwara:user'
_PER_PAGE = 32
_TESTS = [{
'url': 'https://iwara.tv/profile/user792540/videos',
'info_dict': {
'id': 'user792540',
'title': 'Lyu ya',
},
'playlist_mincount': 70,
}, {
'url': 'https://iwara.tv/profile/theblackbirdcalls/videos',
'info_dict': {
'id': 'theblackbirdcalls',
'title': 'TheBlackbirdCalls',
},
'playlist_mincount': 723,
}, {
'url': 'https://iwara.tv/profile/user792540',
'only_matching': True,
}, {
'url': 'https://iwara.tv/profile/theblackbirdcalls',
'only_matching': True,
}, {
'url': 'https://www.iwara.tv/profile/lumymmd',
'info_dict': {
'id': 'lumymmd',
'title': 'Lumy MMD',
},
'playlist_mincount': 1,
}]
def _entries(self, playlist_id, user_id, page):
videos = self._download_json(
'https://api.iwara.tv/videos', playlist_id,
note=f'Downloading page {page}',
query={
'page': page,
'sort': 'date',
'user': user_id,
'limit': self._PER_PAGE,
}, headers=self._get_media_token())
for x in traverse_obj(videos, ('results', ..., 'id')):
yield self.url_result(f'https://iwara.tv/video/{x}')
def _real_extract(self, url):
playlist_id = self._match_id(url)
user_info = self._download_json(
f'https://api.iwara.tv/profile/{playlist_id}', playlist_id,
note='Requesting user info')
user_id = traverse_obj(user_info, ('user', 'id'))
return self.playlist_result(
OnDemandPagedList(
functools.partial(self._entries, playlist_id, user_id),
self._PER_PAGE),
playlist_id, traverse_obj(user_info, ('user', 'name')))
class IwaraPlaylistIE(IwaraBaseIE):
_VALID_URL = r'https?://(?:www\.)?iwara\.tv/playlist/(?P<id>[0-9a-f-]+)'
IE_NAME = 'iwara:playlist'
_PER_PAGE = 32
_TESTS = [{
'url': 'https://iwara.tv/playlist/458e5486-36a4-4ac0-b233-7e9eef01025f',
'info_dict': {
'id': '458e5486-36a4-4ac0-b233-7e9eef01025f',
},
'playlist_mincount': 3,
}]
def _entries(self, playlist_id, first_page, page):
videos = self._download_json(
'https://api.iwara.tv/videos', playlist_id, f'Downloading page {page}',
query={'page': page, 'limit': self._PER_PAGE},
headers=self._get_media_token()) if page else first_page
for x in traverse_obj(videos, ('results', ..., 'id')):
yield self.url_result(f'https://iwara.tv/video/{x}')
def _real_extract(self, url):
playlist_id = self._match_id(url)
page_0 = self._download_json(
f'https://api.iwara.tv/playlist/{playlist_id}?page=0&limit={self._PER_PAGE}', playlist_id,
note='Requesting playlist info', headers=self._get_media_token())
return self.playlist_result(
OnDemandPagedList(
functools.partial(self._entries, playlist_id, page_0),
self._PER_PAGE),
playlist_id, traverse_obj(page_0, ('title', 'name')))
| 837764.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
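A minimal sketch of how the X-Version header used by _extract_formats() above is derived; the file URL and expires value are made-up placeholders, only the salt and the hashing steps come from the extractor:

import hashlib
import urllib.parse

# Hypothetical fileUrl of the kind the Iwara API returns (placeholder id/expiry).
fileurl = 'https://files.iwara.tv/file/00000000-0000-0000-0000-000000000000?expires=1700000000'
up = urllib.parse.urlparse(fileurl)
q = urllib.parse.parse_qs(up.query)
file_id = up.path.rstrip('/').split('/')[-1]
# SHA1 over "<file id>_<expires>_<static salt>", as in _extract_formats().
x_version = hashlib.sha1(
    '_'.join((file_id, q['expires'][0], '5nFp9kmbNnHdAFhaqMvt')).encode()).hexdigest()
print(x_version)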
import hashlib
import random
from .common import InfoExtractor
from ..utils import (
clean_html,
int_or_none,
try_get,
)
class JamendoIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
licensing\.jamendo\.com/[^/]+|
(?:www\.)?jamendo\.com
)
/track/(?P<id>[0-9]+)(?:/(?P<display_id>[^/?#&]+))?
'''
_TESTS = [{
'url': 'https://www.jamendo.com/track/196219/stories-from-emona-i',
'md5': '6e9e82ed6db98678f171c25a8ed09ffd',
'info_dict': {
'id': '196219',
'display_id': 'stories-from-emona-i',
'ext': 'flac',
# 'title': 'Maya Filipič - Stories from Emona I',
'title': 'Stories from Emona I',
'artist': 'Maya Filipič',
'album': 'Between two worlds',
'track': 'Stories from Emona I',
'duration': 210,
'thumbnail': 'https://usercontent.jamendo.com?type=album&id=29279&width=300&trackid=196219',
'timestamp': 1217438117,
'upload_date': '20080730',
'license': 'by-nc-nd',
'view_count': int,
'like_count': int,
'average_rating': int,
'tags': ['piano', 'peaceful', 'newage', 'strings', 'upbeat'],
},
}, {
'url': 'https://licensing.jamendo.com/en/track/1496667/energetic-rock',
'only_matching': True,
}]
def _call_api(self, resource, resource_id, fatal=True):
path = f'/api/{resource}s'
rand = str(random.random())
return self._download_json(
'https://www.jamendo.com' + path, resource_id, fatal=fatal, query={
'id[]': resource_id,
}, headers={
'X-Jam-Call': f'${hashlib.sha1((path + rand).encode()).hexdigest()}*{rand}~',
})[0]
def _real_extract(self, url):
track_id, display_id = self._match_valid_url(url).groups()
# webpage = self._download_webpage(
# 'https://www.jamendo.com/track/' + track_id, track_id)
# models = self._parse_json(self._html_search_regex(
# r"data-bundled-models='([^']+)",
# webpage, 'bundled models'), track_id)
# track = models['track']['models'][0]
track = self._call_api('track', track_id)
title = track_name = track['name']
# get_model = lambda x: try_get(models, lambda y: y[x]['models'][0], dict) or {}
# artist = get_model('artist')
# artist_name = artist.get('name')
# if artist_name:
# title = '%s - %s' % (artist_name, title)
# album = get_model('album')
artist = self._call_api('artist', track.get('artistId'), fatal=False)
album = self._call_api('album', track.get('albumId'), fatal=False)
formats = [{
'url': f'https://{sub_domain}.jamendo.com/?trackid={track_id}&format={format_id}&from=app-97dab294',
'format_id': format_id,
'ext': ext,
'quality': quality,
} for quality, (format_id, sub_domain, ext) in enumerate((
('mp31', 'mp3l', 'mp3'),
('mp32', 'mp3d', 'mp3'),
('ogg1', 'ogg', 'ogg'),
('flac', 'flac', 'flac'),
))]
urls = []
thumbnails = []
for covers in (track.get('cover') or {}).values():
for cover_id, cover_url in covers.items():
if not cover_url or cover_url in urls:
continue
urls.append(cover_url)
size = int_or_none(cover_id.lstrip('size'))
thumbnails.append({
'id': cover_id,
'url': cover_url,
'width': size,
'height': size,
})
tags = []
for tag in (track.get('tags') or []):
tag_name = tag.get('name')
if not tag_name:
continue
tags.append(tag_name)
stats = track.get('stats') or {}
video_license = track.get('licenseCC') or []
return {
'id': track_id,
'display_id': display_id,
'thumbnails': thumbnails,
'title': title,
'description': track.get('description'),
'duration': int_or_none(track.get('duration')),
'artist': artist.get('name'),
'track': track_name,
'album': album.get('name'),
'formats': formats,
'license': '-'.join(video_license) if video_license else None,
'timestamp': int_or_none(track.get('dateCreated')),
'view_count': int_or_none(stats.get('listenedAll')),
'like_count': int_or_none(stats.get('favorited')),
'average_rating': int_or_none(stats.get('averageNote')),
'tags': tags,
}
class JamendoAlbumIE(JamendoIE): # XXX: Do not subclass from concrete IE
_VALID_URL = r'https?://(?:www\.)?jamendo\.com/album/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.jamendo.com/album/121486/duck-on-cover',
'info_dict': {
'id': '121486',
'title': 'Duck On Cover',
'description': 'md5:c2920eaeef07d7af5b96d7c64daf1239',
},
'playlist': [{
'md5': 'e1a2fcb42bda30dfac990212924149a8',
'info_dict': {
'id': '1032333',
'ext': 'flac',
'title': 'Warmachine',
'artist': 'Shearer',
'track': 'Warmachine',
'timestamp': 1368089771,
'upload_date': '20130509',
'view_count': int,
'thumbnail': 'https://usercontent.jamendo.com?type=album&id=121486&width=300&trackid=1032333',
'duration': 190,
'license': 'by',
'album': 'Duck On Cover',
'average_rating': 4,
'tags': ['rock', 'drums', 'bass', 'world', 'punk', 'neutral'],
'like_count': int,
},
}, {
'md5': '1f358d7b2f98edfe90fd55dac0799d50',
'info_dict': {
'id': '1032330',
'ext': 'flac',
'title': 'Without Your Ghost',
'artist': 'Shearer',
'track': 'Without Your Ghost',
'timestamp': 1368089771,
'upload_date': '20130509',
'duration': 192,
'tags': ['rock', 'drums', 'bass', 'world', 'punk'],
'album': 'Duck On Cover',
'thumbnail': 'https://usercontent.jamendo.com?type=album&id=121486&width=300&trackid=1032330',
'view_count': int,
'average_rating': 4,
'license': 'by',
'like_count': int,
},
}],
'params': {
'playlistend': 2,
},
}]
def _real_extract(self, url):
album_id = self._match_id(url)
album = self._call_api('album', album_id)
album_name = album.get('name')
entries = []
for track in (album.get('tracks') or []):
track_id = track.get('id')
if not track_id:
continue
track_id = str(track_id)
entries.append({
'_type': 'url_transparent',
'url': 'https://www.jamendo.com/track/' + track_id,
'ie_key': JamendoIE.ie_key(),
'id': track_id,
'album': album_name,
})
return self.playlist_result(
entries, album_id, album_name,
clean_html(try_get(album, lambda x: x['description']['en'], str)))
| 530858.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
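A minimal sketch of the X-Jam-Call header built in _call_api() above; the track id is taken from the test data, and the surrounding request is only indicated in a comment:

import hashlib
import random

resource, resource_id = 'track', '196219'
path = f'/api/{resource}s'
rand = str(random.random())
# Same construction as _call_api(): "$<sha1(path + rand)>*<rand>~"
x_jam_call = f'${hashlib.sha1((path + rand).encode()).hexdigest()}*{rand}~'
# The JSON request would then go to https://www.jamendo.com/api/tracks
# with query id[]=196219 and the X-Jam-Call header set to the value above.
print(x_jam_call)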
"""
Settings and configuration for Django.
Read values from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global_settings.py
for a list of all possible variables.
"""
import importlib
import os
import time
import traceback
import warnings
from pathlib import Path
import django
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.deprecation import RemovedInDjango60Warning
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
DEFAULT_STORAGE_ALIAS = "default"
STATICFILES_STORAGE_ALIAS = "staticfiles"
# RemovedInDjango60Warning.
FORMS_URLFIELD_ASSUME_HTTPS_DEPRECATED_MSG = (
"The FORMS_URLFIELD_ASSUME_HTTPS transitional setting is deprecated."
)
class SettingsReference(str):
"""
String subclass which references a current settings value. It's treated as
the value in memory but serializes to a settings.NAME attribute reference.
"""
def __new__(self, value, setting_name):
return str.__new__(self, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time settings are needed, if the user hasn't
configured settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE)
)
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return "<LazySettings [Unevaluated]>"
return '<LazySettings "%(settings_module)s">' % {
"settings_module": self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
"""Return the value of a setting and cache it in self.__dict__."""
if (_wrapped := self._wrapped) is empty:
self._setup(name)
_wrapped = self._wrapped
val = getattr(_wrapped, name)
# Special case some settings which require further modification.
# This is done here for performance reasons so the modified value is cached.
if name in {"MEDIA_URL", "STATIC_URL"} and val is not None:
val = self._add_script_prefix(val)
elif name == "SECRET_KEY" and not val:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
self.__dict__[name] = val
return val
def __setattr__(self, name, value):
"""
Set the value of setting. Clear all cached values if _wrapped changes
(@override_settings does this) or clear single values when set.
"""
if name == "_wrapped":
self.__dict__.clear()
else:
self.__dict__.pop(name, None)
super().__setattr__(name, value)
def __delattr__(self, name):
"""Delete a setting and clear it from cache if needed."""
super().__delattr__(name)
self.__dict__.pop(name, None)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError("Settings already configured.")
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
if not name.isupper():
raise TypeError("Setting %r must be uppercase." % name)
setattr(holder, name, value)
self._wrapped = holder
@staticmethod
def _add_script_prefix(value):
"""
Add SCRIPT_NAME prefix to relative paths.
Useful when the app is being served at a subpath and manually prefixing
subpath to STATIC_URL and MEDIA_URL in settings is inconvenient.
"""
# Don't apply prefix to absolute paths and URLs.
if value.startswith(("http://", "https://", "/")):
return value
from django.urls import get_script_prefix
return "%s%s" % (get_script_prefix(), value)
@property
def configured(self):
"""Return True if the settings have already been configured."""
return self._wrapped is not empty
def _show_deprecation_warning(self, message, category):
stack = traceback.extract_stack()
# Show a warning if the setting is used outside of Django.
# Stack index: -1 this line, -2 the property, -3 the
# LazyObject __getattribute__(), -4 the caller.
filename, _, _, _ = stack[-4]
if not filename.startswith(os.path.dirname(django.__file__)):
warnings.warn(message, category, stacklevel=2)
class Settings:
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"ALLOWED_HOSTS",
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
"SECRET_KEY_FALLBACKS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and not isinstance(
setting_value, (list, tuple)
):
raise ImproperlyConfigured(
"The %s setting must be a list or a tuple." % setting
)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if self.is_overridden("FORMS_URLFIELD_ASSUME_HTTPS"):
warnings.warn(
FORMS_URLFIELD_ASSUME_HTTPS_DEPRECATED_MSG,
RemovedInDjango60Warning,
)
if hasattr(time, "tzset") and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = Path("/usr/share/zoneinfo")
zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split("/"))
if zoneinfo_root.exists() and not zone_info_file.exists():
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ["TZ"] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
"cls": self.__class__.__name__,
"settings_module": self.SETTINGS_MODULE,
}
class UserSettingsHolder:
"""Holder for user configured settings."""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__["_deleted"] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if not name.isupper() or name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
if name == "FORMS_URLFIELD_ASSUME_HTTPS":
warnings.warn(
FORMS_URLFIELD_ASSUME_HTTPS_DEPRECATED_MSG,
RemovedInDjango60Warning,
)
super().__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super().__delattr__(name)
def __dir__(self):
return sorted(
s
for s in [*self.__dict__, *dir(self.default_settings)]
if s not in self._deleted
)
def is_overridden(self, setting):
deleted = setting in self._deleted
set_locally = setting in self.__dict__
set_on_default = getattr(
self.default_settings, "is_overridden", lambda s: False
)(setting)
return deleted or set_locally or set_on_default
def __repr__(self):
return "<%(cls)s>" % {
"cls": self.__class__.__name__,
}
settings = LazySettings()
| 359100.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
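A minimal sketch of the two ways LazySettings above gets populated, assuming Django is installed; the setting values are illustrative only:

# Option 1: point DJANGO_SETTINGS_MODULE at a settings module before first access,
# e.g. os.environ["DJANGO_SETTINGS_MODULE"] = "myproject.settings"  (hypothetical module).
# Option 2: configure manually, which installs a UserSettingsHolder instead.
from django.conf import settings

if not settings.configured:
    settings.configure(DEBUG=True, ALLOWED_HOSTS=["localhost"])
print(settings.DEBUG)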
"Misc. utility functions/classes for admin documentation generator."
import re
from email.errors import HeaderParseError
from email.parser import HeaderParser
from inspect import cleandoc
from django.urls import reverse
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_safe
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def get_view_name(view_func):
if hasattr(view_func, "view_class"):
klass = view_func.view_class
return f"{klass.__module__}.{klass.__qualname__}"
mod_name = view_func.__module__
view_name = getattr(view_func, "__qualname__", view_func.__class__.__name__)
return mod_name + "." + view_name
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Return (title, body, metadata).
"""
if not docstring:
return "", "", {}
docstring = cleandoc(docstring)
parts = re.split(r"\n{2,}", docstring)
title = parts[0]
if len(parts) == 1:
body = ""
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
def parse_rst(text, default_reference_context, thing_being_parsed=None):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
"doctitle_xform": True,
"initial_header_level": 3,
"default_reference_context": default_reference_context,
"link_base": reverse("django-admindocs-docroot").rstrip("/"),
"raw_enabled": False,
"file_insertion_enabled": False,
}
thing_being_parsed = thing_being_parsed and "<%s>" % thing_being_parsed
# Wrap ``text`` in some reST that sets the default role to ``cmsreference``,
# then restores it.
source = """
.. default-role:: cmsreference
%s
.. default-role::
"""
parts = docutils.core.publish_parts(
source % text,
source_path=thing_being_parsed,
destination_path=None,
writer_name="html",
settings_overrides=overrides,
)
return mark_safe(parts["fragment"])
#
# reST roles
#
ROLES = {
"model": "%s/models/%s/",
"view": "%s/views/%s/",
"template": "%s/templates/%s/",
"filter": "%s/filters/#%s",
"tag": "%s/tags/#%s",
}
def create_reference_role(rolename, urlbase):
# Views and template names are case-sensitive.
is_case_sensitive = rolename in ["template", "view"]
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None:
options = {}
node = docutils.nodes.reference(
rawtext,
text,
refuri=(
urlbase
% (
inliner.document.settings.link_base,
text if is_case_sensitive else text.lower(),
)
),
**options,
)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(
name, rawtext, text, lineno, inliner, options=None, content=None
):
if options is None:
options = {}
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(
rawtext,
text,
refuri=(
ROLES[context]
% (
inliner.document.settings.link_base,
text.lower(),
)
),
**options,
)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role(
"cmsreference", default_reference_role
)
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
# Match the beginning of a named, unnamed, or non-capturing group.
named_group_matcher = _lazy_re_compile(r"\(\?P(<\w+>)")
unnamed_group_matcher = _lazy_re_compile(r"\(")
non_capturing_group_matcher = _lazy_re_compile(r"\(\?\:")
def replace_metacharacters(pattern):
"""Remove unescaped metacharacters from the pattern."""
return re.sub(
r"((?:^|(?<!\\))(?:\\\\)*)(\\?)([?*+^$]|\\[bBAZ])",
lambda m: m[1] + m[3] if m[2] else m[1],
pattern,
)
def _get_group_start_end(start, end, pattern):
# Handle nested parentheses, e.g. '^(?P<a>(x|y))/b' or '^b/((x|y)\w+)$'.
unmatched_open_brackets, prev_char = 1, None
for idx, val in enumerate(pattern[end:]):
# Check for unescaped `(` and `)`. They mark the start and end of a
# nested group.
if val == "(" and prev_char != "\\":
unmatched_open_brackets += 1
elif val == ")" and prev_char != "\\":
unmatched_open_brackets -= 1
prev_char = val
# If brackets are balanced, the end of the string for the current named
# capture group pattern has been reached.
if unmatched_open_brackets == 0:
return start, end + idx + 1
def _find_groups(pattern, group_matcher):
prev_end = None
for match in group_matcher.finditer(pattern):
if indices := _get_group_start_end(match.start(0), match.end(0), pattern):
start, end = indices
if prev_end and start > prev_end or not prev_end:
yield start, end, match
prev_end = end
def replace_named_groups(pattern):
r"""
Find named groups in `pattern` and replace them with the group name. E.g.,
1. ^(?P<a>\w+)/b/(\w+)$ ==> ^<a>/b/(\w+)$
2. ^(?P<a>\w+)/b/(?P<c>\w+)/$ ==> ^<a>/b/<c>/$
3. ^(?P<a>\w+)/b/(\w+) ==> ^<a>/b/(\w+)
4. ^(?P<a>\w+)/b/(?P<c>\w+) ==> ^<a>/b/<c>
"""
group_pattern_and_name = [
(pattern[start:end], match[1])
for start, end, match in _find_groups(pattern, named_group_matcher)
]
for group_pattern, group_name in group_pattern_and_name:
pattern = pattern.replace(group_pattern, group_name)
return pattern
def replace_unnamed_groups(pattern):
r"""
Find unnamed groups in `pattern` and replace them with '<var>'. E.g.,
1. ^(?P<a>\w+)/b/(\w+)$ ==> ^(?P<a>\w+)/b/<var>$
2. ^(?P<a>\w+)/b/((x|y)\w+)$ ==> ^(?P<a>\w+)/b/<var>$
3. ^(?P<a>\w+)/b/(\w+) ==> ^(?P<a>\w+)/b/<var>
4. ^(?P<a>\w+)/b/((x|y)\w+) ==> ^(?P<a>\w+)/b/<var>
"""
final_pattern, prev_end = "", None
for start, end, _ in _find_groups(pattern, unnamed_group_matcher):
if prev_end:
final_pattern += pattern[prev_end:start]
final_pattern += pattern[:start] + "<var>"
prev_end = end
return final_pattern + pattern[prev_end:]
def remove_non_capturing_groups(pattern):
r"""
Find non-capturing groups in the given `pattern` and remove them, e.g.
    1. (?P<a>\w+)/b/(?:\w+)c(?:\w+) => (?P<a>\w+)/b/c
2. ^(?:\w+(?:\w+))a => ^a
3. ^a(?:\w+)/b(?:\w+) => ^a/b
"""
group_start_end_indices = _find_groups(pattern, non_capturing_group_matcher)
final_pattern, prev_end = "", None
for start, end, _ in group_start_end_indices:
final_pattern += pattern[prev_end:start]
prev_end = end
return final_pattern + pattern[prev_end:]
| 429723.py | [
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')"
] |
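A standalone sketch of what the pattern-simplification helpers above produce for their docstring examples, assuming Django is importable:

from django.contrib.admindocs.utils import replace_named_groups, replace_unnamed_groups

print(replace_named_groups(r'^(?P<a>\w+)/b/(?P<c>\w+)/$'))  # ^<a>/b/<c>/$
print(replace_unnamed_groups(r'^(?P<a>\w+)/b/(\w+)$'))      # ^(?P<a>\w+)/b/<var>$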
"""
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.operations import BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geos.geometry import GEOSGeometry, GEOSGeometryBase
from django.contrib.gis.geos.prototypes.io import wkb_r
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.operations import DatabaseOperations
DEFAULT_TOLERANCE = "0.05"
class SDOOperator(SpatialOperator):
sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODWithin(SpatialOperator):
sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
sql_template = (
"SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'"
% DEFAULT_TOLERANCE
)
class SDORelate(SpatialOperator):
sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"
def check_relate_argument(self, arg):
masks = (
"TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|"
"CONTAINS|COVERS|ANYINTERACT|ON"
)
mask_regex = re.compile(r"^(%s)(\+(%s))*$" % (masks, masks), re.I)
if not isinstance(arg, str) or not mask_regex.match(arg):
raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)
def as_sql(self, connection, lookup, template_params, sql_params):
template_params["mask"] = sql_params[-1]
return super().as_sql(connection, lookup, template_params, sql_params[:-1])
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
name = "oracle"
oracle = True
disallowed_aggregates = (models.Collect, models.Extent3D, models.MakeLine)
Adapter = OracleSpatialAdapter
extent = "SDO_AGGR_MBR"
unionagg = "SDO_AGGR_UNION"
from_text = "SDO_GEOMETRY"
function_names = {
"Area": "SDO_GEOM.SDO_AREA",
"AsGeoJSON": "SDO_UTIL.TO_GEOJSON",
"AsWKB": "SDO_UTIL.TO_WKBGEOMETRY",
"AsWKT": "SDO_UTIL.TO_WKTGEOMETRY",
"BoundingCircle": "SDO_GEOM.SDO_MBC",
"Centroid": "SDO_GEOM.SDO_CENTROID",
"Difference": "SDO_GEOM.SDO_DIFFERENCE",
"Distance": "SDO_GEOM.SDO_DISTANCE",
"Envelope": "SDO_GEOM_MBR",
"FromWKB": "SDO_UTIL.FROM_WKBGEOMETRY",
"FromWKT": "SDO_UTIL.FROM_WKTGEOMETRY",
"Intersection": "SDO_GEOM.SDO_INTERSECTION",
"IsValid": "SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT",
"Length": "SDO_GEOM.SDO_LENGTH",
"NumGeometries": "SDO_UTIL.GETNUMELEM",
"NumPoints": "SDO_UTIL.GETNUMVERTICES",
"Perimeter": "SDO_GEOM.SDO_LENGTH",
"PointOnSurface": "SDO_GEOM.SDO_POINTONSURFACE",
"Reverse": "SDO_UTIL.REVERSE_LINESTRING",
"SymDifference": "SDO_GEOM.SDO_XOR",
"Transform": "SDO_CS.TRANSFORM",
"Union": "SDO_GEOM.SDO_UNION",
}
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = "SDO_UTIL.TO_WKBGEOMETRY(%s)"
gis_operators = {
"contains": SDOOperator(func="SDO_CONTAINS"),
"coveredby": SDOOperator(func="SDO_COVEREDBY"),
"covers": SDOOperator(func="SDO_COVERS"),
"disjoint": SDODisjoint(),
"intersects": SDOOperator(
func="SDO_OVERLAPBDYINTERSECT"
), # TODO: Is this really the same as ST_Intersects()?
"equals": SDOOperator(func="SDO_EQUAL"),
"exact": SDOOperator(func="SDO_EQUAL"),
"overlaps": SDOOperator(func="SDO_OVERLAPS"),
"same_as": SDOOperator(func="SDO_EQUAL"),
# Oracle uses a different syntax, e.g., 'mask=inside+touch'
"relate": SDORelate(),
"touches": SDOOperator(func="SDO_TOUCH"),
"within": SDOOperator(func="SDO_INSIDE"),
"dwithin": SDODWithin(),
}
unsupported_functions = {
"AsKML",
"AsSVG",
"Azimuth",
"ClosestPoint",
"ForcePolygonCW",
"GeoHash",
"GeometryDistance",
"IsEmpty",
"LineLocatePoint",
"MakeValid",
"MemSize",
"Scale",
"SnapToGrid",
"Translate",
}
def geo_quote_name(self, name):
return super().geo_quote_name(name).upper()
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = GEOSGeometry(memoryview(clob.read()))
gtype = str(ext_geom.geom_type)
if gtype == "Polygon":
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == "Point":
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception(
"Unexpected geometry type returned for extent: %s" % gtype
)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def geo_db_type(self, f):
"""
Return the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return "MDSYS.SDO_GEOMETRY"
def get_distance(self, f, value, lookup_type):
"""
Return the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(
value, Distance.unit_attname(f.units_name(self.connection))
)
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == "dwithin":
dist_param = "distance=%s" % dist_param
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
if value is None:
return "NULL"
return super().get_geom_placeholder(f, value, compiler)
def spatial_aggregate_name(self, agg_name):
"""
Return the spatial aggregate SQL name.
"""
agg_name = "unionagg" if agg_name.lower() == "union" else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
return OracleGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
return OracleSpatialRefSys
def modify_insert_params(self, placeholder, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888.
"""
if placeholder == "NULL":
return []
return super().modify_insert_params(placeholder, params)
def get_geometry_converter(self, expression):
read = wkb_r().read
srid = expression.output_field.srid
if srid == -1:
srid = None
geom_class = expression.output_field.geom_class
def converter(value, expression, connection):
if value is not None:
geom = GEOSGeometryBase(read(memoryview(value.read())), geom_class)
if srid:
geom.srid = srid
return geom
return converter
def get_area_att_for_field(self, field):
return "sq_m"
| 783587.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
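A pure-string sketch of how SDODWithin and get_distance() above combine for a dwithin lookup; the column and bind-placeholder names are illustrative, not Oracle output:

sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
sql = sql_template % {'lhs': '"CITY"."POINT"', 'rhs': 'SDO_GEOMETRY(:geom)'}
dist_param = 'distance=%s' % 500  # get_distance() prefixes dwithin values like this
print(sql)         # SDO_WITHIN_DISTANCE("CITY"."POINT", SDO_GEOMETRY(:geom), %s) = 'TRUE'
print(dist_param)  # bound in place of the remaining %s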
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLNet SQUAD finetuning runner in tf2.0."""
import functools
import json
import os
import pickle
# Import libraries
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf, tf_keras
# pylint: disable=unused-import
import sentencepiece as spm
from official.common import distribute_utils
from official.legacy.xlnet import common_flags
from official.legacy.xlnet import data_utils
from official.legacy.xlnet import optimization
from official.legacy.xlnet import squad_utils
from official.legacy.xlnet import training_utils
from official.legacy.xlnet import xlnet_config
from official.legacy.xlnet import xlnet_modeling as modeling
flags.DEFINE_string(
"test_feature_path", default=None, help="Path to feature of test set.")
flags.DEFINE_integer("query_len", default=64, help="Max query length.")
flags.DEFINE_integer("start_n_top", default=5, help="Beam size for span start.")
flags.DEFINE_integer("end_n_top", default=5, help="Beam size for span end.")
flags.DEFINE_string(
"predict_dir", default=None, help="Path to write predictions.")
flags.DEFINE_string(
"predict_file", default=None, help="Path to json file of test set.")
flags.DEFINE_integer(
"n_best_size", default=5, help="n best size for predictions.")
flags.DEFINE_integer("max_answer_length", default=64, help="Max answer length.")
# Data preprocessing config
flags.DEFINE_string(
"spiece_model_file", default=None, help="Sentence Piece model path.")
flags.DEFINE_integer("max_seq_length", default=512, help="Max sequence length.")
flags.DEFINE_integer("max_query_length", default=64, help="Max query length.")
flags.DEFINE_integer("doc_stride", default=128, help="Doc stride.")
FLAGS = flags.FLAGS
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tok_start_to_orig_index,
tok_end_to_orig_index,
token_is_max_context,
input_ids,
input_mask,
p_mask,
segment_ids,
paragraph_len,
cls_index,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tok_start_to_orig_index = tok_start_to_orig_index
self.tok_end_to_orig_index = tok_end_to_orig_index
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.p_mask = p_mask
self.segment_ids = segment_ids
self.paragraph_len = paragraph_len
self.cls_index = cls_index
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
# pylint: disable=unused-argument
def run_evaluation(strategy, test_input_fn, eval_examples, eval_features,
original_data, eval_steps, input_meta_data, model,
current_step, eval_summary_writer):
"""Run evaluation for SQUAD task.
Args:
strategy: distribution strategy.
test_input_fn: input function for evaluation data.
eval_examples: tf.Examples of the evaluation set.
eval_features: Feature objects of the evaluation set.
original_data: The original json data for the evaluation set.
eval_steps: total number of evaluation steps.
input_meta_data: input meta data.
model: keras model object.
current_step: current training step.
eval_summary_writer: summary writer used to record evaluation metrics.
Returns:
A float metric, F1 score.
"""
def _test_step_fn(inputs):
"""Replicated validation step."""
inputs["mems"] = None
res = model(inputs, training=False)
return res, inputs["unique_ids"]
@tf.function
def _run_evaluation(test_iterator):
"""Runs validation steps."""
res, unique_ids = strategy.run(
_test_step_fn, args=(next(test_iterator),))
return res, unique_ids
test_iterator = data_utils.get_input_iterator(test_input_fn, strategy)
cur_results = []
for _ in range(eval_steps):
results, unique_ids = _run_evaluation(test_iterator)
unique_ids = strategy.experimental_local_results(unique_ids)
for result_key in results:
results[result_key] = (
strategy.experimental_local_results(results[result_key]))
for core_i in range(strategy.num_replicas_in_sync):
bsz = int(input_meta_data["test_batch_size"] /
strategy.num_replicas_in_sync)
for j in range(bsz):
result = {}
for result_key in results:
result[result_key] = results[result_key][core_i].numpy()[j]
result["unique_ids"] = unique_ids[core_i].numpy()[j]
        # We appended a fake example to the dev set so that its size divides
        # evenly by test_batch_size. Ignore this fake example during
        # evaluation.
if result["unique_ids"] == 1000012047:
continue
unique_id = int(result["unique_ids"])
start_top_log_probs = ([
float(x) for x in result["start_top_log_probs"].flat
])
start_top_index = [int(x) for x in result["start_top_index"].flat]
end_top_log_probs = ([
float(x) for x in result["end_top_log_probs"].flat
])
end_top_index = [int(x) for x in result["end_top_index"].flat]
cls_logits = float(result["cls_logits"].flat[0])
cur_results.append(
squad_utils.RawResult(
unique_id=unique_id,
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits))
if len(cur_results) % 1000 == 0:
logging.info("Processing example: %d", len(cur_results))
output_prediction_file = os.path.join(input_meta_data["predict_dir"],
"predictions.json")
output_nbest_file = os.path.join(input_meta_data["predict_dir"],
"nbest_predictions.json")
output_null_log_odds_file = os.path.join(input_meta_data["predict_dir"],
"null_odds.json")
results = squad_utils.write_predictions(
eval_examples, eval_features, cur_results, input_meta_data["n_best_size"],
input_meta_data["max_answer_length"], output_prediction_file,
output_nbest_file, output_null_log_odds_file, original_data,
input_meta_data["start_n_top"], input_meta_data["end_n_top"])
# Log current results.
log_str = "Result | "
for key, val in results.items():
log_str += "{} {} | ".format(key, val)
logging.info(log_str)
with eval_summary_writer.as_default():
tf.summary.scalar("best_f1", results["best_f1"], step=current_step)
tf.summary.scalar("best_exact", results["best_exact"], step=current_step)
eval_summary_writer.flush()
return results["best_f1"]
def get_qaxlnet_model(model_config, run_config, start_n_top, end_n_top):
model = modeling.QAXLNetModel(
model_config,
run_config,
start_n_top=start_n_top,
end_n_top=end_n_top,
name="model")
return model
def main(unused_argv):
del unused_argv
strategy = distribute_utils.get_distribution_strategy(
distribution_strategy=FLAGS.strategy_type,
tpu_address=FLAGS.tpu)
if strategy:
logging.info("***** Number of cores used : %d",
strategy.num_replicas_in_sync)
train_input_fn = functools.partial(data_utils.get_squad_input_data,
FLAGS.train_batch_size, FLAGS.seq_len,
FLAGS.query_len, strategy, True,
FLAGS.train_tfrecord_path)
test_input_fn = functools.partial(data_utils.get_squad_input_data,
FLAGS.test_batch_size, FLAGS.seq_len,
FLAGS.query_len, strategy, False,
FLAGS.test_tfrecord_path)
total_training_steps = FLAGS.train_steps
steps_per_loop = FLAGS.iterations
eval_steps = int(FLAGS.test_data_size / FLAGS.test_batch_size)
optimizer, learning_rate_fn = optimization.create_optimizer(
FLAGS.learning_rate,
total_training_steps,
FLAGS.warmup_steps,
adam_epsilon=FLAGS.adam_epsilon)
model_config = xlnet_config.XLNetConfig(FLAGS)
run_config = xlnet_config.create_run_config(True, False, FLAGS)
input_meta_data = {}
input_meta_data["start_n_top"] = FLAGS.start_n_top
input_meta_data["end_n_top"] = FLAGS.end_n_top
input_meta_data["lr_layer_decay_rate"] = FLAGS.lr_layer_decay_rate
input_meta_data["predict_dir"] = FLAGS.predict_dir
input_meta_data["n_best_size"] = FLAGS.n_best_size
input_meta_data["max_answer_length"] = FLAGS.max_answer_length
input_meta_data["test_batch_size"] = FLAGS.test_batch_size
input_meta_data["batch_size_per_core"] = int(FLAGS.train_batch_size /
strategy.num_replicas_in_sync)
input_meta_data["mem_len"] = FLAGS.mem_len
model_fn = functools.partial(get_qaxlnet_model, model_config, run_config,
FLAGS.start_n_top, FLAGS.end_n_top)
eval_examples = squad_utils.read_squad_examples(
FLAGS.predict_file, is_training=False)
if FLAGS.test_feature_path:
logging.info("start reading pickle file...")
with tf.io.gfile.GFile(FLAGS.test_feature_path, "rb") as f:
eval_features = pickle.load(f)
logging.info("finishing reading pickle file...")
else:
sp_model = spm.SentencePieceProcessor()
sp_model.LoadFromSerializedProto(
tf.io.gfile.GFile(FLAGS.spiece_model_file, "rb").read())
spm_basename = os.path.basename(FLAGS.spiece_model_file)
eval_features = squad_utils.create_eval_data(
spm_basename, sp_model, eval_examples, FLAGS.max_seq_length,
FLAGS.max_query_length, FLAGS.doc_stride, FLAGS.uncased)
with tf.io.gfile.GFile(FLAGS.predict_file) as f:
original_data = json.load(f)["data"]
eval_fn = functools.partial(run_evaluation, strategy, test_input_fn,
eval_examples, eval_features, original_data,
eval_steps, input_meta_data)
training_utils.train(
strategy=strategy,
model_fn=model_fn,
input_meta_data=input_meta_data,
eval_fn=eval_fn,
metric_fn=None,
train_input_fn=train_input_fn,
init_checkpoint=FLAGS.init_checkpoint,
init_from_transformerxl=FLAGS.init_from_transformerxl,
total_training_steps=total_training_steps,
steps_per_loop=steps_per_loop,
optimizer=optimizer,
learning_rate_fn=learning_rate_fn,
model_dir=FLAGS.model_dir,
save_steps=FLAGS.save_steps)
if __name__ == "__main__":
app.run(main)
| 778047.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
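A sketch of the eval-feature cache round trip behind --test_feature_path above: the create_eval_data() output is pickled once and re-read on later runs. The path is a placeholder and the dump side is assumed; only the read side appears in the runner:

import pickle
import tensorflow as tf

features_path = '/tmp/squad_eval_features.pickle'  # hypothetical cache location
eval_features = []  # placeholder for the list returned by squad_utils.create_eval_data(...)

with tf.io.gfile.GFile(features_path, 'wb') as f:
    pickle.dump(eval_features, f)
with tf.io.gfile.GFile(features_path, 'rb') as f:
    eval_features = pickle.load(f)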
# Copyright 2024 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized checkpoint loader."""
import re
from typing import List, Tuple
from absl import logging
import numpy as np
import tensorflow as tf, tf_keras
# pylint:disable=line-too-long
_VMAE_CKPT_MAPPING = [
(r'encoder/transformer_encoder_block_(.*?)/self_attention/query/kernel:0',
r'blocks.\1.attn.q.weight'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/query/bias:0',
r'blocks.\1.attn.q.bias'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/value/kernel:0',
r'blocks.\1.attn.v.weight'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/value/bias:0',
r'blocks.\1.attn.v.bias'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/key/kernel:0',
r'blocks.\1.attn.k.weight'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/key/bias:0',
r'blocks.\1.attn.k.bias'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/attention_output/kernel:0',
r'blocks.\1.attn.proj.weight'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention/attention_output/bias:0',
r'blocks.\1.attn.proj.bias'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention_layer_norm/gamma:0',
r'blocks.\1.norm1.weight'),
(r'encoder/transformer_encoder_block_(.*?)/self_attention_layer_norm/beta:0',
r'blocks.\1.norm1.bias'),
(r'encoder/transformer_encoder_block_(.*?)/intermediate/kernel:0',
r'blocks.\1.mlp.fc1.weight'),
(r'encoder/transformer_encoder_block_(.*?)/intermediate/bias:0',
r'blocks.\1.mlp.fc1.bias'),
(r'encoder/transformer_encoder_block_(.*?)/output/kernel:0',
r'blocks.\1.mlp.fc2.weight'),
(r'encoder/transformer_encoder_block_(.*?)/output/bias:0',
r'blocks.\1.mlp.fc2.bias'),
(r'encoder/transformer_encoder_block_(.*?)/output_layer_norm/gamma:0',
r'blocks.\1.norm2.weight'),
(r'encoder/transformer_encoder_block_(.*?)/output_layer_norm/beta:0',
r'blocks.\1.norm2.bias'),
# ======= final layer norm
(r'encoder/layer_normalization/gamma:0', r'norm.weight'),
(r'encoder/layer_normalization/beta:0', r'norm.bias'),
# ======= input projection layer
(r'conv3d/kernel:0', r'patch_embed.proj.weight'),
(r'conv3d/bias:0', r'patch_embed.proj.bias'),
# ======= agg embedding.
(r'token_layer/cls:0', r'cls_token'),
# ======= positional embedding.
(r'add_separable_position_embs/pos_embedding_time:0',
r'pos_embed_temporal'),
(r'add_separable_position_embs/pos_embedding_space:0',
r'pos_embed_spatial'),
]
# pylint:enable=line-too-long
class CheckpointLoaderBase(object):
"""Checkpoint loader object."""
def __init__(self, model: tf_keras.Model,
init_checkpoint: str,
init_checkpoint_type: str):
self._init_checkpoint = init_checkpoint
self._init_checkpoint_type = init_checkpoint_type
ckpt_dir_or_file = self._init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
self._load_checkpoint(model, ckpt_dir_or_file)
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def _load_checkpoint(self, model: tf_keras.Model, ckpt_dir_or_file: str):
"""Loads checkpoint."""
if self._init_checkpoint_type == 'all':
ckpt = tf.train.Checkpoint(model=model)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self._init_checkpoint_type == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
'Unrecognized init_checkpoint_type: %s' % self._init_checkpoint_type)
def _remap_variable_name(self,
variable_name: str,
name_mapping: List[Tuple[str, str]]):
"""Remaps variable name given the mapping."""
for source, dest in name_mapping:
variable_name = re.sub(source, dest, variable_name)
return variable_name
class CheckpointLoaderVMAE(CheckpointLoaderBase):
"""Checkpoint loader for Video MAE."""
def _maybe_transpose_pytorch_weight(self, ckpt_weight):
"""Transposes pytorch weight to macth with the Tensorflow convention."""
if len(ckpt_weight.shape) == 2:
# fc kernel
ckpt_weight = np.transpose(ckpt_weight, [1, 0])
elif len(ckpt_weight.shape) == 4:
# conv2d kernel
ckpt_weight = np.transpose(ckpt_weight, [2, 3, 1, 0])
elif len(ckpt_weight.shape) == 5:
# conv3d kernel
ckpt_weight = np.transpose(ckpt_weight, [2, 3, 4, 1, 0])
return ckpt_weight
def _customized_vmae_initialize(self,
model: tf_keras.Model,
ckpt_dir_or_file: str):
"""Loads pretrained Video MAE checkpoint."""
with tf.io.gfile.GFile(ckpt_dir_or_file, 'rb') as ckpt:
weights = np.load(ckpt, allow_pickle=True)
ckpt_names = list(weights[()].keys())
ckpt_names = [n for n in ckpt_names if 'pred_head' not in n]
skipped = []
loaded = []
for krs_w in model.weights:
krs_name = krs_w.name
# Handle the first block naming.
krs_name = krs_name.replace('encoder/transformer_encoder_block/',
'encoder/transformer_encoder_block_0/')
ckpt_name = self._remap_variable_name(krs_name, _VMAE_CKPT_MAPPING)
if ckpt_name in ckpt_names:
ckpt_weight = weights[()][ckpt_name]
ckpt_weight = self._maybe_transpose_pytorch_weight(ckpt_weight)
if ckpt_weight.shape == krs_w.shape:
krs_w.assign(ckpt_weight)
loaded.append(ckpt_name)
elif 'kernel' in krs_name and any(
[keyword in krs_name for keyword in ['key', 'query', 'value']]):
cin, cout = ckpt_weight.shape
num_heads = krs_w.shape[1]
ckpt_weight = tf.reshape(
ckpt_weight, [cin, num_heads, cout // num_heads])
krs_w.assign(ckpt_weight)
loaded.append(ckpt_name)
elif 'bias' in krs_name and any(
[keyword in krs_name for keyword in ['key', 'query', 'value']]):
cout = ckpt_weight.shape[0]
num_heads = krs_w.shape[0]
ckpt_weight = tf.reshape(ckpt_weight, [num_heads, cout // num_heads])
krs_w.assign(ckpt_weight)
loaded.append(ckpt_name)
elif 'kernel' in krs_name and 'attention_output' in krs_name:
cin, cout = ckpt_weight.shape
num_heads = krs_w.shape[0]
ckpt_weight = tf.reshape(ckpt_weight,
[num_heads, cin // num_heads, cout])
krs_w.assign(ckpt_weight)
loaded.append(ckpt_name)
else:
skipped.append(krs_name)
else:
skipped.append(krs_name)
leftover = set(ckpt_names) - set(loaded)
logging.info('skipped: %s', skipped)
logging.info('leftover: %s', leftover)
if any([('encoder' in v or 'conv3d' in v or 'pos_embedding' in v)
for v in skipped]):
raise ValueError('ViT backbone is only partially loaded.')
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def _load_checkpoint(self, model: tf_keras.Model, ckpt_dir_or_file: str):
"""Loads checkpoint."""
self._customized_vmae_initialize(
model=model, ckpt_dir_or_file=ckpt_dir_or_file)
def get_checkpoint_loader(
model: tf_keras.Model, init_checkpoint: str, init_checkpoint_type: str):
"""Gets the corresponding checkpoint loader."""
if init_checkpoint_type == 'customized_vmae':
return CheckpointLoaderVMAE(
model=model,
init_checkpoint=init_checkpoint,
init_checkpoint_type=init_checkpoint_type)
else:
return CheckpointLoaderBase(
model=model,
init_checkpoint=init_checkpoint,
init_checkpoint_type=init_checkpoint_type)
| 036289.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
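A small self-contained example of the name remapping that _remap_variable_name() above performs, using the first _VMAE_CKPT_MAPPING rule:

import re

mapping = [
    (r'encoder/transformer_encoder_block_(.*?)/self_attention/query/kernel:0',
     r'blocks.\1.attn.q.weight'),
]
name = 'encoder/transformer_encoder_block_3/self_attention/query/kernel:0'
for source, dest in mapping:
    name = re.sub(source, dest, name)
print(name)  # blocks.3.attn.q.weight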
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data utils for CIFAR-10 and CIFAR-100."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import cPickle
import os
import augmentation_transforms
import numpy as np
import policies as found_policies
import tensorflow as tf
# pylint:disable=logging-format-interpolation
class DataSet(object):
"""Dataset object that produces augmented training and eval data."""
def __init__(self, hparams):
self.hparams = hparams
self.epochs = 0
self.curr_train_index = 0
all_labels = []
self.good_policies = found_policies.good_policies()
    # Determine how many data batches to load
num_data_batches_to_load = 5
total_batches_to_load = num_data_batches_to_load
train_batches_to_load = total_batches_to_load
assert hparams.train_size + hparams.validation_size <= 50000
if hparams.eval_test:
total_batches_to_load += 1
# Determine how many images we have loaded
total_dataset_size = 10000 * num_data_batches_to_load
train_dataset_size = total_dataset_size
if hparams.eval_test:
total_dataset_size += 10000
if hparams.dataset == 'cifar10':
all_data = np.empty((total_batches_to_load, 10000, 3072), dtype=np.uint8)
elif hparams.dataset == 'cifar100':
assert num_data_batches_to_load == 5
all_data = np.empty((1, 50000, 3072), dtype=np.uint8)
if hparams.eval_test:
test_data = np.empty((1, 10000, 3072), dtype=np.uint8)
if hparams.dataset == 'cifar10':
tf.logging.info('Cifar10')
datafiles = [
'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5']
datafiles = datafiles[:train_batches_to_load]
if hparams.eval_test:
datafiles.append('test_batch')
num_classes = 10
elif hparams.dataset == 'cifar100':
datafiles = ['train']
if hparams.eval_test:
datafiles.append('test')
num_classes = 100
else:
raise NotImplementedError('Unimplemented dataset: ', hparams.dataset)
if hparams.dataset != 'test':
for file_num, f in enumerate(datafiles):
d = unpickle(os.path.join(hparams.data_path, f))
if f == 'test':
test_data[0] = copy.deepcopy(d['data'])
all_data = np.concatenate([all_data, test_data], axis=1)
else:
all_data[file_num] = copy.deepcopy(d['data'])
if hparams.dataset == 'cifar10':
labels = np.array(d['labels'])
else:
labels = np.array(d['fine_labels'])
nsamples = len(labels)
for idx in range(nsamples):
all_labels.append(labels[idx])
all_data = all_data.reshape(total_dataset_size, 3072)
all_data = all_data.reshape(-1, 3, 32, 32)
all_data = all_data.transpose(0, 2, 3, 1).copy()
all_data = all_data / 255.0
mean = augmentation_transforms.MEANS
std = augmentation_transforms.STDS
tf.logging.info('mean:{} std: {}'.format(mean, std))
all_data = (all_data - mean) / std
all_labels = np.eye(num_classes)[np.array(all_labels, dtype=np.int32)]
assert len(all_data) == len(all_labels)
tf.logging.info(
'In CIFAR10 loader, number of images: {}'.format(len(all_data)))
# Break off test data
if hparams.eval_test:
self.test_images = all_data[train_dataset_size:]
self.test_labels = all_labels[train_dataset_size:]
# Shuffle the rest of the data
all_data = all_data[:train_dataset_size]
all_labels = all_labels[:train_dataset_size]
np.random.seed(0)
perm = np.arange(len(all_data))
np.random.shuffle(perm)
all_data = all_data[perm]
all_labels = all_labels[perm]
# Break into train and val
train_size, val_size = hparams.train_size, hparams.validation_size
assert 50000 >= train_size + val_size
self.train_images = all_data[:train_size]
self.train_labels = all_labels[:train_size]
self.val_images = all_data[train_size:train_size + val_size]
self.val_labels = all_labels[train_size:train_size + val_size]
self.num_train = self.train_images.shape[0]
def next_batch(self):
"""Return the next minibatch of augmented data."""
next_train_index = self.curr_train_index + self.hparams.batch_size
if next_train_index > self.num_train:
# Increase epoch number
epoch = self.epochs + 1
self.reset()
self.epochs = epoch
batched_data = (
self.train_images[self.curr_train_index:
self.curr_train_index + self.hparams.batch_size],
self.train_labels[self.curr_train_index:
self.curr_train_index + self.hparams.batch_size])
final_imgs = []
images, labels = batched_data
for data in images:
epoch_policy = self.good_policies[np.random.choice(
len(self.good_policies))]
final_img = augmentation_transforms.apply_policy(
epoch_policy, data)
final_img = augmentation_transforms.random_flip(
augmentation_transforms.zero_pad_and_crop(final_img, 4))
# Apply cutout
final_img = augmentation_transforms.cutout_numpy(final_img)
final_imgs.append(final_img)
batched_data = (np.array(final_imgs, np.float32), labels)
self.curr_train_index += self.hparams.batch_size
return batched_data
def reset(self):
"""Reset training data and index into the training data."""
self.epochs = 0
# Shuffle the training data
perm = np.arange(self.num_train)
np.random.shuffle(perm)
assert self.num_train == self.train_images.shape[
0], 'Error incorrect shuffling mask'
self.train_images = self.train_images[perm]
self.train_labels = self.train_labels[perm]
self.curr_train_index = 0
def unpickle(f):
tf.logging.info('loading file: {}'.format(f))
  fo = tf.gfile.Open(f, 'rb')
d = cPickle.load(fo)
fo.close()
return d
| 102308.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
from dirty_equals import IsDict, IsOneOf
from fastapi.testclient import TestClient
from docs_src.security.tutorial005 import (
app,
create_access_token,
fake_users_db,
get_password_hash,
verify_password,
)
client = TestClient(app)
def get_access_token(username="johndoe", password="secret", scope=None):
data = {"username": username, "password": password}
if scope:
data["scope"] = scope
response = client.post("/token", data=data)
content = response.json()
access_token = content.get("access_token")
return access_token
def test_login():
response = client.post("/token", data={"username": "johndoe", "password": "secret"})
assert response.status_code == 200, response.text
content = response.json()
assert "access_token" in content
assert content["token_type"] == "bearer"
def test_login_incorrect_password():
response = client.post(
"/token", data={"username": "johndoe", "password": "incorrect"}
)
assert response.status_code == 400, response.text
assert response.json() == {"detail": "Incorrect username or password"}
def test_login_incorrect_username():
response = client.post("/token", data={"username": "foo", "password": "secret"})
assert response.status_code == 400, response.text
assert response.json() == {"detail": "Incorrect username or password"}
def test_no_token():
response = client.get("/users/me")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "Bearer"
def test_token():
access_token = get_access_token(scope="me")
response = client.get(
"/users/me", headers={"Authorization": f"Bearer {access_token}"}
)
assert response.status_code == 200, response.text
assert response.json() == {
"username": "johndoe",
"full_name": "John Doe",
"email": "[email protected]",
"disabled": False,
}
def test_incorrect_token():
response = client.get("/users/me", headers={"Authorization": "Bearer nonexistent"})
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Could not validate credentials"}
assert response.headers["WWW-Authenticate"] == 'Bearer scope="me"'
def test_incorrect_token_type():
response = client.get(
"/users/me", headers={"Authorization": "Notexistent testtoken"}
)
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "Bearer"
def test_verify_password():
assert verify_password("secret", fake_users_db["johndoe"]["hashed_password"])
def test_get_password_hash():
assert get_password_hash("secretalice")
def test_create_access_token():
access_token = create_access_token(data={"data": "foo"})
assert access_token
def test_token_no_sub():
response = client.get(
"/users/me",
headers={
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhIjoiZm9vIn0.9ynBhuYb4e6aW3oJr_K_TBgwcMTDpRToQIE25L57rOE"
},
)
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Could not validate credentials"}
assert response.headers["WWW-Authenticate"] == 'Bearer scope="me"'
def test_token_no_username():
response = client.get(
"/users/me",
headers={
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJmb28ifQ.NnExK_dlNAYyzACrXtXDrcWOgGY2JuPbI4eDaHdfK5Y"
},
)
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Could not validate credentials"}
assert response.headers["WWW-Authenticate"] == 'Bearer scope="me"'
def test_token_no_scope():
access_token = get_access_token()
response = client.get(
"/users/me", headers={"Authorization": f"Bearer {access_token}"}
)
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not enough permissions"}
assert response.headers["WWW-Authenticate"] == 'Bearer scope="me"'
def test_token_nonexistent_user():
response = client.get(
"/users/me",
headers={
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJ1c2VybmFtZTpib2IifQ.HcfCW67Uda-0gz54ZWTqmtgJnZeNem0Q757eTa9EZuw"
},
)
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Could not validate credentials"}
assert response.headers["WWW-Authenticate"] == 'Bearer scope="me"'
def test_token_inactive_user():
access_token = get_access_token(
username="alice", password="secretalice", scope="me"
)
response = client.get(
"/users/me", headers={"Authorization": f"Bearer {access_token}"}
)
assert response.status_code == 400, response.text
assert response.json() == {"detail": "Inactive user"}
def test_read_items():
access_token = get_access_token(scope="me items")
response = client.get(
"/users/me/items/", headers={"Authorization": f"Bearer {access_token}"}
)
assert response.status_code == 200, response.text
assert response.json() == [{"item_id": "Foo", "owner": "johndoe"}]
def test_read_system_status():
access_token = get_access_token()
response = client.get(
"/status/", headers={"Authorization": f"Bearer {access_token}"}
)
assert response.status_code == 200, response.text
assert response.json() == {"status": "ok"}
def test_read_system_status_no_token():
response = client.get("/status/")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "Bearer"
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/token": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Token"}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Login For Access Token",
"operationId": "login_for_access_token_token_post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"$ref": "#/components/schemas/Body_login_for_access_token_token_post"
}
}
},
"required": True,
},
}
},
"/users/me/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/User"}
}
},
}
},
"summary": "Read Users Me",
"operationId": "read_users_me_users_me__get",
"security": [{"OAuth2PasswordBearer": ["me"]}],
}
},
"/users/me/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Own Items",
"operationId": "read_own_items_users_me_items__get",
"security": [{"OAuth2PasswordBearer": ["items", "me"]}],
}
},
"/status/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read System Status",
"operationId": "read_system_status_status__get",
"security": [{"OAuth2PasswordBearer": []}],
}
},
},
"components": {
"schemas": {
"User": {
"title": "User",
"required": IsOneOf(
["username", "email", "full_name", "disabled"],
# TODO: remove when deprecating Pydantic v1
["username"],
),
"type": "object",
"properties": {
"username": {"title": "Username", "type": "string"},
"email": IsDict(
{
"title": "Email",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Email", "type": "string"}
),
"full_name": IsDict(
{
"title": "Full Name",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Full Name", "type": "string"}
),
"disabled": IsDict(
{
"title": "Disabled",
"anyOf": [{"type": "boolean"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Disabled", "type": "boolean"}
),
},
},
"Token": {
"title": "Token",
"required": ["access_token", "token_type"],
"type": "object",
"properties": {
"access_token": {"title": "Access Token", "type": "string"},
"token_type": {"title": "Token Type", "type": "string"},
},
},
"Body_login_for_access_token_token_post": {
"title": "Body_login_for_access_token_token_post",
"required": ["username", "password"],
"type": "object",
"properties": {
"grant_type": IsDict(
{
"title": "Grant Type",
"anyOf": [
{"pattern": "password", "type": "string"},
{"type": "null"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "Grant Type",
"pattern": "password",
"type": "string",
}
),
"username": {"title": "Username", "type": "string"},
"password": {"title": "Password", "type": "string"},
"scope": {"title": "Scope", "type": "string", "default": ""},
"client_id": IsDict(
{
"title": "Client Id",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Client Id", "type": "string"}
),
"client_secret": IsDict(
{
"title": "Client Secret",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Client Secret", "type": "string"}
),
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
},
"securitySchemes": {
"OAuth2PasswordBearer": {
"type": "oauth2",
"flows": {
"password": {
"scopes": {
"me": "Read information about the current user.",
"items": "Read items.",
},
"tokenUrl": "token",
}
},
}
},
},
}
| 545558.py | [
"CWE-798: Use of Hard-coded Credentials"
] |
"""Support for Alexa skill auth."""
import asyncio
from asyncio import timeout
from datetime import datetime, timedelta
from http import HTTPStatus
import json
import logging
from typing import Any
import aiohttp
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.storage import Store
from homeassistant.util import dt as dt_util
from .const import STORAGE_ACCESS_TOKEN, STORAGE_REFRESH_TOKEN
from .diagnostics import async_redact_lwa_params
_LOGGER = logging.getLogger(__name__)
LWA_TOKEN_URI = "https://api.amazon.com/auth/o2/token"
LWA_HEADERS = {"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
PREEMPTIVE_REFRESH_TTL_IN_SECONDS = 300
STORAGE_KEY = "alexa_auth"
STORAGE_VERSION = 1
STORAGE_EXPIRE_TIME = "expire_time"
class Auth:
"""Handle authentication to send events to Alexa."""
def __init__(self, hass: HomeAssistant, client_id: str, client_secret: str) -> None:
"""Initialize the Auth class."""
self.hass = hass
self.client_id = client_id
self.client_secret = client_secret
self._prefs: dict[str, Any] | None = None
self._store: Store = Store(hass, STORAGE_VERSION, STORAGE_KEY)
self._get_token_lock = asyncio.Lock()
async def async_do_auth(self, accept_grant_code: str) -> str | None:
"""Do authentication with an AcceptGrant code."""
# access token not retrieved yet for the first time, so this should
# be an access token request
lwa_params: dict[str, str] = {
"grant_type": "authorization_code",
"code": accept_grant_code,
CONF_CLIENT_ID: self.client_id,
CONF_CLIENT_SECRET: self.client_secret,
}
_LOGGER.debug(
"Calling LWA to get the access token (first time), with: %s",
json.dumps(async_redact_lwa_params(lwa_params)),
)
return await self._async_request_new_token(lwa_params)
@callback
def async_invalidate_access_token(self) -> None:
"""Invalidate access token."""
assert self._prefs is not None
self._prefs[STORAGE_ACCESS_TOKEN] = None
async def async_get_access_token(self) -> str | None:
"""Perform access token or token refresh request."""
async with self._get_token_lock:
if self._prefs is None:
await self.async_load_preferences()
assert self._prefs is not None
if self.is_token_valid():
_LOGGER.debug("Token still valid, using it")
token: str = self._prefs[STORAGE_ACCESS_TOKEN]
return token
if self._prefs[STORAGE_REFRESH_TOKEN] is None:
_LOGGER.debug("Token invalid and no refresh token available")
return None
lwa_params: dict[str, str] = {
"grant_type": "refresh_token",
"refresh_token": self._prefs[STORAGE_REFRESH_TOKEN],
CONF_CLIENT_ID: self.client_id,
CONF_CLIENT_SECRET: self.client_secret,
}
_LOGGER.debug("Calling LWA to refresh the access token")
return await self._async_request_new_token(lwa_params)
@callback
def is_token_valid(self) -> bool:
"""Check if a token is already loaded and if it is still valid."""
assert self._prefs is not None
if not self._prefs[STORAGE_ACCESS_TOKEN]:
return False
expire_time: datetime | None = dt_util.parse_datetime(
self._prefs[STORAGE_EXPIRE_TIME]
)
assert expire_time is not None
preemptive_expire_time = expire_time - timedelta(
seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS
)
return dt_util.utcnow() < preemptive_expire_time
async def _async_request_new_token(self, lwa_params: dict[str, str]) -> str | None:
try:
session = aiohttp_client.async_get_clientsession(self.hass)
async with timeout(10):
response = await session.post(
LWA_TOKEN_URI,
headers=LWA_HEADERS,
data=lwa_params,
allow_redirects=True,
)
except (TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout calling LWA to get auth token")
return None
_LOGGER.debug("LWA response header: %s", response.headers)
_LOGGER.debug("LWA response status: %s", response.status)
if response.status != HTTPStatus.OK:
_LOGGER.error("Error calling LWA to get auth token")
return None
response_json = await response.json()
_LOGGER.debug("LWA response body : %s", async_redact_lwa_params(response_json))
access_token: str = response_json["access_token"]
refresh_token: str = response_json["refresh_token"]
expires_in: int = response_json["expires_in"]
expire_time = dt_util.utcnow() + timedelta(seconds=expires_in)
await self._async_update_preferences(
access_token, refresh_token, expire_time.isoformat()
)
return access_token
async def async_load_preferences(self) -> None:
"""Load preferences with stored tokens."""
self._prefs = await self._store.async_load()
if self._prefs is None:
self._prefs = {
STORAGE_ACCESS_TOKEN: None,
STORAGE_REFRESH_TOKEN: None,
STORAGE_EXPIRE_TIME: None,
}
async def _async_update_preferences(
self, access_token: str, refresh_token: str, expire_time: str
) -> None:
"""Update user preferences."""
if self._prefs is None:
await self.async_load_preferences()
assert self._prefs is not None
if access_token is not None:
self._prefs[STORAGE_ACCESS_TOKEN] = access_token
if refresh_token is not None:
self._prefs[STORAGE_REFRESH_TOKEN] = refresh_token
if expire_time is not None:
self._prefs[STORAGE_EXPIRE_TIME] = expire_time
await self._store.async_save(self._prefs)
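# A hedged usage sketch (the client credentials and grant code are illustrative
# placeholders): the initial AcceptGrant exchange goes through async_do_auth,
# and later callers reuse or refresh the stored token via async_get_access_token.
#
#   auth = Auth(hass, client_id="amzn1.application-oa2-client.example",
#               client_secret="example-secret")
#   token = await auth.async_do_auth(accept_grant_code)
#   token = await auth.async_get_access_token()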
| 695407.py | [
"CWE-532: Insertion of Sensitive Information into Log File"
] |
"""The Application Credentials integration.
This integration provides APIs for managing local OAuth credentials on behalf
of other integrations. Integrations register an authorization server, and then
the APIs are used to add one or more client credentials. Integrations may also
provide credentials from yaml for backwards compatibility.
"""
from __future__ import annotations
from dataclasses import dataclass
import logging
from typing import Any, Protocol
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DOMAIN,
CONF_ID,
CONF_NAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import collection, config_entry_oauth2_flow
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, VolDictType
from homeassistant.loader import (
IntegrationNotFound,
async_get_application_credentials,
async_get_integration,
)
from homeassistant.util import slugify
__all__ = ["ClientCredential", "AuthorizationServer", "async_import_client_credential"]
_LOGGER = logging.getLogger(__name__)
DOMAIN = "application_credentials"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
DATA_STORAGE = "storage"
CONF_AUTH_DOMAIN = "auth_domain"
DEFAULT_IMPORT_NAME = "Import from configuration.yaml"
CREATE_FIELDS: VolDictType = {
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_CLIENT_ID): vol.All(cv.string, vol.Strip),
vol.Required(CONF_CLIENT_SECRET): vol.All(cv.string, vol.Strip),
vol.Optional(CONF_AUTH_DOMAIN): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
UPDATE_FIELDS: VolDictType = {} # Not supported
CONFIG_SCHEMA = cv.empty_config_schema(DOMAIN)
@dataclass
class ClientCredential:
"""Represent an OAuth client credential."""
client_id: str
client_secret: str
name: str | None = None
@dataclass
class AuthorizationServer:
"""Represent an OAuth2 Authorization Server."""
authorize_url: str
token_url: str
class ApplicationCredentialsStorageCollection(collection.DictStorageCollection):
"""Application credential collection stored in storage."""
CREATE_SCHEMA = vol.Schema(CREATE_FIELDS)
async def _process_create_data(self, data: dict[str, str]) -> dict[str, str]:
"""Validate the config is valid."""
result = self.CREATE_SCHEMA(data)
domain = result[CONF_DOMAIN]
if not await _get_platform(self.hass, domain):
raise ValueError(f"No application_credentials platform for {domain}")
return result
@callback
def _get_suggested_id(self, info: dict[str, str]) -> str:
"""Suggest an ID based on the config."""
return f"{info[CONF_DOMAIN]}.{info[CONF_CLIENT_ID]}"
async def _update_data(
self, item: dict[str, str], update_data: dict[str, str]
) -> dict[str, str]:
"""Return a new updated data object."""
raise ValueError("Updates not supported")
async def async_delete_item(self, item_id: str) -> None:
"""Delete item, verifying credential is not in use."""
if item_id not in self.data:
raise collection.ItemNotFound(item_id)
# Cannot delete a credential currently in use by a ConfigEntry
current = self.data[item_id]
entries = self.hass.config_entries.async_entries(current[CONF_DOMAIN])
for entry in entries:
if entry.data.get("auth_implementation") == item_id:
raise HomeAssistantError(
f"Cannot delete credential in use by integration {entry.domain}"
)
await super().async_delete_item(item_id)
async def async_import_item(self, info: dict[str, str]) -> None:
"""Import an yaml credential if it does not already exist."""
suggested_id = self._get_suggested_id(info)
if self.id_manager.has_id(slugify(suggested_id)):
return
await self.async_create_item(info)
def async_client_credentials(self, domain: str) -> dict[str, ClientCredential]:
"""Return ClientCredentials in storage for the specified domain."""
credentials = {}
for item in self.async_items():
if item[CONF_DOMAIN] != domain:
continue
auth_domain = item.get(CONF_AUTH_DOMAIN, item[CONF_ID])
credentials[auth_domain] = ClientCredential(
client_id=item[CONF_CLIENT_ID],
client_secret=item[CONF_CLIENT_SECRET],
name=item.get(CONF_NAME),
)
return credentials
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up Application Credentials."""
hass.data[DOMAIN] = {}
id_manager = collection.IDManager()
storage_collection = ApplicationCredentialsStorageCollection(
Store(hass, STORAGE_VERSION, STORAGE_KEY),
id_manager,
)
await storage_collection.async_load()
hass.data[DOMAIN][DATA_STORAGE] = storage_collection
collection.DictStorageCollectionWebsocket(
storage_collection, DOMAIN, DOMAIN, CREATE_FIELDS, UPDATE_FIELDS
).async_setup(hass)
websocket_api.async_register_command(hass, handle_integration_list)
websocket_api.async_register_command(hass, handle_config_entry)
config_entry_oauth2_flow.async_add_implementation_provider(
hass, DOMAIN, _async_provide_implementation
)
return True
async def async_import_client_credential(
hass: HomeAssistant,
domain: str,
credential: ClientCredential,
auth_domain: str | None = None,
) -> None:
"""Import an existing credential from configuration.yaml."""
if DOMAIN not in hass.data:
raise ValueError("Integration 'application_credentials' not setup")
storage_collection = hass.data[DOMAIN][DATA_STORAGE]
item = {
CONF_DOMAIN: domain,
CONF_CLIENT_ID: credential.client_id,
CONF_CLIENT_SECRET: credential.client_secret,
CONF_AUTH_DOMAIN: auth_domain if auth_domain else domain,
}
item[CONF_NAME] = credential.name if credential.name else DEFAULT_IMPORT_NAME
await storage_collection.async_import_item(item)
class AuthImplementation(config_entry_oauth2_flow.LocalOAuth2Implementation):
"""Application Credentials local oauth2 implementation."""
def __init__(
self,
hass: HomeAssistant,
auth_domain: str,
credential: ClientCredential,
authorization_server: AuthorizationServer,
) -> None:
"""Initialize AuthImplementation."""
super().__init__(
hass,
auth_domain,
credential.client_id,
credential.client_secret,
authorization_server.authorize_url,
authorization_server.token_url,
)
self._name = credential.name
@property
def name(self) -> str:
"""Name of the implementation."""
return self._name or self.client_id
async def _async_provide_implementation(
hass: HomeAssistant, domain: str
) -> list[config_entry_oauth2_flow.AbstractOAuth2Implementation]:
"""Return registered OAuth implementations."""
platform = await _get_platform(hass, domain)
if not platform:
return []
storage_collection = hass.data[DOMAIN][DATA_STORAGE]
credentials = storage_collection.async_client_credentials(domain)
if hasattr(platform, "async_get_auth_implementation"):
return [
await platform.async_get_auth_implementation(hass, auth_domain, credential)
for auth_domain, credential in credentials.items()
]
authorization_server = await platform.async_get_authorization_server(hass)
return [
AuthImplementation(hass, auth_domain, credential, authorization_server)
for auth_domain, credential in credentials.items()
]
async def _async_config_entry_app_credentials(
hass: HomeAssistant,
config_entry: ConfigEntry,
) -> str | None:
"""Return the item id of an application credential for an existing ConfigEntry."""
if not await _get_platform(hass, config_entry.domain) or not (
auth_domain := config_entry.data.get("auth_implementation")
):
return None
storage_collection = hass.data[DOMAIN][DATA_STORAGE]
for item in storage_collection.async_items():
item_id = item[CONF_ID]
if (
item[CONF_DOMAIN] == config_entry.domain
and item.get(CONF_AUTH_DOMAIN, item_id) == auth_domain
):
return item_id
return None
class ApplicationCredentialsProtocol(Protocol):
"""Define the format that application_credentials platforms may have.
    Most platforms implement only async_get_authorization_server, and the
    default oauth implementation will be used. Otherwise a platform may
    implement async_get_auth_implementation to provide a custom
    AbstractOAuth2Implementation (see the commented sketch after this class).
"""
async def async_get_authorization_server(
self, hass: HomeAssistant
) -> AuthorizationServer:
"""Return authorization server, for the default auth implementation."""
async def async_get_auth_implementation(
self, hass: HomeAssistant, auth_domain: str, credential: ClientCredential
) -> config_entry_oauth2_flow.AbstractOAuth2Implementation:
"""Return a custom auth implementation."""
async def async_get_description_placeholders(
self, hass: HomeAssistant
) -> dict[str, str]:
"""Return description placeholders for the credentials dialog."""
async def _get_platform(
hass: HomeAssistant, integration_domain: str
) -> ApplicationCredentialsProtocol | None:
"""Register an application_credentials platform."""
try:
integration = await async_get_integration(hass, integration_domain)
except IntegrationNotFound as err:
_LOGGER.debug("Integration '%s' does not exist: %s", integration_domain, err)
return None
try:
platform = await integration.async_get_platform("application_credentials")
except ImportError as err:
_LOGGER.debug(
"Integration '%s' does not provide application_credentials: %s",
integration_domain,
err,
)
return None
if not hasattr(platform, "async_get_authorization_server") and not hasattr(
platform, "async_get_auth_implementation"
):
raise ValueError(
f"Integration '{integration_domain}' platform {DOMAIN} did not implement"
" 'async_get_authorization_server' or 'async_get_auth_implementation'"
)
return platform
async def _async_integration_config(hass: HomeAssistant, domain: str) -> dict[str, Any]:
platform = await _get_platform(hass, domain)
if platform and hasattr(platform, "async_get_description_placeholders"):
placeholders = await platform.async_get_description_placeholders(hass)
return {"description_placeholders": placeholders}
return {}
@websocket_api.websocket_command(
{vol.Required("type"): "application_credentials/config"}
)
@websocket_api.async_response
async def handle_integration_list(
hass: HomeAssistant, connection: ActiveConnection, msg: dict[str, Any]
) -> None:
"""Handle integrations command."""
domains = await async_get_application_credentials(hass)
result = {
"domains": domains,
"integrations": {
domain: await _async_integration_config(hass, domain) for domain in domains
},
}
connection.send_result(msg["id"], result)
@websocket_api.websocket_command(
{
vol.Required("type"): "application_credentials/config_entry",
vol.Required("config_entry_id"): str,
}
)
@websocket_api.async_response
async def handle_config_entry(
hass: HomeAssistant, connection: ActiveConnection, msg: dict[str, Any]
) -> None:
"""Return application credentials information for a config entry."""
entry_id = msg["config_entry_id"]
config_entry = hass.config_entries.async_get_entry(entry_id)
if not config_entry:
connection.send_error(
msg["id"],
"invalid_config_entry_id",
f"Config entry not found: {entry_id}",
)
return
result = {}
if application_credentials_id := await _async_config_entry_app_credentials(
hass, config_entry
):
result["application_credentials_id"] = application_credentials_id
connection.send_result(msg["id"], result)
| 549500.py | [
"CWE-532: Insertion of Sensitive Information into Log File"
] |
"""AWS platform for notify component."""
from __future__ import annotations
import asyncio
import base64
import json
import logging
from typing import Any
from aiobotocore.session import AioSession
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import (
CONF_NAME,
CONF_PLATFORM,
CONF_PROFILE_NAME,
CONF_SERVICE,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import CONF_CONTEXT, CONF_CREDENTIAL_NAME, CONF_REGION, DATA_SESSIONS
_LOGGER = logging.getLogger(__name__)
async def get_available_regions(hass, service):
"""Get available regions for a service."""
session = AioSession()
return await session.get_available_regions(service)
async def async_get_service(
hass: HomeAssistant,
config: ConfigType,
discovery_info: DiscoveryInfoType | None = None,
) -> AWSNotify | None:
"""Get the AWS notification service."""
if discovery_info is None:
_LOGGER.error("Please config aws notify platform in aws component")
return None
session = None
conf = discovery_info
service = conf[CONF_SERVICE]
region_name = conf[CONF_REGION]
available_regions = await get_available_regions(hass, service)
if region_name not in available_regions:
_LOGGER.error(
"Region %s is not available for %s service, must in %s",
region_name,
service,
available_regions,
)
return None
aws_config = conf.copy()
del aws_config[CONF_SERVICE]
del aws_config[CONF_REGION]
if CONF_PLATFORM in aws_config:
del aws_config[CONF_PLATFORM]
if CONF_NAME in aws_config:
del aws_config[CONF_NAME]
if CONF_CONTEXT in aws_config:
del aws_config[CONF_CONTEXT]
if not aws_config:
# no platform config, use the first aws component credential instead
if hass.data[DATA_SESSIONS]:
session = next(iter(hass.data[DATA_SESSIONS].values()))
else:
_LOGGER.error("Missing aws credential for %s", config[CONF_NAME])
return None
if session is None:
credential_name = aws_config.get(CONF_CREDENTIAL_NAME)
if credential_name is not None:
session = hass.data[DATA_SESSIONS].get(credential_name)
if session is None:
_LOGGER.warning("No available aws session for %s", credential_name)
del aws_config[CONF_CREDENTIAL_NAME]
if session is None:
if (profile := aws_config.get(CONF_PROFILE_NAME)) is not None:
session = AioSession(profile=profile)
del aws_config[CONF_PROFILE_NAME]
else:
session = AioSession()
aws_config[CONF_REGION] = region_name
if service == "lambda":
context_str = json.dumps(
{"custom": conf.get(CONF_CONTEXT, {})}, cls=JSONEncoder
)
context_b64 = base64.b64encode(context_str.encode("utf-8"))
context = context_b64.decode("utf-8")
return AWSLambda(session, aws_config, context)
if service == "sns":
return AWSSNS(session, aws_config)
if service == "sqs":
return AWSSQS(session, aws_config)
if service == "events":
return AWSEventBridge(session, aws_config)
# should not reach here since service was checked in schema
return None
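# A hedged sketch of the discovery payload async_get_service expects (the keys
# come from the constants imported above; the values are illustrative):
#
#   discovery_info = {
#       CONF_SERVICE: "sns",
#       CONF_REGION: "us-east-1",
#       CONF_NAME: "my_aws_notifier",
#       CONF_CREDENTIAL_NAME: "default",
#   }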
class AWSNotify(BaseNotificationService):
"""Implement the notification service for the AWS service."""
def __init__(self, session, aws_config):
"""Initialize the service."""
self.session = session
self.aws_config = aws_config
class AWSLambda(AWSNotify):
"""Implement the notification service for the AWS Lambda service."""
service = "lambda"
def __init__(self, session, aws_config, context):
"""Initialize the service."""
super().__init__(session, aws_config)
self.context = context
async def async_send_message(self, message: str = "", **kwargs: Any) -> None:
"""Send notification to specified LAMBDA ARN."""
if not kwargs.get(ATTR_TARGET):
_LOGGER.error("At least one target is required")
return
cleaned_kwargs = {k: v for k, v in kwargs.items() if v is not None}
payload = {"message": message}
payload.update(cleaned_kwargs)
json_payload = json.dumps(payload)
async with self.session.create_client(
self.service, **self.aws_config
) as client:
tasks = [
client.invoke(
FunctionName=target,
Payload=json_payload,
ClientContext=self.context,
)
for target in kwargs.get(ATTR_TARGET, [])
]
if tasks:
await asyncio.gather(*tasks)
class AWSSNS(AWSNotify):
"""Implement the notification service for the AWS SNS service."""
service = "sns"
async def async_send_message(self, message: str = "", **kwargs: Any) -> None:
"""Send notification to specified SNS ARN."""
if not kwargs.get(ATTR_TARGET):
_LOGGER.error("At least one target is required")
return
message_attributes = {}
if data := kwargs.get(ATTR_DATA):
message_attributes = {
k: {"StringValue": v, "DataType": "String"}
for k, v in data.items()
if v is not None
}
subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
async with self.session.create_client(
self.service, **self.aws_config
) as client:
tasks = [
client.publish(
TargetArn=target,
Message=message,
Subject=subject,
MessageAttributes=message_attributes,
)
for target in kwargs.get(ATTR_TARGET, [])
]
if tasks:
await asyncio.gather(*tasks)
class AWSSQS(AWSNotify):
"""Implement the notification service for the AWS SQS service."""
service = "sqs"
async def async_send_message(self, message: str = "", **kwargs: Any) -> None:
"""Send notification to specified SQS ARN."""
if not kwargs.get(ATTR_TARGET):
_LOGGER.error("At least one target is required")
return
cleaned_kwargs = {k: v for k, v in kwargs.items() if v is not None}
message_body = {"message": message}
message_body.update(cleaned_kwargs)
json_body = json.dumps(message_body)
message_attributes = {}
for key, val in cleaned_kwargs.items():
message_attributes[key] = {
"StringValue": json.dumps(val),
"DataType": "String",
}
async with self.session.create_client(
self.service, **self.aws_config
) as client:
tasks = [
client.send_message(
QueueUrl=target,
MessageBody=json_body,
MessageAttributes=message_attributes,
)
for target in kwargs.get(ATTR_TARGET, [])
]
if tasks:
await asyncio.gather(*tasks)
class AWSEventBridge(AWSNotify):
"""Implement the notification service for the AWS EventBridge service."""
service = "events"
async def async_send_message(self, message: str = "", **kwargs: Any) -> None:
"""Send notification to specified EventBus."""
cleaned_kwargs = {k: v for k, v in kwargs.items() if v is not None}
data = cleaned_kwargs.get(ATTR_DATA, {})
detail = (
json.dumps(data["detail"])
if "detail" in data
else json.dumps({"message": message})
)
async with self.session.create_client(
self.service, **self.aws_config
) as client:
entries = []
for target in kwargs.get(ATTR_TARGET, [None]):
entry = {
"Source": data.get("source", "homeassistant"),
"Resources": data.get("resources", []),
"Detail": detail,
"DetailType": data.get("detail_type", ""),
}
if target:
entry["EventBusName"] = target
entries.append(entry)
tasks = [
client.put_events(Entries=entries[i : min(i + 10, len(entries))])
for i in range(0, len(entries), 10)
]
if tasks:
results = await asyncio.gather(*tasks)
for result in results:
for entry in result["Entries"]:
if len(entry.get("EventId", "")) == 0:
_LOGGER.error(
"Failed to send event: ErrorCode=%s ErrorMessage=%s",
entry["ErrorCode"],
entry["ErrorMessage"],
)
| 804802.py | [
"CWE-532: Insertion of Sensitive Information into Log File"
] |
from transformers import AutoModel, AutoTokenizer
import time
import os
import json
import threading
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe
load_message = "ChatGLMFT尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLMFT消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
def string_to_options(arguments):
import argparse
import shlex
# Create an argparse.ArgumentParser instance
parser = argparse.ArgumentParser()
# Add command-line arguments
parser.add_argument("--llm_to_learn", type=str, help="LLM model to learn", default="gpt-3.5-turbo")
parser.add_argument("--prompt_prefix", type=str, help="Prompt prefix", default='')
parser.add_argument("--system_prompt", type=str, help="System prompt", default='')
parser.add_argument("--batch", type=int, help="System prompt", default=50)
# Parse the arguments
args = parser.parse_args(shlex.split(arguments))
return args
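# A hedged usage sketch (the argument string below is illustrative):
#   opts = string_to_options("--llm_to_learn gpt-3.5-turbo --batch 10")
#   assert opts.batch == 10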
#################################################################################
class GetGLMFTHandle(Process):
def __init__(self):
super().__init__(daemon=True)
self.parent, self.child = Pipe()
self.chatglmft_model = None
self.chatglmft_tokenizer = None
self.info = ""
self.success = True
self.check_dependency()
self.start()
self.threadLock = threading.Lock()
def check_dependency(self):
try:
import sentencepiece
self.info = "依赖检测通过"
self.success = True
except:
self.info = "缺少ChatGLMFT的依赖,如果要使用ChatGLMFT,除了基础的pip依赖以外,您还需要运行`pip install -r request_llms/requirements_chatglm.txt`安装ChatGLM的依赖。"
self.success = False
def ready(self):
return self.chatglmft_model is not None
def run(self):
        # Executed in the child process
        # On the first run, load the parameters
retry = 0
while True:
try:
if self.chatglmft_model is None:
from transformers import AutoConfig
import torch
# conf = 'request_llms/current_ptune_model.json'
# if not os.path.exists(conf): raise RuntimeError('找不到微调模型信息')
# with open(conf, 'r', encoding='utf8') as f:
# model_args = json.loads(f.read())
CHATGLM_PTUNING_CHECKPOINT = get_conf('CHATGLM_PTUNING_CHECKPOINT')
assert os.path.exists(CHATGLM_PTUNING_CHECKPOINT), "找不到微调模型检查点"
conf = os.path.join(CHATGLM_PTUNING_CHECKPOINT, "config.json")
with open(conf, 'r', encoding='utf8') as f:
model_args = json.loads(f.read())
if 'model_name_or_path' not in model_args:
model_args['model_name_or_path'] = model_args['_name_or_path']
self.chatglmft_tokenizer = AutoTokenizer.from_pretrained(
model_args['model_name_or_path'], trust_remote_code=True)
config = AutoConfig.from_pretrained(
model_args['model_name_or_path'], trust_remote_code=True)
config.pre_seq_len = model_args['pre_seq_len']
config.prefix_projection = model_args['prefix_projection']
print(f"Loading prefix_encoder weight from {CHATGLM_PTUNING_CHECKPOINT}")
model = AutoModel.from_pretrained(model_args['model_name_or_path'], config=config, trust_remote_code=True)
prefix_state_dict = torch.load(os.path.join(CHATGLM_PTUNING_CHECKPOINT, "pytorch_model.bin"))
new_prefix_state_dict = {}
for k, v in prefix_state_dict.items():
if k.startswith("transformer.prefix_encoder."):
new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v
model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict)
if model_args['quantization_bit'] is not None and model_args['quantization_bit'] != 0:
print(f"Quantized to {model_args['quantization_bit']} bit")
model = model.quantize(model_args['quantization_bit'])
model = model.cuda()
if model_args['pre_seq_len'] is not None:
# P-tuning v2
model.transformer.prefix_encoder.float()
self.chatglmft_model = model.eval()
break
else:
break
except Exception as e:
retry += 1
if retry > 3:
self.child.send('[Local Message] Call ChatGLMFT fail 不能正常加载ChatGLMFT的参数。')
raise RuntimeError("不能正常加载ChatGLMFT的参数!")
while True:
            # Enter the idle state, waiting for the next task
kwargs = self.child.recv()
            # Message received, start handling the request
try:
for response, history in self.chatglmft_model.stream_chat(self.chatglmft_tokenizer, **kwargs):
self.child.send(response)
                # # Receive a possible termination command midway (if any)
# if self.child.poll():
# command = self.child.recv()
# if command == '[Terminate]': break
except:
from toolbox import trimmed_format_exc
self.child.send('[Local Message] Call ChatGLMFT fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
            # Request handled, start the next loop
self.child.send('[Finish]')
def stream_chat(self, **kwargs):
        # Executed in the main (parent) process
self.threadLock.acquire()
self.parent.send(kwargs)
while True:
res = self.parent.recv()
if res != '[Finish]':
yield res
else:
break
self.threadLock.release()
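# A hedged usage sketch of the handle's streaming interface (the query and the
# sampling parameters are illustrative, mirroring the call sites further down
# in this file):
#
#   glmft = GetGLMFTHandle()
#   for chunk in glmft.stream_chat(query="hello", history=[],
#                                  max_length=2048, top_p=0.7, temperature=0.95):
#       print(chunk)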
global glmft_handle
glmft_handle = None
#################################################################################
def predict_no_ui_long_connection(inputs:str, llm_kwargs:dict, history:list=[], sys_prompt:str="",
observe_window:list=[], console_slience:bool=False):
"""
    Multi-threaded method.
    See request_llms/bridge_all.py for the documentation of this function.
"""
global glmft_handle
if glmft_handle is None:
glmft_handle = GetGLMFTHandle()
if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glmft_handle.info
if not glmft_handle.success:
error = glmft_handle.info
glmft_handle = None
raise RuntimeError(error)
    # chatglmft has no sys_prompt interface, so the prompt is put into the history
history_feedin = []
history_feedin.append(["What can I do?", sys_prompt])
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
    watch_dog_patience = 5  # watchdog patience: 5 seconds is enough
response = ""
for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
if len(observe_window) >= 1: observe_window[0] = response
if len(observe_window) >= 2:
if (time.time()-observe_window[1]) > watch_dog_patience:
raise RuntimeError("程序终止。")
return response
def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
"""
    Single-threaded method.
    See request_llms/bridge_all.py for the documentation of this function.
"""
chatbot.append((inputs, ""))
global glmft_handle
if glmft_handle is None:
glmft_handle = GetGLMFTHandle()
chatbot[-1] = (inputs, load_message + "\n\n" + glmft_handle.info)
yield from update_ui(chatbot=chatbot, history=[])
if not glmft_handle.success:
glmft_handle = None
return
if additional_fn is not None:
from core_functional import handle_core_functionality
inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
    # Process the history information
history_feedin = []
history_feedin.append(["What can I do?", system_prompt] )
for i in range(len(history)//2):
history_feedin.append([history[2*i], history[2*i+1]] )
    # Start receiving ChatGLMFT's reply
response = "[Local Message] 等待ChatGLMFT响应中 ..."
for response in glmft_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
chatbot[-1] = (inputs, response)
yield from update_ui(chatbot=chatbot, history=history)
    # Summarize the output
if response == "[Local Message] 等待ChatGLMFT响应中 ...":
response = "[Local Message] ChatGLMFT响应异常 ..."
history.extend([inputs, response])
yield from update_ui(chatbot=chatbot, history=history)
| 458056.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
# -*- coding: utf-8 -*-
# Copyright: (c) 2022, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Signature verification helpers."""
from __future__ import annotations
from ansible.errors import AnsibleError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils.urls import open_url
import contextlib
import inspect
import os
import subprocess
import typing as t
from dataclasses import dataclass, fields as dc_fields
from urllib.error import HTTPError, URLError
if t.TYPE_CHECKING:
from ansible.utils.display import Display
def get_signature_from_source(source, display=None): # type: (str, t.Optional[Display]) -> str
if display is not None:
display.vvvv(f"Using signature at {source}")
try:
with open_url(
source,
http_agent=user_agent(),
validate_certs=True,
follow_redirects='safe'
) as resp:
signature = resp.read()
except (HTTPError, URLError) as e:
raise AnsibleError(
f"Failed to get signature for collection verification from '{source}': {e}"
) from e
return signature
def run_gpg_verify(
manifest_file, # type: str
signature, # type: str
keyring, # type: str
display, # type: Display
): # type: (...) -> tuple[str, int]
status_fd_read, status_fd_write = os.pipe()
# running the gpg command will create the keyring if it does not exist
remove_keybox = not os.path.exists(keyring)
cmd = [
'gpg',
f'--status-fd={status_fd_write}',
'--verify',
'--batch',
'--no-tty',
'--no-default-keyring',
f'--keyring={keyring}',
'-',
manifest_file,
]
cmd_str = ' '.join(cmd)
display.vvvv(f"Running command '{cmd}'")
try:
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
pass_fds=(status_fd_write,),
encoding='utf8',
)
except (FileNotFoundError, subprocess.SubprocessError) as err:
raise AnsibleError(
f"Failed during GnuPG verification with command '{cmd_str}': {err}"
) from err
else:
stdout, stderr = p.communicate(input=signature)
finally:
os.close(status_fd_write)
if remove_keybox:
with contextlib.suppress(OSError):
os.remove(keyring)
with os.fdopen(status_fd_read) as f:
stdout = f.read()
display.vvvv(
f"stdout: \n{stdout}\nstderr: \n{stderr}\n(exit code {p.returncode})"
)
return stdout, p.returncode
def parse_gpg_errors(status_out): # type: (str) -> t.Iterator[GpgBaseError]
for line in status_out.splitlines():
if not line:
continue
try:
_dummy, status, remainder = line.split(maxsplit=2)
except ValueError:
_dummy, status = line.split(maxsplit=1)
remainder = None
try:
cls = GPG_ERROR_MAP[status]
except KeyError:
continue
fields = [status]
if remainder:
fields.extend(
remainder.split(
None,
len(dc_fields(cls)) - 2
)
)
yield cls(*fields)
@dataclass(frozen=True, slots=True)
class GpgBaseError(Exception):
status: str
@classmethod
def get_gpg_error_description(cls) -> str:
"""Return the current class description."""
return ' '.join(cls.__doc__.split())
def __post_init__(self):
for field_name, field_type in inspect.get_annotations(type(self), eval_str=True).items():
super(GpgBaseError, self).__setattr__(field_name, field_type(getattr(self, field_name)))
@dataclass(frozen=True, slots=True)
class GpgExpSig(GpgBaseError):
"""The signature with the keyid is good, but the signature is expired."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgExpKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by an expired key."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgRevKeySig(GpgBaseError):
"""The signature with the keyid is good, but the signature was made by a revoked key."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgBadSig(GpgBaseError):
"""The signature with the keyid has not been verified okay."""
keyid: str
username: str
@dataclass(frozen=True, slots=True)
class GpgErrSig(GpgBaseError):
""""It was not possible to check the signature. This may be caused by
a missing public key or an unsupported algorithm. A RC of 4
indicates unknown algorithm, a 9 indicates a missing public
key.
"""
keyid: str
pkalgo: int
hashalgo: int
sig_class: str
time: int
rc: int
fpr: str
@dataclass(frozen=True, slots=True)
class GpgNoPubkey(GpgBaseError):
"""The public key is not available."""
keyid: str
@dataclass(frozen=True, slots=True)
class GpgMissingPassPhrase(GpgBaseError):
"""No passphrase was supplied."""
@dataclass(frozen=True, slots=True)
class GpgBadPassphrase(GpgBaseError):
"""The supplied passphrase was wrong or not given."""
keyid: str
@dataclass(frozen=True, slots=True)
class GpgNoData(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
- 2 :: Expected a packet but did not find one.
- 3 :: Invalid packet found, this may indicate a non OpenPGP
message.
- 4 :: Signature expected but not found.
"""
what: str
@dataclass(frozen=True, slots=True)
class GpgUnexpected(GpgBaseError):
"""No data has been found. Codes for WHAT are:
- 1 :: No armored data.
- 2 :: Expected a packet but did not find one.
- 3 :: Invalid packet found, this may indicate a non OpenPGP
message.
- 4 :: Signature expected but not found.
"""
what: str
@dataclass(frozen=True, slots=True)
class GpgError(GpgBaseError):
"""This is a generic error status message, it might be followed by error location specific data."""
location: str
code: int
more: str = ""
@dataclass(frozen=True, slots=True)
class GpgFailure(GpgBaseError):
"""This is the counterpart to SUCCESS and used to indicate a program failure."""
location: str
code: int
@dataclass(frozen=True, slots=True)
class GpgBadArmor(GpgBaseError):
"""The ASCII armor is corrupted."""
@dataclass(frozen=True, slots=True)
class GpgKeyExpired(GpgBaseError):
"""The key has expired."""
timestamp: int
@dataclass(frozen=True, slots=True)
class GpgKeyRevoked(GpgBaseError):
"""The used key has been revoked by its owner."""
@dataclass(frozen=True, slots=True)
class GpgNoSecKey(GpgBaseError):
"""The secret key is not available."""
keyid: str
GPG_ERROR_MAP = {
'EXPSIG': GpgExpSig,
'EXPKEYSIG': GpgExpKeySig,
'REVKEYSIG': GpgRevKeySig,
'BADSIG': GpgBadSig,
'ERRSIG': GpgErrSig,
'NO_PUBKEY': GpgNoPubkey,
'MISSING_PASSPHRASE': GpgMissingPassPhrase,
'BAD_PASSPHRASE': GpgBadPassphrase,
'NODATA': GpgNoData,
'UNEXPECTED': GpgUnexpected,
'ERROR': GpgError,
'FAILURE': GpgFailure,
'BADARMOR': GpgBadArmor,
'KEYEXPIRED': GpgKeyExpired,
'KEYREVOKED': GpgKeyRevoked,
'NO_SECKEY': GpgNoSecKey,
}
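# A hedged example of the GnuPG status output this module parses (the status
# line and key id are synthetic):
#
#   list(parse_gpg_errors("[GNUPG:] NO_PUBKEY 1234567890ABCDEF"))
#   # -> [GpgNoPubkey(status='NO_PUBKEY', keyid='1234567890ABCDEF')]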
| 539416.py | [
"Unknown"
] |
# (c) 2013, Jayson Vantuyl <[email protected]>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import annotations
DOCUMENTATION = """
name: sequence
author: Jayson Vantuyl (!UNKNOWN) <[email protected]>
version_added: "1.0"
short_description: generate a list based on a number sequence
description:
- generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
to increment the sequence, and an optional printf-style format string.
      - 'Arguments can be specified as key=value pair strings, or using a shortcut form of the argument string: [start-]end[/stride][:format].'
- 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
- Starting at version 1.9.2, negative strides are allowed.
- Generated items are strings. Use Jinja2 filters to convert items to preferred type, e.g. C({{ 1 + item|int }}).
- See also Jinja2 C(range) filter as an alternative.
options:
start:
description: number at which to start the sequence
default: 1
type: integer
end:
        description: number at which to end the sequence, don't use this with count
type: integer
count:
description: number of elements in the sequence, this is not to be used with end
type: integer
stride:
description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
type: integer
default: 1
format:
        description: printf-style format string in which each generated number is returned
default: "%d"
"""
EXAMPLES = """
- name: create some test users
ansible.builtin.user:
name: "{{ item }}"
state: present
groups: "evens"
with_sequence: start=0 end=32 format=testuser%02x
- name: create a series of directories with even numbers for some reason
ansible.builtin.file:
dest: "/var/stuff/{{ item }}"
state: directory
with_sequence: start=4 end=16 stride=2
- name: a simpler way to use the sequence plugin create 4 groups
ansible.builtin.group:
name: "group{{ item }}"
state: present
with_sequence: count=4
- name: the final countdown
ansible.builtin.debug:
msg: "{{item}} seconds to detonation"
with_sequence: start=10 end=0 stride=-1
- name: Use of variable
ansible.builtin.debug:
msg: "{{ item }}"
with_sequence: start=1 end="{{ end_at }}"
vars:
- end_at: 10
"""
RETURN = """
_list:
description:
- A list containing generated sequence of items
type: list
elements: str
"""
from re import compile as re_compile, IGNORECASE
from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
"^(" + # Group 0
NUM + # Group 1: Start
"-)?" +
NUM + # Group 2: End
"(/" + # Group 3
NUM + # Group 4: Stride
")?" +
"(:(.+))?$", # Group 5, Group 6: Format String
IGNORECASE
)
FIELDS = frozenset(('start', 'end', 'stride', 'count', 'format'))
class LookupModule(LookupBase):
"""
sequence lookup module
Used to generate some sequence of items. Takes arguments in two forms.
The simple / shortcut form is:
[start-]end[/stride][:format]
As indicated by the brackets: start, stride, and format string are all
optional. The format string is in the style of printf. This can be used
to pad with zeros, format in hexadecimal, etc. All of the numerical values
can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative numbers are not supported in this shortcut form (use the
    key=value form for negative strides).
Some examples:
5 -> ["1","2","3","4","5"]
5-8 -> ["5", "6", "7", "8"]
2-10/2 -> ["2", "4", "6", "8", "10"]
4:host%02d -> ["host01","host02","host03","host04"]
The standard Ansible key-value form is accepted as well. For example:
start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]
This format takes an alternate form of "end" called "count", which counts
some number from the starting value. For example:
count=5 -> ["1", "2", "3", "4", "5"]
start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
The count option is mostly useful for avoiding off-by-one errors and errors
calculating the number of entries in a sequence when a stride is specified.
"""
def parse_kv_args(self, args):
"""parse key-value style arguments"""
for arg in FIELDS:
value = args.pop(arg, None)
if value is not None:
self.set_option(arg, value)
if args:
raise AnsibleError(
"unrecognized arguments to with_sequence: %s"
% list(args.keys())
)
def parse_simple_args(self, term):
"""parse the shortcut forms, return True/False"""
match = SHORTCUT.match(term)
if not match:
return False
dummy, start, end, dummy, stride, dummy, format = match.groups()
for key in FIELDS:
value = locals().get(key, None)
if value is not None:
self.set_option(key, value)
return True
def set_fields(self):
for f in FIELDS:
setattr(self, f, self.get_option(f))
def sanity_check(self):
if self.count is None and self.end is None:
raise AnsibleError("must specify count or end in with_sequence")
elif self.count is not None and self.end is not None:
raise AnsibleError("can't specify both count and end in with_sequence")
elif self.count is not None:
# convert count to end
if self.count != 0:
self.end = self.start + self.count * self.stride - 1
else:
self.start = 0
self.end = 0
self.stride = 0
del self.count
if self.stride > 0 and self.end < self.start:
raise AnsibleError("to count backwards make stride negative")
if self.stride < 0 and self.end > self.start:
raise AnsibleError("to count forward don't make stride negative")
if self.format.count('%') != 1:
raise AnsibleError("bad formatting string: %s" % self.format)
def generate_sequence(self):
if self.stride >= 0:
adjust = 1
else:
adjust = -1
numbers = range(self.start, self.end + adjust, self.stride)
for i in numbers:
try:
formatted = self.format % i
yield formatted
except (ValueError, TypeError):
raise AnsibleError(
"problem formatting %r with %r" % (i, self.format)
)
def run(self, terms, variables, **kwargs):
results = []
for term in terms:
try:
# set defaults/global
self.set_options(direct=kwargs)
try:
if not self.parse_simple_args(term):
self.parse_kv_args(parse_kv(term))
except AnsibleError:
raise
except Exception as e:
raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.set_fields()
self.sanity_check()
if self.stride != 0:
results.extend(self.generate_sequence())
except AnsibleError:
raise
except Exception as e:
raise AnsibleError(
"unknown error generating sequence: %s" % e
)
return results
| 763767.py | [
"CWE-96: Improper Neutralization of Directives in Statically Saved Code ('Static Code Injection')"
] |
"""Sanity test using validate-modules."""
from __future__ import annotations
import collections
import contextlib
import json
import os
import tarfile
import typing as t
from . import (
DOCUMENTABLE_PLUGINS,
MULTI_FILE_PLUGINS,
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SanityTargets,
SANITY_ROOT,
)
from ...io import (
make_dirs,
)
from ...test import (
TestResult,
)
from ...target import (
TestTarget,
)
from ...util import (
SubprocessError,
display,
)
from ...util_common import (
ExitHandler,
process_scoped_temporary_directory,
run_command,
ResultType,
)
from ...ansible_util import (
ansible_environment,
get_collection_detail,
CollectionDetailError,
)
from ...config import (
SanityConfig,
)
from ...ci import (
get_ci_provider,
)
from ...data import (
data_context,
PayloadConfig,
)
from ...host_configs import (
PythonConfig,
)
from ...git import (
Git,
)
from ...provider.source import (
SourceProvider as GitSourceProvider,
)
class ValidateModulesTest(SanitySingleVersion):
"""Sanity test using validate-modules."""
def __init__(self) -> None:
super().__init__()
self.optional_error_codes.update([
'deprecated-date',
])
self._prefixes = {
plugin_type: plugin_path + '/'
for plugin_type, plugin_path in data_context().content.plugin_paths.items()
if plugin_type in DOCUMENTABLE_PLUGINS
}
self._exclusions = set()
if not data_context().content.collection:
self._exclusions.add('lib/ansible/plugins/cache/base.py')
@property
def error_code(self) -> t.Optional[str]:
"""Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
return 'A100'
def get_plugin_type(self, target: TestTarget) -> t.Optional[str]:
"""Return the plugin type of the given target, or None if it is not a plugin or module."""
if target.path.endswith('/__init__.py'):
return None
if target.path in self._exclusions:
return None
for plugin_type, prefix in self._prefixes.items():
if target.path.startswith(prefix):
return plugin_type
return None
def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
"""Return the given list of test targets, filtered to include only those relevant for the test."""
return [target for target in targets if self.get_plugin_type(target) is not None]
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
env = ansible_environment(args, color=False)
settings = self.load_processor(args)
target_per_type = collections.defaultdict(list)
for target in targets.include:
target_per_type[self.get_plugin_type(target)].append(target)
# Remove plugins that cannot be associated to a single file (test and filter plugins).
for plugin_type in MULTI_FILE_PLUGINS:
target_per_type.pop(plugin_type, None)
cmd = [
python.path,
os.path.join(SANITY_ROOT, 'validate-modules', 'validate.py'),
'--format', 'json',
'--arg-spec',
] # fmt: skip
if data_context().content.collection:
cmd.extend(['--collection', data_context().content.collection.directory])
try:
collection_detail = get_collection_detail(python)
if collection_detail.version:
cmd.extend(['--collection-version', collection_detail.version])
else:
display.warning('Skipping validate-modules collection version checks since no collection version was found.')
except CollectionDetailError as ex:
display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason)
else:
path = self.get_archive_path(args)
if os.path.exists(path):
temp_dir = process_scoped_temporary_directory(args)
with tarfile.open(path) as file:
# deprecated: description='extractall fallback without filter' python_version='3.11'
if hasattr(tarfile, 'data_filter'):
file.extractall(temp_dir, filter='data') # type: ignore[call-arg]
else:
file.extractall(temp_dir)
cmd.extend([
'--original-plugins', temp_dir,
])
errors = []
for plugin_type, plugin_targets in sorted(target_per_type.items()):
paths = [target.path for target in plugin_targets]
plugin_cmd = list(cmd)
if plugin_type != 'modules':
plugin_cmd += ['--plugin-type', plugin_type]
plugin_cmd += paths
try:
stdout, stderr = run_command(args, plugin_cmd, env=env, capture=True)
status = 0
except SubprocessError as ex:
stdout = ex.stdout
stderr = ex.stderr
status = ex.status
if stderr or status not in (0, 3):
raise SubprocessError(cmd=plugin_cmd, status=status, stderr=stderr, stdout=stdout)
if args.explain:
continue
messages = json.loads(stdout)
for filename in messages:
output = messages[filename]
for item in output['errors']:
errors.append(SanityMessage(
path=filename,
line=int(item['line']) if 'line' in item else 0,
column=int(item['column']) if 'column' in item else 0,
code='%s' % item['code'],
message=item['msg'],
))
all_paths = [target.path for target in targets.include]
all_errors = settings.process_errors(errors, all_paths)
if args.explain:
return SanitySuccess(self.name)
if all_errors:
return SanityFailure(self.name, messages=all_errors)
return SanitySuccess(self.name)
def origin_hook(self, args: SanityConfig) -> None:
"""This method is called on the origin, before the test runs or delegation occurs."""
if not data_context().content.is_ansible:
return
if not isinstance(data_context().source_provider, GitSourceProvider):
display.warning('The validate-modules sanity test cannot compare against the base commit because git is not being used.')
return
base_commit = args.base_branch or get_ci_provider().get_base_commit(args)
if not base_commit:
display.warning('The validate-modules sanity test cannot compare against the base commit because it was not detected.')
return
path = self.get_archive_path(args)
def cleanup() -> None:
"""Cleanup callback called when the process exits."""
with contextlib.suppress(FileNotFoundError):
os.unlink(path)
def git_callback(payload_config: PayloadConfig) -> None:
"""Include the previous plugin content archive in the payload."""
files = payload_config.files
files.append((path, os.path.relpath(path, data_context().content.root)))
ExitHandler.register(cleanup)
data_context().register_payload_callback(git_callback)
make_dirs(os.path.dirname(path))
git = Git()
git.run_git(['archive', '--output', path, base_commit, 'lib/ansible/modules/', 'lib/ansible/plugins/'])
@staticmethod
def get_archive_path(args: SanityConfig) -> str:
"""Return the path to the original plugin content archive."""
return os.path.join(ResultType.TMP.path, f'validate-modules-{args.metadata.session_id}.tar')
| 589414.py | [
"CWE-22: Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')"
] |
import os
import subprocess
import sys
import sysconfig
import tempfile
from contextlib import nullcontext
from importlib import resources
from pathlib import Path
from shutil import copy2
__all__ = ["version", "bootstrap"]
_PIP_VERSION = "24.2"
# Directory of system wheel packages. Some Linux distribution packaging
# policies recommend against bundling dependencies. For example, Fedora
# installs wheel packages in the /usr/share/python-wheels/ directory and doesn't
# install the ensurepip._bundled package.
if (_pkg_dir := sysconfig.get_config_var('WHEEL_PKG_DIR')) is not None:
_WHEEL_PKG_DIR = Path(_pkg_dir).resolve()
else:
_WHEEL_PKG_DIR = None
def _find_wheel_pkg_dir_pip():
if _WHEEL_PKG_DIR is None:
# NOTE: The compile-time `WHEEL_PKG_DIR` is unset so there is no place
# NOTE: for looking up the wheels.
return None
dist_matching_wheels = _WHEEL_PKG_DIR.glob('pip-*.whl')
try:
last_matching_dist_wheel = sorted(dist_matching_wheels)[-1]
except IndexError:
# NOTE: `WHEEL_PKG_DIR` does not contain any wheel files for `pip`.
return None
return nullcontext(last_matching_dist_wheel)
def _get_pip_whl_path_ctx():
# Prefer pip from the wheel package directory, if present.
if (alternative_pip_wheel_path := _find_wheel_pkg_dir_pip()) is not None:
return alternative_pip_wheel_path
return resources.as_file(
resources.files('ensurepip')
/ '_bundled'
/ f'pip-{_PIP_VERSION}-py3-none-any.whl'
)
def _get_pip_version():
with _get_pip_whl_path_ctx() as bundled_wheel_path:
wheel_name = bundled_wheel_path.name
return (
# Extract '21.2.4' from 'pip-21.2.4-py3-none-any.whl'
wheel_name.
removeprefix('pip-').
partition('-')[0]
)
def _run_pip(args, additional_paths=None):
# Run the bootstrapping in a subprocess to avoid leaking any state that happens
# after pip has executed. Particularly, this avoids the case when pip holds onto
    # the files in *additional_paths*, preventing us from removing them at the end of the
# invocation.
code = f"""
import runpy
import sys
sys.path = {additional_paths or []} + sys.path
sys.argv[1:] = {args}
runpy.run_module("pip", run_name="__main__", alter_sys=True)
"""
cmd = [
sys.executable,
'-W',
'ignore::DeprecationWarning',
'-c',
code,
]
if sys.flags.isolated:
# run code in isolated mode if currently running isolated
cmd.insert(1, '-I')
return subprocess.run(cmd, check=True).returncode
def version():
"""
Returns a string specifying the bundled version of pip.
"""
return _get_pip_version()
def _disable_pip_configuration_settings():
# We deliberately ignore all pip environment variables
# when invoking pip
# See http://bugs.python.org/issue19734 for details
keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
for k in keys_to_remove:
del os.environ[k]
# We also ignore the settings in the default pip configuration file
# See http://bugs.python.org/issue20053 for details
os.environ['PIP_CONFIG_FILE'] = os.devnull
def bootstrap(*, root=None, upgrade=False, user=False,
altinstall=False, default_pip=False,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory).
Note that calling this function will alter both sys.path and os.environ.
"""
# Discard the return value
_bootstrap(root=root, upgrade=upgrade, user=user,
altinstall=altinstall, default_pip=default_pip,
verbosity=verbosity)
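# Illustrative usage of the public API (a minimal sketch, not part of ensurepip
# itself; the target root below is a hypothetical path):
#
#     import ensurepip
#     ensurepip.bootstrap(root="/tmp/alt-root", upgrade=True, verbosity=1)
#
# This installs the bundled pip wheel under /tmp/alt-root and, as noted in the
# docstring above, mutates os.environ (e.g. ENSUREPIP_OPTIONS) along the way.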
def _bootstrap(*, root=None, upgrade=False, user=False,
altinstall=False, default_pip=False,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory). Returns pip command status code.
Note that calling this function will alter both sys.path and os.environ.
"""
if altinstall and default_pip:
raise ValueError("Cannot use altinstall and default_pip together")
sys.audit("ensurepip.bootstrap", root)
_disable_pip_configuration_settings()
# By default, installing pip installs all of the
# following scripts (X.Y == running Python version):
#
# pip, pipX, pipX.Y
#
# pip 1.5+ allows ensurepip to request that some of those be left out
if altinstall:
# omit pip, pipX
os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
elif not default_pip:
# omit pip
os.environ["ENSUREPIP_OPTIONS"] = "install"
with tempfile.TemporaryDirectory() as tmpdir:
# Put our bundled wheels into a temporary directory and construct the
        # additional paths that need to be added to sys.path
tmpdir_path = Path(tmpdir)
with _get_pip_whl_path_ctx() as bundled_wheel_path:
tmp_wheel_path = tmpdir_path / bundled_wheel_path.name
copy2(bundled_wheel_path, tmp_wheel_path)
# Construct the arguments to be passed to the pip command
args = ["install", "--no-cache-dir", "--no-index", "--find-links", tmpdir]
if root:
args += ["--root", root]
if upgrade:
args += ["--upgrade"]
if user:
args += ["--user"]
if verbosity:
args += ["-" + "v" * verbosity]
return _run_pip([*args, "pip"], [os.fsdecode(tmp_wheel_path)])
def _uninstall_helper(*, verbosity=0):
"""Helper to support a clean default uninstall process on Windows
Note that calling this function may alter os.environ.
"""
# Nothing to do if pip was never installed, or has been removed
try:
import pip
except ImportError:
return
# If the installed pip version doesn't match the available one,
# leave it alone
available_version = version()
if pip.__version__ != available_version:
print(f"ensurepip will only uninstall a matching version "
f"({pip.__version__!r} installed, "
f"{available_version!r} available)",
file=sys.stderr)
return
_disable_pip_configuration_settings()
# Construct the arguments to be passed to the pip command
args = ["uninstall", "-y", "--disable-pip-version-check"]
if verbosity:
args += ["-" + "v" * verbosity]
return _run_pip([*args, "pip"])
def _main(argv=None):
import argparse
parser = argparse.ArgumentParser(prog="python -m ensurepip")
parser.add_argument(
"--version",
action="version",
version="pip {}".format(version()),
help="Show the version of pip that is bundled with this Python.",
)
parser.add_argument(
"-v", "--verbose",
action="count",
default=0,
dest="verbosity",
help=("Give more output. Option is additive, and can be used up to 3 "
"times."),
)
parser.add_argument(
"-U", "--upgrade",
action="store_true",
default=False,
help="Upgrade pip and dependencies, even if already installed.",
)
parser.add_argument(
"--user",
action="store_true",
default=False,
help="Install using the user scheme.",
)
parser.add_argument(
"--root",
default=None,
help="Install everything relative to this alternate root directory.",
)
parser.add_argument(
"--altinstall",
action="store_true",
default=False,
help=("Make an alternate install, installing only the X.Y versioned "
"scripts (Default: pipX, pipX.Y)."),
)
parser.add_argument(
"--default-pip",
action="store_true",
default=False,
help=("Make a default pip install, installing the unqualified pip "
"in addition to the versioned scripts."),
)
args = parser.parse_args(argv)
return _bootstrap(
root=args.root,
upgrade=args.upgrade,
user=args.user,
verbosity=args.verbosity,
altinstall=args.altinstall,
default_pip=args.default_pip,
)
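# Equivalent command-line entry points handled by _main() above (illustrative):
#
#     python -m ensurepip               # install the bundled pip if missing
#     python -m ensurepip --upgrade     # upgrade an existing installation
#     python -m ensurepip --version     # print the bundled pip version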
| 886160.py | [
"Unknown"
] |
"""Pop up a reminder of how to call a function.
Call Tips are floating windows which display function, class, and method
parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
import __main__
import inspect
import re
import sys
import textwrap
import types
from idlelib import calltip_w
from idlelib.hyperparser import HyperParser
class Calltip:
def __init__(self, editwin=None):
if editwin is None: # subprocess and test
self.editwin = None
else:
self.editwin = editwin
self.text = editwin.text
self.active_calltip = None
self._calltip_window = self._make_tk_calltip_window
def close(self):
self._calltip_window = None
def _make_tk_calltip_window(self):
# See __init__ for usage
return calltip_w.CalltipWindow(self.text)
def remove_calltip_window(self, event=None):
if self.active_calltip:
self.active_calltip.hidetip()
self.active_calltip = None
def force_open_calltip_event(self, event):
"The user selected the menu entry or hotkey, open the tip."
self.open_calltip(True)
return "break"
def try_open_calltip_event(self, event):
"""Happens when it would be nice to open a calltip, but not really
necessary, for example after an opening bracket, so function calls
won't be made.
"""
self.open_calltip(False)
def refresh_calltip_event(self, event):
if self.active_calltip and self.active_calltip.tipwindow:
self.open_calltip(False)
def open_calltip(self, evalfuncs):
"""Maybe close an existing calltip and maybe open a new calltip.
Called from (force_open|try_open|refresh)_calltip_event functions.
"""
hp = HyperParser(self.editwin, "insert")
sur_paren = hp.get_surrounding_brackets('(')
# If not inside parentheses, no calltip.
if not sur_paren:
self.remove_calltip_window()
return
# If a calltip is shown for the current parentheses, do
# nothing.
if self.active_calltip:
opener_line, opener_col = map(int, sur_paren[0].split('.'))
if (
(opener_line, opener_col) ==
(self.active_calltip.parenline, self.active_calltip.parencol)
):
return
hp.set_index(sur_paren[0])
try:
expression = hp.get_expression()
except ValueError:
expression = None
if not expression:
# No expression before the opening parenthesis, e.g.
# because it's in a string or the opener for a tuple:
# Do nothing.
return
# At this point, the current index is after an opening
# parenthesis, in a section of code, preceded by a valid
# expression. If there is a calltip shown, it's not for the
# same index and should be closed.
self.remove_calltip_window()
# Simple, fast heuristic: If the preceding expression includes
# an opening parenthesis, it likely includes a function call.
if not evalfuncs and (expression.find('(') != -1):
return
argspec = self.fetch_tip(expression)
if not argspec:
return
self.active_calltip = self._calltip_window()
self.active_calltip.showtip(argspec, sur_paren[0], sur_paren[1])
def fetch_tip(self, expression):
"""Return the argument list and docstring of a function or class.
If there is a Python subprocess, get the calltip there. Otherwise,
either this fetch_tip() is running in the subprocess or it was
called in an IDLE running without the subprocess.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
To find methods, fetch_tip must be fed a fully qualified name.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except AttributeError:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_calltip",
(expression,), {})
else:
return get_argspec(get_entity(expression))
def get_entity(expression):
"""Return the object corresponding to expression evaluated
    in a namespace spanning sys.modules and __main__.__dict__.
"""
if expression:
namespace = {**sys.modules, **__main__.__dict__}
try:
return eval(expression, namespace) # Only protect user code.
except BaseException:
# An uncaught exception closes idle, and eval can raise any
# exception, especially if user classes are involved.
return None
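# For example (an illustrative sketch; assumes the user's code has already
# imported json, so "json" is present in sys.modules):
#
#     get_entity("json.dumps")   # -> the json.dumps function object
#     get_entity("1 + ")         # -> None (eval raises, which is swallowed)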
# The following are used in get_argspec and some in tests
_MAX_COLS = 85
_MAX_LINES = 5 # enough for bytes
_INDENT = ' '*4 # for wrapped signatures
_first_param = re.compile(r'(?<=\()\w*\,?\s*')
_default_callable_argspec = "See source or doc"
_invalid_method = "invalid method signature"
def get_argspec(ob):
'''Return a string describing the signature of a callable object, or ''.
For Python-coded functions and methods, the first line is introspected.
Delete 'self' parameter for classes (.__init__) and bound methods.
The next lines are the first lines of the doc string up to the first
empty line or _MAX_LINES. For builtins, this typically includes
the arguments in addition to the return value.
'''
# Determine function object fob to inspect.
try:
ob_call = ob.__call__
except BaseException: # Buggy user object could raise anything.
return '' # No popup for non-callables.
# For Get_argspecTest.test_buggy_getattr_class, CallA() & CallB().
fob = ob_call if isinstance(ob_call, types.MethodType) else ob
# Initialize argspec and wrap it to get lines.
try:
argspec = str(inspect.signature(fob))
except Exception as err:
msg = str(err)
if msg.startswith(_invalid_method):
return _invalid_method
else:
argspec = ''
if isinstance(fob, type) and argspec == '()':
# If fob has no argument, use default callable argspec.
argspec = _default_callable_argspec
lines = (textwrap.wrap(argspec, _MAX_COLS, subsequent_indent=_INDENT)
if len(argspec) > _MAX_COLS else [argspec] if argspec else [])
# Augment lines from docstring, if any, and join to get argspec.
doc = inspect.getdoc(ob)
if doc:
for line in doc.split('\n', _MAX_LINES)[:_MAX_LINES]:
line = line.strip()
if not line:
break
if len(line) > _MAX_COLS:
line = line[: _MAX_COLS - 3] + '...'
lines.append(line)
argspec = '\n'.join(lines)
return argspec or _default_callable_argspec
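# A small worked example (illustrative only; not used by IDLE itself):
#
#     def example(a, b=1):
#         "Add two numbers."
#         return a + b
#
#     get_argspec(example)  # -> '(a, b=1)\nAdd two numbers.'
#
# i.e. the first line is the introspected signature and the following lines are
# taken from the start of the docstring, wrapped to _MAX_COLS.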
if __name__ == '__main__':
from unittest import main
main('idlelib.idle_test.test_calltip', verbosity=2)
| 525549.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
import inspect
import warnings
import itertools
from typing import Union, Optional, cast
from .abc import ResourceReader, Traversable
Package = Union[types.ModuleType, str]
Anchor = Package
def package_to_anchor(func):
"""
    Replace the 'package' parameter with 'anchor' and warn about the change.
Other errors should fall through.
>>> files('a', 'b')
Traceback (most recent call last):
TypeError: files() takes from 0 to 1 positional arguments but 2 were given
Remove this compatibility in Python 3.14.
"""
undefined = object()
@functools.wraps(func)
def wrapper(anchor=undefined, package=undefined):
if package is not undefined:
if anchor is not undefined:
return func(anchor, package)
warnings.warn(
"First parameter to files is renamed to 'anchor'",
DeprecationWarning,
stacklevel=2,
)
return func(package)
elif anchor is undefined:
return func()
return func(anchor)
return wrapper
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
"""
Get a Traversable resource for an anchor.
"""
return from_package(resolve(anchor))
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
"""
Return the package's loader if it's a ResourceReader.
"""
# We can't use
    # an issubclass() check here because apparently abc's __subclasscheck__()
# hook wants to create a weak reference to the object, but
# zipimport.zipimporter does not support weak references, resulting in a
# TypeError. That seems terrible.
spec = package.__spec__
reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
if reader is None:
return None
return reader(spec.name) # type: ignore
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
return cast(types.ModuleType, cand)
@resolve.register
def _(cand: str) -> types.ModuleType:
return importlib.import_module(cand)
@resolve.register
def _(cand: None) -> types.ModuleType:
return resolve(_infer_caller().f_globals['__name__'])
def _infer_caller():
"""
Walk the stack and find the frame of the first caller not in this module.
"""
def is_this_file(frame_info):
return frame_info.filename == __file__
def is_wrapper(frame_info):
return frame_info.function == 'wrapper'
not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
# also exclude 'wrapper' due to singledispatch in the call stack
callers = itertools.filterfalse(is_wrapper, not_this_file)
return next(callers).frame
def from_package(package: types.ModuleType):
"""
Return a Traversable object for the given package.
"""
# deferred for performance (python/cpython#109829)
from ._adapters import wrap_spec
spec = wrap_spec(package)
reader = spec.loader.get_resource_reader(spec.name)
return reader.files()
@contextlib.contextmanager
def _tempfile(
reader,
suffix='',
# gh-93353: Keep a reference to call os.remove() in late Python
# finalization.
*,
_os_remove=os.remove,
):
# Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
# blocks due to the need to close the temporary file to work on Windows
# properly.
fd, raw_path = tempfile.mkstemp(suffix=suffix)
try:
try:
os.write(fd, reader())
finally:
os.close(fd)
del reader
yield pathlib.Path(raw_path)
finally:
try:
_os_remove(raw_path)
except FileNotFoundError:
pass
def _temp_file(path):
return _tempfile(path.read_bytes, suffix=path.name)
def _is_present_dir(path: Traversable) -> bool:
"""
Some Traversables implement ``is_dir()`` to raise an
exception (i.e. ``FileNotFoundError``) when the
directory doesn't exist. This function wraps that call
to always return a boolean and only return True
if there's a dir and it exists.
"""
with contextlib.suppress(FileNotFoundError):
return path.is_dir()
return False
@functools.singledispatch
def as_file(path):
"""
Given a Traversable object, return that object as a
path on the local file system in a context manager.
"""
return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)
@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
"""
Degenerate behavior for pathlib.Path objects.
"""
yield path
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
"""
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
"""
with dir as result:
yield pathlib.Path(result)
@contextlib.contextmanager
def _temp_dir(path):
"""
Given a traversable dir, recursively replicate the whole tree
to the file system in a context manager.
"""
assert path.is_dir()
with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
yield _write_contents(temp_dir, path)
def _write_contents(target, source):
child = target.joinpath(source.name)
if source.is_dir():
child.mkdir()
for item in source.iterdir():
_write_contents(child, item)
else:
child.write_bytes(source.read_bytes())
return child
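# Illustrative usage of files()/as_file() (a minimal sketch; 'mypkg' and
# 'data.txt' are hypothetical names):
#
#     resource = files('mypkg') / 'data.txt'
#     with as_file(resource) as path:
#         data = path.read_bytes()
#
# as_file() yields the real file directly when the resource already lives on
# the file system, and otherwise materialises a temporary copy (or directory
# tree) for the duration of the with-block, e.g. for packages imported from a
# zip archive.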
| 348050.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
"""
Title: Multi-GPU distributed training with PyTorch
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2023/06/29
Last modified: 2023/06/29
Description: Guide to multi-GPU training for Keras models with PyTorch.
Accelerator: GPU
"""
"""
## Introduction
There are generally two ways to distribute computation across multiple devices:
**Data parallelism**, where a single model gets replicated on multiple devices or
multiple machines. Each of them processes different batches of data, then they merge
their results. There exist many variants of this setup, that differ in how the different
model replicas merge results, in whether they stay in sync at every batch or whether they
are more loosely coupled, etc.
**Model parallelism**, where different parts of a single model run on different devices,
processing a single batch of data together. This works best with models that have a
naturally-parallel architecture, such as models that feature multiple branches.
This guide focuses on data parallelism, in particular **synchronous data parallelism**,
where the different replicas of the model stay in sync after each batch they process.
Synchronicity keeps the model convergence behavior identical to what you would see for
single-device training.
Specifically, this guide teaches you how to use PyTorch's `DistributedDataParallel`
module wrapper to train Keras models, with minimal changes to your code,
on multiple GPUs (typically 2 to 16) installed on a single machine (single host,
multi-device training). This is the most common setup for researchers and small-scale
industry workflows.
"""
"""
## Setup
Let's start by defining the function that creates the model that we will train,
and the function that creates the dataset we will train on (MNIST in this case).
"""
import os
os.environ["KERAS_BACKEND"] = "torch"
import torch
import numpy as np
import keras
def get_model():
# Make a simple convnet with batch normalization and dropout.
inputs = keras.Input(shape=(28, 28, 1))
x = keras.layers.Rescaling(1.0 / 255.0)(inputs)
x = keras.layers.Conv2D(
filters=12, kernel_size=3, padding="same", use_bias=False
)(x)
x = keras.layers.BatchNormalization(scale=False, center=True)(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv2D(
filters=24,
kernel_size=6,
use_bias=False,
strides=2,
)(x)
x = keras.layers.BatchNormalization(scale=False, center=True)(x)
x = keras.layers.ReLU()(x)
x = keras.layers.Conv2D(
filters=32,
kernel_size=6,
padding="same",
strides=2,
name="large_k",
)(x)
x = keras.layers.BatchNormalization(scale=False, center=True)(x)
x = keras.layers.ReLU()(x)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dense(256, activation="relu")(x)
x = keras.layers.Dropout(0.5)(x)
outputs = keras.layers.Dense(10)(x)
model = keras.Model(inputs, outputs)
return model
def get_dataset():
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
return dataset
"""
Next, let's define a simple PyTorch training loop that targets
a GPU (note the calls to `.cuda()`).
"""
def train_model(model, dataloader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
running_loss_count = 0
for batch_idx, (inputs, targets) in enumerate(dataloader):
inputs = inputs.cuda(non_blocking=True)
targets = targets.cuda(non_blocking=True)
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
running_loss_count += 1
# Print loss statistics
print(
f"Epoch {epoch + 1}/{num_epochs}, "
f"Loss: {running_loss / running_loss_count}"
)
"""
## Single-host, multi-device synchronous training
In this setup, you have one machine with several GPUs on it (typically 2 to 16). Each
device will run a copy of your model (called a **replica**). For simplicity, in what
follows, we'll assume we're dealing with 8 GPUs, without loss of generality.
**How it works**
At each step of training:
- The current batch of data (called **global batch**) is split into 8 different
sub-batches (called **local batches**). For instance, if the global batch has 512
samples, each of the 8 local batches will have 64 samples.
- Each of the 8 replicas independently processes a local batch: they run a forward pass,
then a backward pass, outputting the gradient of the weights with respect to the loss of
the model on the local batch.
- The weight updates originating from local gradients are efficiently merged across the 8
replicas. Because this is done at the end of every step, the replicas always stay in
sync.
In practice, the process of synchronously updating the weights of the model replicas is
handled at the level of each individual weight variable. This is done through a **mirrored
variable** object.
**How to use it**
To do single-host, multi-device synchronous training with a Keras model, you would use
the `torch.nn.parallel.DistributedDataParallel` module wrapper.
Here's how it works:
- We use `torch.multiprocessing.start_processes` to start multiple Python processes, one
per device. Each process will run the `per_device_launch_fn` function.
- The `per_device_launch_fn` function does the following:
- It uses `torch.distributed.init_process_group` and `torch.cuda.set_device`
to configure the device to be used for that process.
- It uses `torch.utils.data.distributed.DistributedSampler`
and `torch.utils.data.DataLoader` to turn our data into a distributed data loader.
- It also uses `torch.nn.parallel.DistributedDataParallel` to turn our model into
a distributed PyTorch module.
- It then calls the `train_model` function.
- The `train_model` function will then run in each process, with the model using
a separate device in each process.
Here's the flow, where each step is split into its own utility function:
"""
# Config
num_gpu = torch.cuda.device_count()
num_epochs = 2
batch_size = 64
print(f"Running on {num_gpu} GPUs")
def setup_device(current_gpu_index, num_gpus):
# Device setup
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "56492"
device = torch.device("cuda:{}".format(current_gpu_index))
torch.distributed.init_process_group(
backend="nccl",
init_method="env://",
world_size=num_gpus,
rank=current_gpu_index,
)
torch.cuda.set_device(device)
def cleanup():
torch.distributed.destroy_process_group()
def prepare_dataloader(dataset, current_gpu_index, num_gpus, batch_size):
sampler = torch.utils.data.distributed.DistributedSampler(
dataset,
num_replicas=num_gpus,
rank=current_gpu_index,
shuffle=False,
)
dataloader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_size=batch_size,
shuffle=False,
)
return dataloader
def per_device_launch_fn(current_gpu_index, num_gpu):
# Setup the process groups
setup_device(current_gpu_index, num_gpu)
dataset = get_dataset()
model = get_model()
# prepare the dataloader
dataloader = prepare_dataloader(
dataset, current_gpu_index, num_gpu, batch_size
)
# Instantiate the torch optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# Instantiate the torch loss function
loss_fn = torch.nn.CrossEntropyLoss()
# Put model on device
model = model.to(current_gpu_index)
ddp_model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[current_gpu_index], output_device=current_gpu_index
)
train_model(ddp_model, dataloader, num_epochs, optimizer, loss_fn)
cleanup()
"""
Time to start multiple processes:
"""
if __name__ == "__main__":
# We use the "fork" method rather than "spawn" to support notebooks
torch.multiprocessing.start_processes(
per_device_launch_fn,
args=(num_gpu,),
nprocs=num_gpu,
join=True,
start_method="fork",
)
"""
That's it!
"""
| 422890.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
"""IMDB sentiment classification dataset."""
import json
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
from keras.src.utils.python_utils import remove_long_seq
@keras_export("keras.datasets.imdb.load_data")
def load_data(
path="imdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3,
**kwargs,
):
"""Loads the [IMDB dataset](https://ai.stanford.edu/~amaas/data/sentiment/).
This is a dataset of 25,000 movies reviews from IMDB, labeled by sentiment
(positive/negative). Reviews have been preprocessed, and each review is
encoded as a list of word indexes (integers).
For convenience, words are indexed by overall frequency in the dataset,
so that for instance the integer "3" encodes the 3rd most frequent word in
the data. This allows for quick filtering operations such as:
"only consider the top 10,000 most
common words, but eliminate the top 20 most common words".
As a convention, "0" does not stand for a specific word, but instead is used
to encode the pad token.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: integer or None. Words are
ranked by how often they occur (in the training set) and only
the `num_words` most frequent words are kept. Any less frequent word
will appear as `oov_char` value in the sequence data. If None,
all words are kept. Defaults to `None`.
skip_top: skip the top N most frequently occurring words
(which may not be informative). These words will appear as
`oov_char` value in the dataset. When 0, no words are
skipped. Defaults to `0`.
maxlen: int or None. Maximum sequence length.
Any longer sequence will be truncated. None, means no truncation.
Defaults to `None`.
seed: int. Seed for reproducible data shuffling.
start_char: int. The start of a sequence will be marked with this
character. 0 is usually the padding character. Defaults to `1`.
oov_char: int. The out-of-vocabulary character.
Words that were cut out because of the `num_words` or
`skip_top` limits will be replaced with this character.
index_from: int. Index actual words with this index and higher.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`, `x_test`**: lists of sequences, which are lists of indexes
      (integers). If the num_words argument was specified, the maximum
possible index value is `num_words - 1`. If the `maxlen` argument was
specified, the largest possible sequence length is `maxlen`.
**`y_train`, `y_test`**: lists of integer labels (1 or 0).
**Note**: The 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=origin_folder + "imdb.npz",
file_hash=( # noqa: E501
"69664113be75683a8fe16e3ed0ab59fda8886cb3cd7ada244f7d9544e4676b9f"
),
)
with np.load(path, allow_pickle=True) as f:
x_train, labels_train = f["x_train"], f["y_train"]
x_test, labels_test = f["x_test"], f["y_test"]
rng = np.random.RandomState(seed)
indices = np.arange(len(x_train))
rng.shuffle(indices)
x_train = x_train[indices]
labels_train = labels_train[indices]
indices = np.arange(len(x_test))
rng.shuffle(indices)
x_test = x_test[indices]
labels_test = labels_test[indices]
if start_char is not None:
x_train = [[start_char] + [w + index_from for w in x] for x in x_train]
x_test = [[start_char] + [w + index_from for w in x] for x in x_test]
elif index_from:
x_train = [[w + index_from for w in x] for x in x_train]
x_test = [[w + index_from for w in x] for x in x_test]
else:
x_train = [[w for w in x] for x in x_train]
x_test = [[w for w in x] for x in x_test]
if maxlen:
x_train, labels_train = remove_long_seq(maxlen, x_train, labels_train)
x_test, labels_test = remove_long_seq(maxlen, x_test, labels_test)
if not x_train or not x_test:
raise ValueError(
"After filtering for sequences shorter than maxlen="
f"{str(maxlen)}, no sequence was kept. Increase maxlen."
)
xs = x_train + x_test
labels = np.concatenate([labels_train, labels_test])
if not num_words:
num_words = max(max(x) for x in xs)
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [
[w if (skip_top <= w < num_words) else oov_char for w in x]
for x in xs
]
else:
xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
idx = len(x_train)
x_train, y_train = np.array(xs[:idx], dtype="object"), labels[:idx]
x_test, y_test = np.array(xs[idx:], dtype="object"), labels[idx:]
return (x_train, y_train), (x_test, y_test)
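# Minimal usage sketch (illustrative; downloads the dataset on first call):
#
#     (x_train, y_train), (x_test, y_test) = load_data(num_words=10000, skip_top=20)
#
# With these arguments every index in the returned sequences is either
# oov_char (2) or lies in [skip_top, num_words), since rarer and very frequent
# words are both mapped to the out-of-vocabulary character.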
@keras_export("keras.datasets.imdb.get_word_index")
def get_word_index(path="imdb_word_index.json"):
"""Retrieves a dict mapping words to their index in the IMDB dataset.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary. Keys are word strings, values are their
index.
Example:
```python
# Use the default parameters to keras.datasets.imdb.load_data
start_char = 1
oov_char = 2
index_from = 3
# Retrieve the training sequences.
(x_train, _), _ = keras.datasets.imdb.load_data(
start_char=start_char, oov_char=oov_char, index_from=index_from
)
# Retrieve the word index file mapping words to indices
word_index = keras.datasets.imdb.get_word_index()
# Reverse the word index to obtain a dict mapping indices to words
# And add `index_from` to indices to sync with `x_train`
inverted_word_index = dict(
(i + index_from, word) for (word, i) in word_index.items()
)
# Update `inverted_word_index` to include `start_char` and `oov_char`
inverted_word_index[start_char] = "[START]"
inverted_word_index[oov_char] = "[OOV]"
# Decode the first sequence in the dataset
decoded_sequence = " ".join(inverted_word_index[i] for i in x_train[0])
```
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=origin_folder + "imdb_word_index.json",
file_hash="bfafd718b763782e994055a2d397834f",
)
with open(path) as f:
return json.load(f)
| 001029.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
"""Reuters topic classification dataset."""
import json
import numpy as np
from keras.src.api_export import keras_export
from keras.src.utils.file_utils import get_file
from keras.src.utils.python_utils import remove_long_seq
@keras_export("keras.datasets.reuters.load_data")
def load_data(
path="reuters.npz",
num_words=None,
skip_top=0,
maxlen=None,
test_split=0.2,
seed=113,
start_char=1,
oov_char=2,
index_from=3,
):
"""Loads the Reuters newswire classification dataset.
This is a dataset of 11,228 newswires from Reuters, labeled over 46 topics.
This was originally generated by parsing and preprocessing the classic
Reuters-21578 dataset, but the preprocessing code is no longer packaged
with Keras. See this
[GitHub discussion](https://github.com/keras-team/keras/issues/12072)
for more info.
Each newswire is encoded as a list of word indexes (integers).
For convenience, words are indexed by overall frequency in the dataset,
so that for instance the integer "3" encodes the 3rd most frequent word in
the data. This allows for quick filtering operations such as:
"only consider the top 10,000 most
common words, but eliminate the top 20 most common words".
As a convention, "0" does not stand for a specific word, but instead is used
to encode any unknown word.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: integer or None. Words are
ranked by how often they occur (in the training set) and only
the `num_words` most frequent words are kept. Any less frequent word
will appear as `oov_char` value in the sequence data. If None,
all words are kept. Defaults to `None`.
skip_top: skip the top N most frequently occurring words
(which may not be informative). These words will appear as
`oov_char` value in the dataset. 0 means no words are
skipped. Defaults to `0`.
maxlen: int or None. Maximum sequence length.
Any longer sequence will be truncated. None means no truncation.
Defaults to `None`.
test_split: Float between `0.` and `1.`. Fraction of the dataset to be
used as test data. `0.2` means that 20% of the dataset is used as
test data. Defaults to `0.2`.
seed: int. Seed for reproducible data shuffling.
start_char: int. The start of a sequence will be marked with this
character. 0 is usually the padding character. Defaults to `1`.
oov_char: int. The out-of-vocabulary character.
Words that were cut out because of the `num_words` or
`skip_top` limits will be replaced with this character.
index_from: int. Index actual words with this index and higher.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`, `x_test`**: lists of sequences, which are lists of indexes
      (integers). If the num_words argument was specified, the maximum
possible index value is `num_words - 1`. If the `maxlen` argument was
specified, the largest possible sequence length is `maxlen`.
    **`y_train`, `y_test`**: lists of integer topic indices (0 to 45).
**Note**: The 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
fname=path,
origin=origin_folder + "reuters.npz",
file_hash=( # noqa: E501
"d6586e694ee56d7a4e65172e12b3e987c03096cb01eab99753921ef915959916"
),
)
with np.load(path, allow_pickle=True) as f:
xs, labels = f["x"], f["y"]
rng = np.random.RandomState(seed)
indices = np.arange(len(xs))
rng.shuffle(indices)
xs = xs[indices]
labels = labels[indices]
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
if maxlen:
xs, labels = remove_long_seq(maxlen, xs, labels)
if not num_words:
num_words = max(max(x) for x in xs)
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [
[w if skip_top <= w < num_words else oov_char for w in x]
for x in xs
]
else:
xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
idx = int(len(xs) * (1 - test_split))
x_train, y_train = np.array(xs[:idx], dtype="object"), np.array(
labels[:idx]
)
x_test, y_test = np.array(xs[idx:], dtype="object"), np.array(labels[idx:])
return (x_train, y_train), (x_test, y_test)
@keras_export("keras.datasets.reuters.get_word_index")
def get_word_index(path="reuters_word_index.json"):
"""Retrieves a dict mapping words to their index in the Reuters dataset.
    Actual word indices start from 3, with 3 indices reserved for:
0 (padding), 1 (start), 2 (oov).
    E.g. the word index of 'the' is 1, but in the actual training data, the
    index of 'the' will be 1 + 3 = 4. Conversely, to translate word indices in
    training data back to words using this mapping, subtract 3 from the indices.
Args:
path: where to cache the data (relative to `~/.keras/dataset`).
Returns:
The word index dictionary. Keys are word strings, values are their
index.
"""
origin_folder = (
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
)
path = get_file(
path,
origin=origin_folder + "reuters_word_index.json",
file_hash="4d44cc38712099c9e383dc6e5f11a921",
)
with open(path) as f:
return json.load(f)
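# Illustrative decoding sketch (assumes the default start_char=1, oov_char=2,
# index_from=3 used by load_data above):
#
#     word_index = get_word_index()
#     inverted = {i + 3: w for w, i in word_index.items()}
#     inverted.update({0: "[PAD]", 1: "[START]", 2: "[OOV]"})
#     (x_train, _), _ = load_data()
#     decoded = " ".join(inverted.get(i, "[OOV]") for i in x_train[0])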
@keras_export("keras.datasets.reuters.get_label_names")
def get_label_names():
"""Returns labels as a list of strings with indices matching training data.
Reference:
- [Reuters Dataset](https://martin-thoma.com/nlp-reuters/)
"""
return (
"cocoa",
"grain",
"veg-oil",
"earn",
"acq",
"wheat",
"copper",
"housing",
"money-supply",
"coffee",
"sugar",
"trade",
"reserves",
"ship",
"cotton",
"carcass",
"crude",
"nat-gas",
"cpi",
"money-fx",
"interest",
"gnp",
"meal-feed",
"alum",
"oilseed",
"gold",
"tin",
"strategic-metal",
"livestock",
"retail",
"ipi",
"iron-steel",
"rubber",
"heat",
"jobs",
"lei",
"bop",
"zinc",
"orange",
"pet-chem",
"dlr",
"gas",
"silver",
"wpi",
"hog",
"lead",
)
| 780018.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
from __future__ import annotations
import os
from xml.etree import ElementTree as ET
import numpy as np
import svgelements as se
import io
from manimlib.constants import RIGHT
from manimlib.logger import log
from manimlib.mobject.geometry import Circle
from manimlib.mobject.geometry import Line
from manimlib.mobject.geometry import Polygon
from manimlib.mobject.geometry import Polyline
from manimlib.mobject.geometry import Rectangle
from manimlib.mobject.geometry import RoundedRectangle
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.directories import get_mobject_data_dir
from manimlib.utils.images import get_full_vector_image_path
from manimlib.utils.iterables import hash_obj
from manimlib.utils.simple_functions import hash_string
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Tuple
from manimlib.typing import ManimColor, Vect3Array
SVG_HASH_TO_MOB_MAP: dict[int, list[VMobject]] = {}
PATH_TO_POINTS: dict[str, Vect3Array] = {}
def _convert_point_to_3d(x: float, y: float) -> np.ndarray:
return np.array([x, y, 0.0])
class SVGMobject(VMobject):
file_name: str = ""
height: float | None = 2.0
width: float | None = None
def __init__(
self,
file_name: str = "",
should_center: bool = True,
height: float | None = None,
width: float | None = None,
# Style that overrides the original svg
color: ManimColor = None,
fill_color: ManimColor = None,
fill_opacity: float | None = None,
stroke_width: float | None = 0.0,
stroke_color: ManimColor = None,
stroke_opacity: float | None = None,
# Style that fills only when not specified
# If None, regarded as default values from svg standard
svg_default: dict = dict(
color=None,
opacity=None,
fill_color=None,
fill_opacity=None,
stroke_width=None,
stroke_color=None,
stroke_opacity=None,
),
path_string_config: dict = dict(),
**kwargs
):
self.file_name = file_name or self.file_name
self.svg_default = dict(svg_default)
self.path_string_config = dict(path_string_config)
        super().__init__(**kwargs)
self.init_svg_mobject()
self.ensure_positive_orientation()
# Rather than passing style into super().__init__
# do it after svg has been taken in
self.set_style(
fill_color=color or fill_color,
fill_opacity=fill_opacity,
stroke_color=color or stroke_color,
stroke_width=stroke_width,
stroke_opacity=stroke_opacity,
)
# Initialize position
height = height or self.height
width = width or self.width
if should_center:
self.center()
if height is not None:
self.set_height(height)
if width is not None:
self.set_width(width)
def init_svg_mobject(self) -> None:
hash_val = hash_obj(self.hash_seed)
if hash_val in SVG_HASH_TO_MOB_MAP:
submobs = [sm.copy() for sm in SVG_HASH_TO_MOB_MAP[hash_val]]
else:
submobs = self.mobjects_from_file(self.get_file_path())
SVG_HASH_TO_MOB_MAP[hash_val] = [sm.copy() for sm in submobs]
self.add(*submobs)
self.flip(RIGHT) # Flip y
@property
def hash_seed(self) -> tuple:
# Returns data which can uniquely represent the result of `init_points`.
# The hashed value of it is stored as a key in `SVG_HASH_TO_MOB_MAP`.
return (
self.__class__.__name__,
self.svg_default,
self.path_string_config,
self.file_name
)
def mobjects_from_file(self, file_path: str) -> list[VMobject]:
element_tree = ET.parse(file_path)
new_tree = self.modify_xml_tree(element_tree)
# New svg based on tree contents
data_stream = io.BytesIO()
new_tree.write(data_stream)
data_stream.seek(0)
svg = se.SVG.parse(data_stream)
data_stream.close()
return self.mobjects_from_svg(svg)
def get_file_path(self) -> str:
if self.file_name is None:
raise Exception("Must specify file for SVGMobject")
return get_full_vector_image_path(self.file_name)
def modify_xml_tree(self, element_tree: ET.ElementTree) -> ET.ElementTree:
config_style_attrs = self.generate_config_style_dict()
style_keys = (
"fill",
"fill-opacity",
"stroke",
"stroke-opacity",
"stroke-width",
"style"
)
root = element_tree.getroot()
style_attrs = {
k: v
for k, v in root.attrib.items()
if k in style_keys
}
# Ignore other attributes in case that svgelements cannot parse them
SVG_XMLNS = "{http://www.w3.org/2000/svg}"
new_root = ET.Element("svg")
config_style_node = ET.SubElement(new_root, f"{SVG_XMLNS}g", config_style_attrs)
root_style_node = ET.SubElement(config_style_node, f"{SVG_XMLNS}g", style_attrs)
root_style_node.extend(root)
return ET.ElementTree(new_root)
def generate_config_style_dict(self) -> dict[str, str]:
keys_converting_dict = {
"fill": ("color", "fill_color"),
"fill-opacity": ("opacity", "fill_opacity"),
"stroke": ("color", "stroke_color"),
"stroke-opacity": ("opacity", "stroke_opacity"),
"stroke-width": ("stroke_width",)
}
svg_default_dict = self.svg_default
result = {}
for svg_key, style_keys in keys_converting_dict.items():
for style_key in style_keys:
if svg_default_dict[style_key] is None:
continue
result[svg_key] = str(svg_default_dict[style_key])
return result
def mobjects_from_svg(self, svg: se.SVG) -> list[VMobject]:
result = []
for shape in svg.elements():
if isinstance(shape, (se.Group, se.Use)):
continue
elif isinstance(shape, se.Path):
mob = self.path_to_mobject(shape)
elif isinstance(shape, se.SimpleLine):
mob = self.line_to_mobject(shape)
elif isinstance(shape, se.Rect):
mob = self.rect_to_mobject(shape)
elif isinstance(shape, (se.Circle, se.Ellipse)):
mob = self.ellipse_to_mobject(shape)
elif isinstance(shape, se.Polygon):
mob = self.polygon_to_mobject(shape)
elif isinstance(shape, se.Polyline):
mob = self.polyline_to_mobject(shape)
# elif isinstance(shape, se.Text):
# mob = self.text_to_mobject(shape)
elif type(shape) == se.SVGElement:
continue
else:
log.warning("Unsupported element type: %s", type(shape))
continue
if not mob.has_points():
continue
if isinstance(shape, se.GraphicObject):
self.apply_style_to_mobject(mob, shape)
if isinstance(shape, se.Transformable) and shape.apply:
self.handle_transform(mob, shape.transform)
result.append(mob)
return result
@staticmethod
def handle_transform(mob: VMobject, matrix: se.Matrix) -> VMobject:
mat = np.array([
[matrix.a, matrix.c],
[matrix.b, matrix.d]
])
vec = np.array([matrix.e, matrix.f, 0.0])
mob.apply_matrix(mat)
mob.shift(vec)
return mob
@staticmethod
def apply_style_to_mobject(
mob: VMobject,
shape: se.GraphicObject
) -> VMobject:
mob.set_style(
stroke_width=shape.stroke_width,
stroke_color=shape.stroke.hexrgb,
stroke_opacity=shape.stroke.opacity,
fill_color=shape.fill.hexrgb,
fill_opacity=shape.fill.opacity
)
return mob
def path_to_mobject(self, path: se.Path) -> VMobjectFromSVGPath:
return VMobjectFromSVGPath(path, **self.path_string_config)
def line_to_mobject(self, line: se.SimpleLine) -> Line:
return Line(
start=_convert_point_to_3d(line.x1, line.y1),
end=_convert_point_to_3d(line.x2, line.y2)
)
def rect_to_mobject(self, rect: se.Rect) -> Rectangle:
if rect.rx == 0 or rect.ry == 0:
mob = Rectangle(
width=rect.width,
height=rect.height,
)
else:
mob = RoundedRectangle(
width=rect.width,
height=rect.height * rect.rx / rect.ry,
corner_radius=rect.rx
)
mob.stretch_to_fit_height(rect.height)
mob.shift(_convert_point_to_3d(
rect.x + rect.width / 2,
rect.y + rect.height / 2
))
return mob
def ellipse_to_mobject(self, ellipse: se.Circle | se.Ellipse) -> Circle:
mob = Circle(radius=ellipse.rx)
mob.stretch_to_fit_height(2 * ellipse.ry)
mob.shift(_convert_point_to_3d(
ellipse.cx, ellipse.cy
))
return mob
def polygon_to_mobject(self, polygon: se.Polygon) -> Polygon:
points = [
_convert_point_to_3d(*point)
for point in polygon
]
return Polygon(*points)
def polyline_to_mobject(self, polyline: se.Polyline) -> Polyline:
points = [
_convert_point_to_3d(*point)
for point in polyline
]
return Polyline(*points)
def text_to_mobject(self, text: se.Text):
pass
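# Illustrative usage (a rough sketch; "example.svg" is a hypothetical file that
# must be resolvable by get_full_vector_image_path):
#
#     mob = SVGMobject("example.svg", height=3.0, fill_color="#FFD43B")
#
# Styling passed here overrides the styles embedded in the SVG, per the
# constructor logic above.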
class VMobjectFromSVGPath(VMobject):
def __init__(
self,
path_obj: se.Path,
**kwargs
):
# Get rid of arcs
path_obj.approximate_arcs_with_quads()
self.path_obj = path_obj
super().__init__(**kwargs)
def init_points(self) -> None:
# After a given svg_path has been converted into points, the result
        # will be saved so that future calls for the same path don't need to
# retrace the same computation.
path_string = self.path_obj.d()
if path_string not in PATH_TO_POINTS:
self.handle_commands()
if not self._use_winding_fill:
self.subdivide_intersections()
# Save for future use
PATH_TO_POINTS[path_string] = self.get_points().copy()
else:
points = PATH_TO_POINTS[path_string]
self.set_points(points)
def handle_commands(self) -> None:
segment_class_to_func_map = {
se.Move: (self.start_new_path, ("end",)),
se.Close: (self.close_path, ()),
se.Line: (self.add_line_to, ("end",)),
se.QuadraticBezier: (self.add_quadratic_bezier_curve_to, ("control", "end")),
se.CubicBezier: (self.add_cubic_bezier_curve_to, ("control1", "control2", "end"))
}
for segment in self.path_obj:
segment_class = segment.__class__
func, attr_names = segment_class_to_func_map[segment_class]
points = [
_convert_point_to_3d(*segment.__getattribute__(attr_name))
for attr_name in attr_names
]
func(*points)
# Get rid of the side effect of trailing "Z M" commands.
if self.has_new_path_started():
self.resize_points(self.get_num_points() - 2)
| 703706.py | [
"CWE-611: Improper Restriction of XML External Entity Reference"
] |
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
https://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
https://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import os
from bz2 import BZ2File
from datetime import datetime
from pprint import pprint
from time import time
from urllib.request import urlopen
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
# %%
# Download data, if not already on disk
# -------------------------------------
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
with open(filename, "wb") as f:
f.write(opener.read())
print()
# %%
# Loading the redirect files
# --------------------------
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
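# For example (illustrative; the real pipeline passes bytes keys, which behave
# the same way):
#
#     redirects = {"Python_(language)": "Python_(programming_language)"}
#     index_map = {}
#     index(redirects, index_map, "Python_(language)")   # -> 0
#     # index_map is now {"Python_(programming_language)": 0}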
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
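# For example (illustrative; the lines read from the bz2 dumps are bytes, for
# which the same slice applies):
#
#     short_name("<http://dbpedia.org/resource/California>")  # -> "California"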
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = {source}
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# %%
# Computing the Adjacency matrix
# ------------------------------
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000
)
names = {i: name for name, i in index_map.items()}
# %%
# Computing Principal Singular Vector using Randomized SVD
# --------------------------------------------------------
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
# %%
# Computing Centrality scores
# ---------------------------
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i] : X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0), 1.0 / n, 0)).ravel()
scores = np.full(n, 1.0 / n, dtype=np.float32) # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (
alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n
)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
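# The update inside the loop above is the standard PageRank iteration written
# in vector form (notation is illustrative, not from the original example):
#
#     scores_new = alpha * (scores @ P + dot(dangle, scores))
#                  + (1 - alpha) * scores.sum() / n
#
# where P is X with every non-empty row rescaled to sum to 1, and `dangle`
# redistributes the score mass of all-zero rows uniformly over the n vertices.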
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| 502316.py | [
"CWE-939: Improper Authorization in Handler for Custom URL Scheme"
] |
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import importlib
import inspect
import os
import warnings
from inspect import signature
from pkgutil import walk_packages
import numpy as np
import pytest
import sklearn
from sklearn.datasets import make_classification
# make it possible to discover experimental estimators when calling `all_estimators`
from sklearn.experimental import (
enable_halving_search_cv, # noqa
enable_iterative_imputer, # noqa
)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import all_estimators
from sklearn.utils._testing import (
_get_func_name,
check_docstring_parameters,
ignore_warnings,
)
from sklearn.utils.deprecation import _is_deprecated
from sklearn.utils.estimator_checks import (
_construct_instance,
_enforce_estimator_tags_X,
_enforce_estimator_tags_y,
)
# walk_packages() ignores DeprecationWarnings, now we need to ignore
# FutureWarnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
# mypy error: Module has no attribute "__path__"
sklearn_path = [os.path.dirname(sklearn.__file__)]
PUBLIC_MODULES = set(
[
pckg[1]
for pckg in walk_packages(prefix="sklearn.", path=sklearn_path)
if not ("._" in pckg[1] or ".tests." in pckg[1])
]
)
# functions to ignore args / docstring of
# TODO(1.7): remove "sklearn.utils._joblib"
_DOCSTRING_IGNORES = [
"sklearn.utils.deprecation.load_mlcomp",
"sklearn.pipeline.make_pipeline",
"sklearn.pipeline.make_union",
"sklearn.utils.extmath.safe_sparse_dot",
"sklearn.utils._joblib",
"HalfBinomialLoss",
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
"fit",
"score",
"fit_predict",
"fit_transform",
"partial_fit",
"predict",
]
def test_docstring_parameters():
# Test module docstring formatting
# Skip test if numpydoc is not found
pytest.importorskip(
"numpydoc", reason="numpydoc is required to test the docstrings"
)
# XXX unreached code as of v0.22
from numpydoc import docscrape
incorrect = []
for name in PUBLIC_MODULES:
if name.endswith(".conftest"):
# pytest tooling, not part of the scikit-learn API
continue
if name == "sklearn.utils.fixes":
# We cannot always control these docstrings
continue
with warnings.catch_warnings(record=True):
module = importlib.import_module(name)
classes = inspect.getmembers(module, inspect.isclass)
# Exclude non-scikit-learn classes
classes = [cls for cls in classes if cls[1].__module__.startswith("sklearn")]
for cname, cls in classes:
this_incorrect = []
if cname in _DOCSTRING_IGNORES or cname.startswith("_"):
continue
if inspect.isabstract(cls):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError(
"Error for __init__ of %s in %s:\n%s" % (cls, name, w[0])
)
# Skip checks on deprecated classes
if _is_deprecated(cls.__new__):
continue
this_incorrect += check_docstring_parameters(cls.__init__, cdoc)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
if _is_deprecated(method):
continue
param_ignore = None
# Now skip docstring test for y when y is None
# by default for API reason
if method_name in _METHODS_IGNORE_NONE_Y:
sig = signature(method)
if "y" in sig.parameters and sig.parameters["y"].default is None:
param_ignore = ["y"] # ignore y for fit and score
result = check_docstring_parameters(method, ignore=param_ignore)
this_incorrect += result
incorrect += this_incorrect
functions = inspect.getmembers(module, inspect.isfunction)
# Exclude imported functions
functions = [fn for fn in functions if fn[1].__module__ == name]
for fname, func in functions:
# Don't test private methods / functions
if fname.startswith("_"):
continue
if fname == "configuration" and name.endswith("setup"):
continue
name_ = _get_func_name(func)
if not any(d in name_ for d in _DOCSTRING_IGNORES) and not _is_deprecated(
func
):
incorrect += check_docstring_parameters(func)
msg = "\n".join(incorrect)
if len(incorrect) > 0:
raise AssertionError("Docstring Error:\n" + msg)
def _construct_searchcv_instance(SearchCV):
return SearchCV(LogisticRegression(), {"C": [0.1, 1]})
def _construct_compose_pipeline_instance(Estimator):
# Minimal / degenerate instances: only useful to test the docstrings.
if Estimator.__name__ == "ColumnTransformer":
return Estimator(transformers=[("transformer", "passthrough", [0, 1])])
elif Estimator.__name__ == "Pipeline":
return Estimator(steps=[("clf", LogisticRegression())])
elif Estimator.__name__ == "FeatureUnion":
return Estimator(transformer_list=[("transformer", FunctionTransformer())])
def _construct_sparse_coder(Estimator):
# XXX: hard-coded assumption that n_features=3
dictionary = np.array(
[[0, 1, 0], [-1, -1, 2], [1, 1, 1], [0, 1, 1], [0, 2, 1]],
dtype=np.float64,
)
return Estimator(dictionary=dictionary)
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
# TODO(1.6): remove "@pytest.mark.filterwarnings" as SAMME.R will be removed
# and substituted with the SAMME algorithm as a default
@pytest.mark.filterwarnings("ignore:The SAMME.R algorithm")
@pytest.mark.parametrize("name, Estimator", all_estimators())
def test_fit_docstring_attributes(name, Estimator):
pytest.importorskip("numpydoc")
from numpydoc import docscrape
doc = docscrape.ClassDoc(Estimator)
attributes = doc["Attributes"]
if Estimator.__name__ in (
"HalvingRandomSearchCV",
"RandomizedSearchCV",
"HalvingGridSearchCV",
"GridSearchCV",
):
est = _construct_searchcv_instance(Estimator)
elif Estimator.__name__ in (
"ColumnTransformer",
"Pipeline",
"FeatureUnion",
):
est = _construct_compose_pipeline_instance(Estimator)
elif Estimator.__name__ == "SparseCoder":
est = _construct_sparse_coder(Estimator)
else:
est = _construct_instance(Estimator)
if Estimator.__name__ == "SelectKBest":
est.set_params(k=2)
elif Estimator.__name__ == "DummyClassifier":
est.set_params(strategy="stratified")
elif Estimator.__name__ == "CCA" or Estimator.__name__.startswith("PLS"):
# default = 2 is invalid for single target
est.set_params(n_components=1)
elif Estimator.__name__ in (
"GaussianRandomProjection",
"SparseRandomProjection",
):
# default="auto" raises an error with the shape of `X`
est.set_params(n_components=2)
elif Estimator.__name__ == "TSNE":
# default raises an error, perplexity must be less than n_samples
est.set_params(perplexity=2)
# TODO(1.6): remove (avoid FutureWarning)
if Estimator.__name__ in ("NMF", "MiniBatchNMF"):
est.set_params(n_components="auto")
# Low max iter to speed up tests: we are only interested in checking the existence
# of fitted attributes. This should be invariant to whether it has converged or not.
if "max_iter" in est.get_params():
est.set_params(max_iter=2)
# min value for `TSNE` is 250
if Estimator.__name__ == "TSNE":
est.set_params(max_iter=250)
if "random_state" in est.get_params():
est.set_params(random_state=0)
# In case we want to deprecate some attributes in the future
skipped_attributes = {}
if Estimator.__name__.endswith("Vectorizer"):
        # Vectorizers require some specific input data
if Estimator.__name__ in (
"CountVectorizer",
"HashingVectorizer",
"TfidfVectorizer",
):
X = [
"This is the first document.",
"This document is the second document.",
"And this is the third one.",
"Is this the first document?",
]
elif Estimator.__name__ == "DictVectorizer":
X = [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]
y = None
else:
X, y = make_classification(
n_samples=20,
n_features=3,
n_redundant=0,
n_classes=2,
random_state=2,
)
y = _enforce_estimator_tags_y(est, y)
X = _enforce_estimator_tags_X(est, X)
if "1dlabels" in est._get_tags()["X_types"]:
est.fit(y)
elif "2dlabels" in est._get_tags()["X_types"]:
est.fit(np.c_[y, y])
elif "3darray" in est._get_tags()["X_types"]:
est.fit(X[np.newaxis, ...], y)
else:
est.fit(X, y)
for attr in attributes:
if attr.name in skipped_attributes:
continue
desc = " ".join(attr.desc).lower()
# As certain attributes are present "only" if a certain parameter is
# provided, this checks if the word "only" is present in the attribute
# description, and if not the attribute is required to be present.
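        # e.g. a description reading "only defined when early_stopping=True" (illustrative
        # wording) exempts the attribute from the presence check below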
if "only " in desc:
continue
# ignore deprecation warnings
with ignore_warnings(category=FutureWarning):
assert hasattr(est, attr.name)
fit_attr = _get_all_fitted_attributes(est)
fit_attr_names = [attr.name for attr in attributes]
undocumented_attrs = set(fit_attr).difference(fit_attr_names)
undocumented_attrs = set(undocumented_attrs).difference(skipped_attributes)
if undocumented_attrs:
raise AssertionError(
f"Undocumented attributes for {Estimator.__name__}: {undocumented_attrs}"
)
def _get_all_fitted_attributes(estimator):
"Get all the fitted attributes of an estimator including properties"
# attributes
fit_attr = list(estimator.__dict__.keys())
# properties
with warnings.catch_warnings():
warnings.filterwarnings("error", category=FutureWarning)
for name in dir(estimator.__class__):
obj = getattr(estimator.__class__, name)
if not isinstance(obj, property):
continue
            # ignore properties that raise an AttributeError and deprecated
            # properties
try:
getattr(estimator, name)
except (AttributeError, FutureWarning):
continue
fit_attr.append(name)
return [k for k in fit_attr if k.endswith("_") and not k.startswith("_")]
| 352492.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
"""Module for initialization hooks https://docs.localstack.cloud/references/init-hooks/"""
import dataclasses
import logging
import os.path
import subprocess
import time
from enum import Enum
from functools import cached_property
from typing import Dict, List, Optional
from plux import Plugin, PluginManager
from localstack import constants
from localstack.runtime import hooks
from localstack.utils.objects import singleton_factory
LOG = logging.getLogger(__name__)
class State(Enum):
UNKNOWN = "UNKNOWN"
RUNNING = "RUNNING"
SUCCESSFUL = "SUCCESSFUL"
ERROR = "ERROR"
def __str__(self):
return self.name
def __repr__(self):
return self.name
class Stage(Enum):
BOOT = 0
START = 1
READY = 2
SHUTDOWN = 3
def __str__(self):
return self.name
def __repr__(self):
return self.name
@dataclasses.dataclass
class Script:
path: str
stage: Stage
state: State = State.UNKNOWN
class ScriptRunner(Plugin):
"""
Interface for running scripts.
"""
namespace = "localstack.init.runner"
suffixes = []
def run(self, path: str) -> None:
"""
Run the given script with the appropriate runtime.
:param path: the path to the script
"""
raise NotImplementedError
def should_run(self, script_file: str) -> bool:
"""
Checks whether the given file should be run with this script runner. In case multiple runners
evaluate this condition to true on the same file (ideally this doesn't happen), the first one
        loaded will be used, which is potentially non-deterministic.
:param script_file: the script file to run
:return: True if this runner should be used, False otherwise
"""
for suffix in self.suffixes:
if script_file.endswith(suffix):
return True
return False
class ShellScriptRunner(ScriptRunner):
"""
Runner that interprets scripts as shell scripts and calls them directly.
"""
name = "sh"
suffixes = [".sh"]
def run(self, path: str) -> None:
exit_code = subprocess.call(args=[], executable=path)
if exit_code != 0:
raise OSError("Script %s returned a non-zero exit code %s" % (path, exit_code))
class PythonScriptRunner(ScriptRunner):
"""
Runner that uses ``exec`` to run a python script.
"""
name = "py"
suffixes = [".py"]
def run(self, path: str) -> None:
with open(path, "rb") as fd:
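            # the script body runs in-process with a fresh, empty globals dict, i.e. with the
            # same privileges as the LocalStack process itself (no sandboxing)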
exec(fd.read(), {})
class InitScriptManager:
_stage_directories: Dict[Stage, str] = {
Stage.BOOT: "boot.d",
Stage.START: "start.d",
Stage.READY: "ready.d",
Stage.SHUTDOWN: "shutdown.d",
}
script_root: str
stage_completed: Dict[Stage, bool]
def __init__(self, script_root: str):
self.script_root = script_root
self.stage_completed = {stage: False for stage in Stage}
self.runner_manager: PluginManager[ScriptRunner] = PluginManager(ScriptRunner.namespace)
@cached_property
def scripts(self) -> Dict[Stage, List[Script]]:
return self._find_scripts()
def get_script_runner(self, script_file: str) -> Optional[ScriptRunner]:
runners = self.runner_manager.load_all()
for runner in runners:
if runner.should_run(script_file):
return runner
return None
def has_script_runner(self, script_file: str) -> bool:
return self.get_script_runner(script_file) is not None
def run_stage(self, stage: Stage) -> List[Script]:
"""
Runs all scripts in the given stage.
:param stage: the stage to run
:return: the scripts that were in the stage
"""
scripts = self.scripts.get(stage, [])
if self.stage_completed[stage]:
LOG.debug("Stage %s already completed, skipping", stage)
return scripts
try:
for script in scripts:
LOG.debug("Running %s script %s", script.stage, script.path)
# Deprecated: To be removed in v4.0 major release.
# Explicit AWS credentials and region will need to be set in the script.
env_original = os.environ.copy()
os.environ["AWS_ACCESS_KEY_ID"] = constants.DEFAULT_AWS_ACCOUNT_ID
os.environ["AWS_SECRET_ACCESS_KEY"] = constants.INTERNAL_AWS_SECRET_ACCESS_KEY
os.environ["AWS_REGION"] = constants.AWS_REGION_US_EAST_1
try:
script.state = State.RUNNING
runner = self.get_script_runner(script.path)
runner.run(script.path)
except Exception as e:
script.state = State.ERROR
if LOG.isEnabledFor(logging.DEBUG):
LOG.exception("Error while running script %s", script)
else:
LOG.error("Error while running script %s: %s", script, e)
else:
script.state = State.SUCCESSFUL
finally:
# Restore original state of Boto credentials.
for env_var in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"):
if env_var in env_original:
os.environ[env_var] = env_original[env_var]
else:
os.environ.pop(env_var, None)
finally:
self.stage_completed[stage] = True
return scripts
def _find_scripts(self) -> Dict[Stage, List[Script]]:
scripts = {}
if self.script_root is None:
LOG.debug("Unable to discover init scripts as script_root is None")
return {}
for stage in Stage:
scripts[stage] = []
stage_dir = self._stage_directories[stage]
if not stage_dir:
continue
stage_path = os.path.join(self.script_root, stage_dir)
if not os.path.isdir(stage_path):
continue
for root, dirs, files in os.walk(stage_path, topdown=True):
# from the docs: "When topdown is true, the caller can modify the dirnames list in-place"
dirs.sort()
files.sort()
for file in files:
script_path = os.path.abspath(os.path.join(root, file))
if not os.path.isfile(script_path):
continue
# only add the script if there's a runner for it
if not self.has_script_runner(script_path):
LOG.debug("No runner available for script %s", script_path)
continue
scripts[stage].append(Script(path=script_path, stage=stage))
LOG.debug("Init scripts discovered: %s", scripts)
return scripts
# runtime integration
@singleton_factory
def init_script_manager() -> InitScriptManager:
from localstack import config
return InitScriptManager(script_root=config.dirs.init)
@hooks.on_infra_start()
def _run_init_scripts_on_start():
# this is a hack since we currently cannot know whether boot scripts have been executed or not
init_script_manager().stage_completed[Stage.BOOT] = True
_run_and_log(Stage.START)
@hooks.on_infra_ready()
def _run_init_scripts_on_ready():
_run_and_log(Stage.READY)
@hooks.on_infra_shutdown()
def _run_init_scripts_on_shutdown():
_run_and_log(Stage.SHUTDOWN)
def _run_and_log(stage: Stage):
from localstack.utils.analytics import log
then = time.time()
scripts = init_script_manager().run_stage(stage)
took = (time.time() - then) * 1000
if scripts:
log.event("run_init", {"stage": stage.name, "scripts": len(scripts), "duration": took})
def main():
"""
Run the init scripts for a particular stage. For example, to run all boot scripts run::
python -m localstack.runtime.init BOOT
The __main__ entrypoint is currently mainly used for the docker-entrypoint.sh. Other stages
are executed from runtime hooks.
"""
import sys
stage = Stage[sys.argv[1]]
init_script_manager().run_stage(stage)
if __name__ == "__main__":
main()
| 974666.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
import glob
import logging
import os
import re
import shutil
import textwrap
import threading
from typing import List
import semver
from localstack import config
from localstack.constants import (
ELASTICSEARCH_DEFAULT_VERSION,
ELASTICSEARCH_DELETE_MODULES,
ELASTICSEARCH_PLUGIN_LIST,
OPENSEARCH_DEFAULT_VERSION,
OPENSEARCH_PLUGIN_LIST,
)
from localstack.packages import InstallTarget, Package, PackageInstaller
from localstack.services.opensearch import versions
from localstack.utils.archives import download_and_extract_with_retry
from localstack.utils.files import chmod_r, load_file, mkdir, rm_rf, save_file
from localstack.utils.run import run
from localstack.utils.ssl import create_ssl_cert, install_predefined_cert_if_available
from localstack.utils.sync import SynchronizedDefaultDict, retry
LOG = logging.getLogger(__name__)
_OPENSEARCH_INSTALL_LOCKS = SynchronizedDefaultDict(threading.RLock)
class OpensearchPackage(Package):
def __init__(self, default_version: str = OPENSEARCH_DEFAULT_VERSION):
super().__init__(name="OpenSearch", default_version=default_version)
def _get_installer(self, version: str) -> PackageInstaller:
if version in versions._prefixed_elasticsearch_install_versions:
return ElasticsearchPackageInstaller(version)
else:
return OpensearchPackageInstaller(version)
def get_versions(self) -> List[str]:
return list(versions.install_versions.keys())
class OpensearchPackageInstaller(PackageInstaller):
def __init__(self, version: str):
super().__init__("opensearch", version)
def _install(self, target: InstallTarget):
# locally import to avoid having a dependency on ASF when starting the CLI
from localstack.aws.api.opensearch import EngineType
from localstack.services.opensearch import versions
version = self._get_opensearch_install_version()
install_dir = self._get_install_dir(target)
with _OPENSEARCH_INSTALL_LOCKS[version]:
if not os.path.exists(install_dir):
opensearch_url = versions.get_download_url(version, EngineType.OpenSearch)
install_dir_parent = os.path.dirname(install_dir)
mkdir(install_dir_parent)
# download and extract archive
tmp_archive = os.path.join(
config.dirs.cache, f"localstack.{os.path.basename(opensearch_url)}"
)
print(f"DEBUG: installing opensearch to path {install_dir_parent}")
download_and_extract_with_retry(opensearch_url, tmp_archive, install_dir_parent)
opensearch_dir = glob.glob(os.path.join(install_dir_parent, "opensearch*"))
if not opensearch_dir:
raise Exception(f"Unable to find OpenSearch folder in {install_dir_parent}")
shutil.move(opensearch_dir[0], install_dir)
for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
dir_path = os.path.join(install_dir, dir_name)
mkdir(dir_path)
chmod_r(dir_path, 0o777)
parsed_version = semver.VersionInfo.parse(version)
# setup security based on the version
self._setup_security(install_dir, parsed_version)
# install other default plugins for opensearch 1.1+
# https://forum.opensearch.org/t/ingest-attachment-cannot-be-installed/6494/12
if parsed_version >= "1.1.0":
for plugin in OPENSEARCH_PLUGIN_LIST:
plugin_binary = os.path.join(install_dir, "bin", "opensearch-plugin")
plugin_dir = os.path.join(install_dir, "plugins", plugin)
if not os.path.exists(plugin_dir):
LOG.info("Installing OpenSearch plugin %s", plugin)
def try_install():
output = run([plugin_binary, "install", "-b", plugin])
LOG.debug("Plugin installation output: %s", output)
# We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries
download_attempts = 3
try:
retry(try_install, retries=download_attempts - 1, sleep=2)
except Exception:
LOG.warning(
"Unable to download OpenSearch plugin '%s' after %s attempts",
plugin,
download_attempts,
)
if not os.environ.get("IGNORE_OS_DOWNLOAD_ERRORS"):
raise
def _setup_security(self, install_dir: str, parsed_version: semver.VersionInfo):
"""
Prepares the usage of the SecurityPlugin for the different versions of OpenSearch.
:param install_dir: root installation directory for OpenSearch which should be configured
:param parsed_version: parsed semantic version of the OpenSearch installation which should be configured
"""
# create & copy SSL certs to opensearch config dir
install_predefined_cert_if_available()
config_path = os.path.join(install_dir, "config")
_, cert_file_name, key_file_name = create_ssl_cert()
shutil.copyfile(cert_file_name, os.path.join(config_path, "cert.crt"))
shutil.copyfile(key_file_name, os.path.join(config_path, "cert.key"))
# configure the default roles, roles_mappings, and internal_users
if parsed_version >= "2.0.0":
# with version 2 of opensearch and the security plugin, the config moved to the root config folder
security_config_folder = os.path.join(install_dir, "config", "opensearch-security")
else:
security_config_folder = os.path.join(
install_dir, "plugins", "opensearch-security", "securityconfig"
)
# no non-default roles (not even the demo roles) should be set up
roles_path = os.path.join(security_config_folder, "roles.yml")
save_file(
file=roles_path,
permissions=0o666,
content=textwrap.dedent(
"""\
_meta:
type: "roles"
config_version: 2
"""
),
)
# create the internal user which allows localstack to manage the running instance
internal_users_path = os.path.join(security_config_folder, "internal_users.yml")
save_file(
file=internal_users_path,
permissions=0o666,
content=textwrap.dedent(
"""\
_meta:
type: "internalusers"
config_version: 2
# Define your internal users here
localstack-internal:
hash: "$2y$12$ZvpKLI2nsdGj1ResAmlLne7ki5o45XpBppyg9nXF2RLNfmwjbFY22"
reserved: true
hidden: true
backend_roles: []
attributes: {}
opendistro_security_roles: []
static: false
"""
),
)
# define the necessary roles mappings for the internal user
roles_mapping_path = os.path.join(security_config_folder, "roles_mapping.yml")
save_file(
file=roles_mapping_path,
permissions=0o666,
content=textwrap.dedent(
"""\
_meta:
type: "rolesmapping"
config_version: 2
security_manager:
hosts: []
users:
- localstack-internal
reserved: false
hidden: false
backend_roles: []
and_backend_roles: []
all_access:
hosts: []
users:
- localstack-internal
reserved: false
hidden: false
backend_roles: []
and_backend_roles: []
"""
),
)
def _get_install_marker_path(self, install_dir: str) -> str:
return os.path.join(install_dir, "bin", "opensearch")
def _get_opensearch_install_version(self) -> str:
from localstack.services.opensearch import versions
if config.SKIP_INFRA_DOWNLOADS:
self.version = OPENSEARCH_DEFAULT_VERSION
return versions.get_install_version(self.version)
class ElasticsearchPackageInstaller(PackageInstaller):
def __init__(self, version: str):
super().__init__("elasticsearch", version)
def _install(self, target: InstallTarget):
# locally import to avoid having a dependency on ASF when starting the CLI
from localstack.aws.api.opensearch import EngineType
from localstack.services.opensearch import versions
version = self.get_elasticsearch_install_version()
install_dir = self._get_install_dir(target)
installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
if not os.path.exists(installed_executable):
es_url = versions.get_download_url(version, EngineType.Elasticsearch)
install_dir_parent = os.path.dirname(install_dir)
mkdir(install_dir_parent)
# download and extract archive
tmp_archive = os.path.join(config.dirs.cache, f"localstack.{os.path.basename(es_url)}")
download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, "elasticsearch*"))
if not elasticsearch_dir:
raise Exception(f"Unable to find Elasticsearch folder in {install_dir_parent}")
shutil.move(elasticsearch_dir[0], install_dir)
for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
dir_path = os.path.join(install_dir, dir_name)
mkdir(dir_path)
chmod_r(dir_path, 0o777)
# install default plugins
for plugin in ELASTICSEARCH_PLUGIN_LIST:
plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin")
plugin_dir = os.path.join(install_dir, "plugins", plugin)
if not os.path.exists(plugin_dir):
LOG.info("Installing Elasticsearch plugin %s", plugin)
def try_install():
output = run([plugin_binary, "install", "-b", plugin])
LOG.debug("Plugin installation output: %s", output)
# We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries
download_attempts = 3
try:
retry(try_install, retries=download_attempts - 1, sleep=2)
except Exception:
LOG.warning(
"Unable to download Elasticsearch plugin '%s' after %s attempts",
plugin,
download_attempts,
)
if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"):
raise
# delete some plugins to free up space
for plugin in ELASTICSEARCH_DELETE_MODULES:
module_dir = os.path.join(install_dir, "modules", plugin)
rm_rf(module_dir)
# disable x-pack-ml plugin (not working on Alpine)
xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
rm_rf(xpack_dir)
# patch JVM options file - replace hardcoded heap size settings
jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
if os.path.exists(jvm_options_file):
jvm_options = load_file(jvm_options_file)
jvm_options_replaced = re.sub(
r"(^-Xm[sx][a-zA-Z0-9.]+$)", r"# \1", jvm_options, flags=re.MULTILINE
)
if jvm_options != jvm_options_replaced:
save_file(jvm_options_file, jvm_options_replaced)
def _get_install_marker_path(self, install_dir: str) -> str:
return os.path.join(install_dir, "bin", "elasticsearch")
def get_elasticsearch_install_version(self) -> str:
from localstack.services.opensearch import versions
if config.SKIP_INFRA_DOWNLOADS:
return ELASTICSEARCH_DEFAULT_VERSION
return versions.get_install_version(self.version)
opensearch_package = OpensearchPackage(default_version=OPENSEARCH_DEFAULT_VERSION)
elasticsearch_package = OpensearchPackage(default_version=ELASTICSEARCH_DEFAULT_VERSION)
| 215937.py | [
"CWE-798: Use of Hard-coded Credentials"
] |
import io
import tarfile
import zipfile
from subprocess import Popen
from typing import IO, Optional
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import glob
import logging
import os
import re
import tempfile
import time
from typing import Union
from localstack.constants import MAVEN_REPO_URL
from localstack.utils.files import load_file, mkdir, new_tmp_file, rm_rf, save_file
from localstack.utils.http import download
from localstack.utils.run import run
from .run import is_command_available
from .strings import truncate
LOG = logging.getLogger(__name__)
StrPath = Union[str, os.PathLike]
def is_zip_file(content):
stream = io.BytesIO(content)
return zipfile.is_zipfile(stream)
def get_unzipped_size(zip_file: Union[str, IO[bytes]]):
"""Returns the size of the unzipped file."""
with zipfile.ZipFile(zip_file, "r") as zip_ref:
return sum(f.file_size for f in zip_ref.infolist())
def unzip(path: str, target_dir: str, overwrite: bool = True) -> Optional[Union[str, Popen]]:
from localstack.utils.platform import is_debian
use_native_cmd = is_debian() or is_command_available("unzip")
if use_native_cmd:
# Running the native command can be an order of magnitude faster in the container. Also, `unzip`
# is capable of extracting zip files with incorrect CRC codes (sometimes happens, e.g., with some
# Node.js/Serverless versions), which can fail with Python's `zipfile` (extracting empty files).
flags = ["-o"] if overwrite else []
flags += ["-q"]
try:
cmd = ["unzip"] + flags + [path]
return run(cmd, cwd=target_dir, print_error=False)
except Exception as e:
error_str = truncate(str(e), max_length=200)
LOG.info(
'Unable to use native "unzip" command (using fallback mechanism): %s', error_str
)
try:
zip_ref = zipfile.ZipFile(path, "r")
except Exception as e:
LOG.warning("Unable to open zip file: %s: %s", path, e)
raise e
def _unzip_file_entry(zip_ref, file_entry, target_dir):
"""Extracts a Zipfile entry and preserves permissions"""
out_path = os.path.join(target_dir, file_entry.filename)
if use_native_cmd and os.path.exists(out_path) and os.path.getsize(out_path) > 0:
# this can happen under certain circumstances if the native "unzip" command
# fails with a non-zero exit code, yet manages to extract parts of the zip file
return
zip_ref.extract(file_entry.filename, path=target_dir)
perm = file_entry.external_attr >> 16
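        # the upper 16 bits of external_attr carry the Unix mode bits recorded by the archiver
        # (only meaningful for archives created on Unix-like systems)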
# Make sure to preserve file permissions in the zip file
# https://www.burgundywall.com/post/preserving-file-perms-with-python-zipfile-module
os.chmod(out_path, perm or 0o777)
try:
for file_entry in zip_ref.infolist():
_unzip_file_entry(zip_ref, file_entry, target_dir)
finally:
zip_ref.close()
def untar(path: str, target_dir: str):
mode = "r:gz" if path.endswith("gz") else "r"
with tarfile.open(path, mode) as tar:
tar.extractall(path=target_dir)
def create_zip_file_cli(source_path: StrPath, base_dir: StrPath, zip_file: StrPath):
"""
Creates a zip archive by using the native zip command. The native command can be an order of magnitude faster in CI
"""
source = "." if source_path == base_dir else os.path.basename(source_path)
run(["zip", "-r", zip_file, source], cwd=base_dir)
def create_zip_file_python(
base_dir: StrPath,
zip_file: StrPath,
mode: Literal["r", "w", "x", "a"] = "w",
content_root: Optional[str] = None,
):
with zipfile.ZipFile(zip_file, mode) as zip_file:
for root, dirs, files in os.walk(base_dir):
for name in files:
full_name = os.path.join(root, name)
relative = os.path.relpath(root, start=base_dir)
if content_root:
dest = os.path.join(content_root, relative, name)
else:
dest = os.path.join(relative, name)
zip_file.write(full_name, dest)
def add_file_to_jar(class_file, class_url, target_jar, base_dir=None):
base_dir = base_dir or os.path.dirname(target_jar)
patch_class_file = os.path.join(base_dir, class_file)
if not os.path.exists(patch_class_file):
download(class_url, patch_class_file)
run(["zip", target_jar, class_file], cwd=base_dir)
def update_jar_manifest(
jar_file_name: str, parent_dir: str, search: Union[str, re.Pattern], replace: str
):
manifest_file_path = "META-INF/MANIFEST.MF"
jar_path = os.path.join(parent_dir, jar_file_name)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_manifest_file = os.path.join(tmp_dir, manifest_file_path)
run(["unzip", "-o", jar_path, manifest_file_path], cwd=tmp_dir)
manifest = load_file(tmp_manifest_file)
# return if the search pattern does not match (for idempotence, to avoid file permission issues further below)
if isinstance(search, re.Pattern):
if not search.search(manifest):
return
manifest = search.sub(replace, manifest, 1)
else:
if search not in manifest:
return
manifest = manifest.replace(search, replace, 1)
manifest_file = os.path.join(parent_dir, manifest_file_path)
save_file(manifest_file, manifest)
run(["zip", jar_file_name, manifest_file_path], cwd=parent_dir)
def upgrade_jar_file(base_dir: str, file_glob: str, maven_asset: str):
"""
Upgrade the matching Java JAR file in a local directory with the given Maven asset
:param base_dir: base directory to search the JAR file to replace in
:param file_glob: glob pattern for the JAR file to replace
:param maven_asset: name of Maven asset to download, in the form "<qualified_name>:<version>"
"""
local_path = os.path.join(base_dir, file_glob)
parent_dir = os.path.dirname(local_path)
maven_asset = maven_asset.replace(":", "/")
parts = maven_asset.split("/")
maven_asset_url = f"{MAVEN_REPO_URL}/{maven_asset}/{parts[-2]}-{parts[-1]}.jar"
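    # illustrative example: a (hypothetical) asset "org/example/lib:1.2.3" resolves to
    # "<MAVEN_REPO_URL>/org/example/lib/1.2.3/lib-1.2.3.jar"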
target_file = os.path.join(parent_dir, os.path.basename(maven_asset_url))
if os.path.exists(target_file):
# avoid re-downloading the newer JAR version if it already exists locally
return
matches = glob.glob(local_path)
if not matches:
return
for match in matches:
os.remove(match)
download(maven_asset_url, target_file)
def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
mkdir(target_dir)
_, ext = os.path.splitext(tmp_archive or archive_url)
tmp_archive = tmp_archive or new_tmp_file()
if not os.path.exists(tmp_archive) or os.path.getsize(tmp_archive) <= 0:
# create temporary placeholder file, to avoid duplicate parallel downloads
save_file(tmp_archive, "")
for i in range(retries + 1):
try:
download(archive_url, tmp_archive)
break
except Exception as e:
LOG.warning(
"Attempt %d. Failed to download archive from %s: %s",
i + 1,
archive_url,
e,
)
# only sleep between retries, not after the last one
if i < retries:
time.sleep(sleep)
# if the temporary file we created above hasn't been replaced, we assume failure
if os.path.getsize(tmp_archive) <= 0:
        raise Exception(f"Failed to download archive from {archive_url}. Retries exhausted")
if ext == ".zip":
unzip(tmp_archive, target_dir)
elif ext in (
".bz2",
".gz",
".tgz",
".xz",
):
untar(tmp_archive, target_dir)
else:
raise Exception(f"Unsupported archive format: {ext}")
def download_and_extract_with_retry(archive_url, tmp_archive, target_dir):
try:
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
except Exception as e:
# try deleting and re-downloading the zip file
LOG.info("Unable to extract file, re-downloading ZIP archive %s: %s", tmp_archive, e)
rm_rf(tmp_archive)
download_and_extract(archive_url, target_dir, tmp_archive=tmp_archive)
| 124108.py | [
"CWE-22: Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')"
] |
"""Scrapy Shell
See documentation in docs/topics/shell.rst
"""
from __future__ import annotations
import os
import signal
from typing import Any, Callable, Dict, Optional, Tuple, Union
from itemadapter import is_item
from twisted.internet import defer, threads
from twisted.python import threadable
from w3lib.url import any_to_uri
from scrapy.crawler import Crawler
from scrapy.exceptions import IgnoreRequest
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy.utils.conf import get_config
from scrapy.utils.console import DEFAULT_PYTHON_SHELLS, start_python_console
from scrapy.utils.datatypes import SequenceExclude
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import is_asyncio_reactor_installed, set_asyncio_event_loop
from scrapy.utils.response import open_in_browser
class Shell:
relevant_classes: Tuple[type, ...] = (Crawler, Spider, Request, Response, Settings)
def __init__(
self,
crawler: Crawler,
update_vars: Optional[Callable[[Dict[str, Any]], None]] = None,
code: Optional[str] = None,
):
self.crawler: Crawler = crawler
self.update_vars: Callable[[Dict[str, Any]], None] = update_vars or (
lambda x: None
)
self.item_class: type = load_object(crawler.settings["DEFAULT_ITEM_CLASS"])
self.spider: Optional[Spider] = None
self.inthread: bool = not threadable.isInIOThread()
self.code: Optional[str] = code
self.vars: Dict[str, Any] = {}
def start(
self,
url: Optional[str] = None,
request: Optional[Request] = None,
response: Optional[Response] = None,
spider: Optional[Spider] = None,
redirect: bool = True,
) -> None:
# disable accidental Ctrl-C key press from shutting down the engine
signal.signal(signal.SIGINT, signal.SIG_IGN)
if url:
self.fetch(url, spider, redirect=redirect)
elif request:
self.fetch(request, spider)
elif response:
request = response.request
self.populate_vars(response, request, spider)
else:
self.populate_vars()
if self.code:
print(eval(self.code, globals(), self.vars)) # nosec
else:
"""
Detect interactive shell setting in scrapy.cfg
e.g.: ~/.config/scrapy.cfg or ~/.scrapy.cfg
[settings]
# shell can be one of ipython, bpython or python;
# to be used as the interactive python console, if available.
# (default is ipython, fallbacks in the order listed above)
shell = python
"""
cfg = get_config()
section, option = "settings", "shell"
env = os.environ.get("SCRAPY_PYTHON_SHELL")
shells = []
if env:
shells += env.strip().lower().split(",")
elif cfg.has_option(section, option):
shells += [cfg.get(section, option).strip().lower()]
else: # try all by default
shells += DEFAULT_PYTHON_SHELLS.keys()
# always add standard shell as fallback
shells += ["python"]
start_python_console(
self.vars, shells=shells, banner=self.vars.pop("banner", "")
)
def _schedule(
self, request: Request, spider: Optional[Spider]
) -> defer.Deferred[Any]:
if is_asyncio_reactor_installed():
# set the asyncio event loop for the current thread
event_loop_path = self.crawler.settings["ASYNCIO_EVENT_LOOP"]
set_asyncio_event_loop(event_loop_path)
spider = self._open_spider(request, spider)
d = _request_deferred(request)
d.addCallback(lambda x: (x, spider))
assert self.crawler.engine
self.crawler.engine.crawl(request)
return d
def _open_spider(self, request: Request, spider: Optional[Spider]) -> Spider:
if self.spider:
return self.spider
if spider is None:
spider = self.crawler.spider or self.crawler._create_spider()
self.crawler.spider = spider
assert self.crawler.engine
self.crawler.engine.open_spider(spider, close_if_idle=False)
self.spider = spider
return spider
def fetch(
self,
request_or_url: Union[Request, str],
spider: Optional[Spider] = None,
redirect: bool = True,
**kwargs: Any,
) -> None:
from twisted.internet import reactor
if isinstance(request_or_url, Request):
request = request_or_url
else:
url = any_to_uri(request_or_url)
request = Request(url, dont_filter=True, **kwargs)
if redirect:
request.meta["handle_httpstatus_list"] = SequenceExclude(
range(300, 400)
)
else:
request.meta["handle_httpstatus_all"] = True
response = None
try:
response, spider = threads.blockingCallFromThread(
reactor, self._schedule, request, spider
)
except IgnoreRequest:
pass
self.populate_vars(response, request, spider)
def populate_vars(
self,
response: Optional[Response] = None,
request: Optional[Request] = None,
spider: Optional[Spider] = None,
) -> None:
import scrapy
self.vars["scrapy"] = scrapy
self.vars["crawler"] = self.crawler
self.vars["item"] = self.item_class()
self.vars["settings"] = self.crawler.settings
self.vars["spider"] = spider
self.vars["request"] = request
self.vars["response"] = response
if self.inthread:
self.vars["fetch"] = self.fetch
self.vars["view"] = open_in_browser
self.vars["shelp"] = self.print_help
self.update_vars(self.vars)
if not self.code:
self.vars["banner"] = self.get_help()
def print_help(self) -> None:
print(self.get_help())
def get_help(self) -> str:
b = []
b.append("Available Scrapy objects:")
b.append(
" scrapy scrapy module (contains scrapy.Request, scrapy.Selector, etc)"
)
for k, v in sorted(self.vars.items()):
if self._is_relevant(v):
b.append(f" {k:<10} {v}")
b.append("Useful shortcuts:")
if self.inthread:
b.append(
" fetch(url[, redirect=True]) "
"Fetch URL and update local objects (by default, redirects are followed)"
)
b.append(
" fetch(req) "
"Fetch a scrapy.Request and update local objects "
)
b.append(" shelp() Shell help (print this help)")
b.append(" view(response) View response in a browser")
return "\n".join(f"[s] {line}" for line in b)
def _is_relevant(self, value: Any) -> bool:
return isinstance(value, self.relevant_classes) or is_item(value)
def inspect_response(response: Response, spider: Spider) -> None:
"""Open a shell to inspect the given response"""
# Shell.start removes the SIGINT handler, so save it and re-add it after
# the shell has closed
sigint_handler = signal.getsignal(signal.SIGINT)
Shell(spider.crawler).start(response=response, spider=spider)
signal.signal(signal.SIGINT, sigint_handler)
def _request_deferred(request: Request) -> defer.Deferred[Any]:
"""Wrap a request inside a Deferred.
This function is harmful, do not use it until you know what you are doing.
This returns a Deferred whose first pair of callbacks are the request
callback and errback. The Deferred also triggers when the request
callback/errback is executed (i.e. when the request is downloaded)
WARNING: Do not call request.replace() until after the deferred is called.
"""
request_callback = request.callback
request_errback = request.errback
def _restore_callbacks(result: Any) -> Any:
request.callback = request_callback
request.errback = request_errback
return result
d: defer.Deferred[Any] = defer.Deferred()
d.addBoth(_restore_callbacks)
if request.callback:
d.addCallback(request.callback)
if request.errback:
d.addErrback(request.errback)
request.callback, request.errback = d.callback, d.errback
return d
| 671115.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
"""
Scheduler queues
"""
from __future__ import annotations
import marshal
import pickle # nosec
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union
from queuelib import queue
from scrapy.utils.request import request_from_dict
if TYPE_CHECKING:
from os import PathLike
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request
from scrapy.crawler import Crawler
def _with_mkdir(queue_class: Type[queue.BaseQueue]) -> Type[queue.BaseQueue]:
class DirectoriesCreated(queue_class): # type: ignore[valid-type,misc]
def __init__(self, path: Union[str, PathLike], *args: Any, **kwargs: Any):
dirname = Path(path).parent
if not dirname.exists():
dirname.mkdir(parents=True, exist_ok=True)
super().__init__(path, *args, **kwargs)
return DirectoriesCreated
def _serializable_queue(
queue_class: Type[queue.BaseQueue],
serialize: Callable[[Any], bytes],
deserialize: Callable[[bytes], Any],
) -> Type[queue.BaseQueue]:
class SerializableQueue(queue_class): # type: ignore[valid-type,misc]
def push(self, obj: Any) -> None:
s = serialize(obj)
super().push(s)
def pop(self) -> Optional[Any]:
s = super().pop()
if s:
return deserialize(s)
return None
def peek(self) -> Optional[Any]:
"""Returns the next object to be returned by :meth:`pop`,
but without removing it from the queue.
Raises :exc:`NotImplementedError` if the underlying queue class does
not implement a ``peek`` method, which is optional for queues.
"""
try:
s = super().peek()
except AttributeError as ex:
raise NotImplementedError(
"The underlying queue class does not implement 'peek'"
) from ex
if s:
return deserialize(s)
return None
return SerializableQueue
def _scrapy_serialization_queue(
queue_class: Type[queue.BaseQueue],
) -> Type[queue.BaseQueue]:
class ScrapyRequestQueue(queue_class): # type: ignore[valid-type,misc]
def __init__(self, crawler: Crawler, key: str):
self.spider = crawler.spider
super().__init__(key)
@classmethod
def from_crawler(
cls, crawler: Crawler, key: str, *args: Any, **kwargs: Any
) -> Self:
return cls(crawler, key)
def push(self, request: Request) -> None:
request_dict = request.to_dict(spider=self.spider)
super().push(request_dict)
def pop(self) -> Optional[Request]:
request = super().pop()
if not request:
return None
return request_from_dict(request, spider=self.spider)
def peek(self) -> Optional[Request]:
"""Returns the next object to be returned by :meth:`pop`,
but without removing it from the queue.
Raises :exc:`NotImplementedError` if the underlying queue class does
not implement a ``peek`` method, which is optional for queues.
"""
request = super().peek()
if not request:
return None
return request_from_dict(request, spider=self.spider)
return ScrapyRequestQueue
def _scrapy_non_serialization_queue(
queue_class: Type[queue.BaseQueue],
) -> Type[queue.BaseQueue]:
class ScrapyRequestQueue(queue_class): # type: ignore[valid-type,misc]
@classmethod
def from_crawler(cls, crawler: Crawler, *args: Any, **kwargs: Any) -> Self:
return cls()
def peek(self) -> Optional[Any]:
"""Returns the next object to be returned by :meth:`pop`,
but without removing it from the queue.
Raises :exc:`NotImplementedError` if the underlying queue class does
not implement a ``peek`` method, which is optional for queues.
"""
try:
s = super().peek()
except AttributeError as ex:
raise NotImplementedError(
"The underlying queue class does not implement 'peek'"
) from ex
return s
return ScrapyRequestQueue
def _pickle_serialize(obj: Any) -> bytes:
try:
return pickle.dumps(obj, protocol=4)
# Both pickle.PicklingError and AttributeError can be raised by pickle.dump(s)
# TypeError is raised from parsel.Selector
except (pickle.PicklingError, AttributeError, TypeError) as e:
raise ValueError(str(e)) from e
# queue.*Queue aren't subclasses of queue.BaseQueue
_PickleFifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.FifoDiskQueue), _pickle_serialize, pickle.loads # type: ignore[arg-type]
)
_PickleLifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.LifoDiskQueue), _pickle_serialize, pickle.loads # type: ignore[arg-type]
)
_MarshalFifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.FifoDiskQueue), marshal.dumps, marshal.loads # type: ignore[arg-type]
)
_MarshalLifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.LifoDiskQueue), marshal.dumps, marshal.loads # type: ignore[arg-type]
)
# public queue classes
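# each disk queue composes three layers: the queuelib queue, the directory-creating wrapper,
# and a serialization wrapper; the Scrapy layer on top converts Request objects to/from dicts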
PickleFifoDiskQueue = _scrapy_serialization_queue(_PickleFifoSerializationDiskQueue)
PickleLifoDiskQueue = _scrapy_serialization_queue(_PickleLifoSerializationDiskQueue)
MarshalFifoDiskQueue = _scrapy_serialization_queue(_MarshalFifoSerializationDiskQueue)
MarshalLifoDiskQueue = _scrapy_serialization_queue(_MarshalLifoSerializationDiskQueue)
FifoMemoryQueue = _scrapy_non_serialization_queue(queue.FifoMemoryQueue) # type: ignore[arg-type]
LifoMemoryQueue = _scrapy_non_serialization_queue(queue.LifoMemoryQueue) # type: ignore[arg-type]
| 452701.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
"""
This module provides some useful functions for working with
scrapy.http.Request objects
"""
from __future__ import annotations
import hashlib
import json
import warnings
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Protocol,
Tuple,
Type,
Union,
)
from urllib.parse import urlunparse
from weakref import WeakKeyDictionary
from w3lib.http import basic_auth_header
from w3lib.url import canonicalize_url
from scrapy import Request, Spider
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import load_object
from scrapy.utils.python import to_bytes, to_unicode
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
def _serialize_headers(headers: Iterable[bytes], request: Request) -> Iterable[bytes]:
for header in headers:
if header in request.headers:
yield header
yield from request.headers.getlist(header)
_fingerprint_cache: WeakKeyDictionary[
Request, Dict[Tuple[Optional[Tuple[bytes, ...]], bool], bytes]
]
_fingerprint_cache = WeakKeyDictionary()
def fingerprint(
request: Request,
*,
include_headers: Optional[Iterable[Union[bytes, str]]] = None,
keep_fragments: bool = False,
) -> bytes:
"""
Return the request fingerprint.
The request fingerprint is a hash that uniquely identifies the resource the
request points to. For example, take the following two urls:
http://www.example.com/query?id=111&cat=222
http://www.example.com/query?cat=222&id=111
    Even though those are two different URLs, both point to the same resource
    and are equivalent (i.e. they should return the same response).
    Another example is cookies used to store session ids. Suppose the
following page is only accessible to authenticated users:
http://www.example.com/members/offers.html
Lots of sites use a cookie to store the session id, which adds a random
component to the HTTP Request and thus should be ignored when calculating
the fingerprint.
For this reason, request headers are ignored by default when calculating
the fingerprint. If you want to include specific headers use the
include_headers argument, which is a list of Request headers to include.
Also, servers usually ignore fragments in urls when handling requests,
so they are also ignored by default when calculating the fingerprint.
If you want to include them, set the keep_fragments argument to True
(for instance when handling requests with a headless browser).
"""
processed_include_headers: Optional[Tuple[bytes, ...]] = None
if include_headers:
processed_include_headers = tuple(
to_bytes(h.lower()) for h in sorted(include_headers)
)
cache = _fingerprint_cache.setdefault(request, {})
cache_key = (processed_include_headers, keep_fragments)
if cache_key not in cache:
# To decode bytes reliably (JSON does not support bytes), regardless of
# character encoding, we use bytes.hex()
headers: Dict[str, List[str]] = {}
if processed_include_headers:
for header in processed_include_headers:
if header in request.headers:
headers[header.hex()] = [
header_value.hex()
for header_value in request.headers.getlist(header)
]
fingerprint_data = {
"method": to_unicode(request.method),
"url": canonicalize_url(request.url, keep_fragments=keep_fragments),
"body": (request.body or b"").hex(),
"headers": headers,
}
fingerprint_json = json.dumps(fingerprint_data, sort_keys=True)
cache[cache_key] = hashlib.sha1(fingerprint_json.encode()).digest() # nosec
return cache[cache_key]
class RequestFingerprinterProtocol(Protocol):
def fingerprint(self, request: Request) -> bytes: ...
class RequestFingerprinter:
"""Default fingerprinter.
It takes into account a canonical version
(:func:`w3lib.url.canonicalize_url`) of :attr:`request.url
<scrapy.http.Request.url>` and the values of :attr:`request.method
<scrapy.http.Request.method>` and :attr:`request.body
<scrapy.http.Request.body>`. It then generates an `SHA1
<https://en.wikipedia.org/wiki/SHA-1>`_ hash.
.. seealso:: :setting:`REQUEST_FINGERPRINTER_IMPLEMENTATION`.
"""
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls(crawler)
def __init__(self, crawler: Optional[Crawler] = None):
if crawler:
implementation = crawler.settings.get(
"REQUEST_FINGERPRINTER_IMPLEMENTATION"
)
else:
implementation = "SENTINEL"
if implementation != "SENTINEL":
message = (
"'REQUEST_FINGERPRINTER_IMPLEMENTATION' is a deprecated setting.\n"
"And it will be removed in future version of Scrapy."
)
warnings.warn(message, category=ScrapyDeprecationWarning, stacklevel=2)
self._fingerprint = fingerprint
def fingerprint(self, request: Request) -> bytes:
return self._fingerprint(request)
def request_authenticate(
request: Request,
username: str,
password: str,
) -> None:
"""Authenticate the given request (in place) using the HTTP basic access
authentication mechanism (RFC 2617) and the given username and password
"""
request.headers["Authorization"] = basic_auth_header(username, password)
def request_httprepr(request: Request) -> bytes:
"""Return the raw HTTP representation (as bytes) of the given request.
This is provided only for reference since it's not the actual stream of
    bytes that will be sent when performing the request (that's controlled
by Twisted).
"""
parsed = urlparse_cached(request)
path = urlunparse(("", "", parsed.path or "/", parsed.params, parsed.query, ""))
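    # the request line uses the origin-form target, e.g. (illustrative) a GET for
    # http://example.com/index.html?x=1 produces b"GET /index.html?x=1 HTTP/1.1"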
s = to_bytes(request.method) + b" " + to_bytes(path) + b" HTTP/1.1\r\n"
s += b"Host: " + to_bytes(parsed.hostname or b"") + b"\r\n"
if request.headers:
s += request.headers.to_string() + b"\r\n"
s += b"\r\n"
s += request.body
return s
def referer_str(request: Request) -> Optional[str]:
"""Return Referer HTTP header suitable for logging."""
referrer = request.headers.get("Referer")
if referrer is None:
return referrer
return to_unicode(referrer, errors="replace")
def request_from_dict(d: Dict[str, Any], *, spider: Optional[Spider] = None) -> Request:
"""Create a :class:`~scrapy.Request` object from a dict.
If a spider is given, it will try to resolve the callbacks looking at the
spider for methods with the same name.
"""
request_cls: Type[Request] = load_object(d["_class"]) if "_class" in d else Request
kwargs = {key: value for key, value in d.items() if key in request_cls.attributes}
if d.get("callback") and spider:
kwargs["callback"] = _get_method(spider, d["callback"])
if d.get("errback") and spider:
kwargs["errback"] = _get_method(spider, d["errback"])
return request_cls(**kwargs)
def _get_method(obj: Any, name: Any) -> Any:
"""Helper function for request_from_dict"""
name = str(name)
try:
return getattr(obj, name)
except AttributeError:
raise ValueError(f"Method {name!r} not found in: {obj}")
def request_to_curl(request: Request) -> str:
"""
Converts a :class:`~scrapy.Request` object to a curl command.
    :param request: the :class:`~scrapy.Request` object to be converted
:return: string containing the curl command
"""
method = request.method
data = f"--data-raw '{request.body.decode('utf-8')}'" if request.body else ""
headers = " ".join(
f"-H '{k.decode()}: {v[0].decode()}'" for k, v in request.headers.items()
)
url = request.url
cookies = ""
if request.cookies:
if isinstance(request.cookies, dict):
cookie = "; ".join(f"{k}={v}" for k, v in request.cookies.items())
cookies = f"--cookie '{cookie}'"
elif isinstance(request.cookies, list):
cookie = "; ".join(
f"{list(c.keys())[0]}={list(c.values())[0]}" for c in request.cookies
)
cookies = f"--cookie '{cookie}'"
curl_cmd = f"curl -X {method} {url} {data} {headers} {cookies}".strip()
return " ".join(curl_cmd.split())
| 109129.py | [
"CWE-327: Use of a Broken or Risky Cryptographic Algorithm"
] |
from encoder.params_data import *
from encoder.model import SpeakerEncoder
from encoder.audio import preprocess_wav # We want to expose this function from here
from matplotlib import cm
from encoder import audio
from pathlib import Path
import numpy as np
import torch
_model = None # type: SpeakerEncoder
_device = None # type: torch.device
def load_model(weights_fpath: Path, device=None):
"""
    Loads the model in memory. If this function is not explicitly called, it will be run on the
first call to embed_frames() with the default weights file.
:param weights_fpath: the path to saved model weights.
:param device: either a torch device or the name of a torch device (e.g. "cpu", "cuda"). The
model will be loaded and will run on this device. Outputs will however always be on the cpu.
    If None, will default to your GPU if it's available, otherwise your CPU.
"""
# TODO: I think the slow loading of the encoder might have something to do with the device it
# was saved on. Worth investigating.
global _model, _device
if device is None:
_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
elif isinstance(device, str):
_device = torch.device(device)
_model = SpeakerEncoder(_device, torch.device("cpu"))
checkpoint = torch.load(weights_fpath, _device)
_model.load_state_dict(checkpoint["model_state"])
_model.eval()
print("Loaded encoder \"%s\" trained to step %d" % (weights_fpath.name, checkpoint["step"]))
def is_loaded():
return _model is not None
def embed_frames_batch(frames_batch):
"""
    Computes embeddings for a batch of mel spectrograms.
    :param frames_batch: a batch of mel spectrograms as a numpy array of float32 of shape
(batch_size, n_frames, n_channels)
:return: the embeddings as a numpy array of float32 of shape (batch_size, model_embedding_size)
"""
if _model is None:
raise Exception("Model was not loaded. Call load_model() before inference.")
frames = torch.from_numpy(frames_batch).to(_device)
embed = _model.forward(frames).detach().cpu().numpy()
return embed
def compute_partial_slices(n_samples, partial_utterance_n_frames=partials_n_frames,
min_pad_coverage=0.75, overlap=0.5):
"""
Computes where to split an utterance waveform and its corresponding mel spectrogram to obtain
partial utterances of <partial_utterance_n_frames> each. Both the waveform and the mel
spectrogram slices are returned, so as to make each partial utterance waveform correspond to
its spectrogram. This function assumes that the mel spectrogram parameters used are those
defined in params_data.py.
    The returned ranges may index beyond the length of the waveform. It is
recommended that you pad the waveform with zeros up to wave_slices[-1].stop.
:param n_samples: the number of samples in the waveform
:param partial_utterance_n_frames: the number of mel spectrogram frames in each partial
utterance
:param min_pad_coverage: when reaching the last partial utterance, it may or may not have
enough frames. If at least <min_pad_coverage> of <partial_utterance_n_frames> are present,
then the last partial utterance will be considered, as if we padded the audio. Otherwise,
it will be discarded, as if we trimmed the audio. If there aren't enough frames for 1 partial
utterance, this parameter is ignored so that the function always returns at least 1 slice.
:param overlap: by how much the partial utterance should overlap. If set to 0, the partial
utterances are entirely disjoint.
:return: the waveform slices and mel spectrogram slices as lists of array slices. Index
respectively the waveform and the mel spectrogram with these slices to obtain the partial
utterances.
"""
assert 0 <= overlap < 1
assert 0 < min_pad_coverage <= 1
samples_per_frame = int((sampling_rate * mel_window_step / 1000))
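    # e.g. with a 16 kHz sampling rate and a 10 ms mel window step (values come from
    # params_data.py) this is 160 samples per frame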
n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
frame_step = max(int(np.round(partial_utterance_n_frames * (1 - overlap))), 1)
# Compute the slices
wav_slices, mel_slices = [], []
steps = max(1, n_frames - partial_utterance_n_frames + frame_step + 1)
for i in range(0, steps, frame_step):
mel_range = np.array([i, i + partial_utterance_n_frames])
wav_range = mel_range * samples_per_frame
mel_slices.append(slice(*mel_range))
wav_slices.append(slice(*wav_range))
# Evaluate whether extra padding is warranted or not
last_wav_range = wav_slices[-1]
coverage = (n_samples - last_wav_range.start) / (last_wav_range.stop - last_wav_range.start)
if coverage < min_pad_coverage and len(mel_slices) > 1:
mel_slices = mel_slices[:-1]
wav_slices = wav_slices[:-1]
return wav_slices, mel_slices
def embed_utterance(wav, using_partials=True, return_partials=False, **kwargs):
"""
Computes an embedding for a single utterance.
# TODO: handle multiple wavs to benefit from batching on GPU
:param wav: a preprocessed (see audio.py) utterance waveform as a numpy array of float32
:param using_partials: if True, then the utterance is split in partial utterances of
<partial_utterance_n_frames> frames and the utterance embedding is computed from their
normalized average. If False, the utterance is instead computed from feeding the entire
    spectrogram to the network.
:param return_partials: if True, the partial embeddings will also be returned along with the
wav slices that correspond to the partial embeddings.
    :param kwargs: additional arguments to compute_partial_slices()
:return: the embedding as a numpy array of float32 of shape (model_embedding_size,). If
<return_partials> is True, the partial utterances as a numpy array of float32 of shape
(n_partials, model_embedding_size) and the wav partials as a list of slices will also be
returned. If <using_partials> is simultaneously set to False, both these values will be None
instead.
"""
# Process the entire utterance if not using partials
if not using_partials:
frames = audio.wav_to_mel_spectrogram(wav)
embed = embed_frames_batch(frames[None, ...])[0]
if return_partials:
return embed, None, None
return embed
# Compute where to split the utterance into partials and pad if necessary
wave_slices, mel_slices = compute_partial_slices(len(wav), **kwargs)
max_wave_length = wave_slices[-1].stop
if max_wave_length >= len(wav):
wav = np.pad(wav, (0, max_wave_length - len(wav)), "constant")
# Split the utterance into partials
frames = audio.wav_to_mel_spectrogram(wav)
frames_batch = np.array([frames[s] for s in mel_slices])
partial_embeds = embed_frames_batch(frames_batch)
# Compute the utterance embedding from the partial embeddings
raw_embed = np.mean(partial_embeds, axis=0)
embed = raw_embed / np.linalg.norm(raw_embed, 2)
if return_partials:
return embed, partial_embeds, wave_slices
return embed
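# Usage sketch (assumes load_model() defined earlier in this module has been called and that
# `wav` is a preprocessed float32 waveform, see audio.py):
#     embed = embed_utterance(wav)                                   # (model_embedding_size,)
#     embed, partial_embeds, wav_slices = embed_utterance(wav, return_partials=True)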
def embed_speaker(wavs, **kwargs):
    raise NotImplementedError()
def plot_embedding_as_heatmap(embed, ax=None, title="", shape=None, color_range=(0, 0.30)):
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if shape is None:
height = int(np.sqrt(len(embed)))
shape = (height, -1)
embed = embed.reshape(shape)
cmap = cm.get_cmap()
mappable = ax.imshow(embed, cmap=cmap)
cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04)
sm = cm.ScalarMappable(cmap=cmap)
sm.set_clim(*color_range)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title(title)
| 770044.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import time
from datetime import datetime
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from synthesizer import audio
from synthesizer.models.tacotron import Tacotron
from synthesizer.synthesizer_dataset import SynthesizerDataset, collate_synthesizer
from synthesizer.utils import ValueWindow, data_parallel_workaround
from synthesizer.utils.plot import plot_spectrogram
from synthesizer.utils.symbols import symbols
from synthesizer.utils.text import sequence_to_text
from vocoder.display import *
def np_now(x: torch.Tensor): return x.detach().cpu().numpy()
def time_string():
return datetime.now().strftime("%Y-%m-%d %H:%M")
def train(run_id: str, syn_dir: Path, models_dir: Path, save_every: int, backup_every: int, force_restart: bool,
hparams):
models_dir.mkdir(exist_ok=True)
model_dir = models_dir.joinpath(run_id)
plot_dir = model_dir.joinpath("plots")
wav_dir = model_dir.joinpath("wavs")
mel_output_dir = model_dir.joinpath("mel-spectrograms")
meta_folder = model_dir.joinpath("metas")
model_dir.mkdir(exist_ok=True)
plot_dir.mkdir(exist_ok=True)
wav_dir.mkdir(exist_ok=True)
mel_output_dir.mkdir(exist_ok=True)
meta_folder.mkdir(exist_ok=True)
    weights_fpath = model_dir / "synthesizer.pt"
metadata_fpath = syn_dir.joinpath("train.txt")
print("Checkpoint path: {}".format(weights_fpath))
print("Loading training data from: {}".format(metadata_fpath))
print("Using model: Tacotron")
# Bookkeeping
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
# From WaveRNN/train_tacotron.py
if torch.cuda.is_available():
device = torch.device("cuda")
for session in hparams.tts_schedule:
_, _, _, batch_size = session
if batch_size % torch.cuda.device_count() != 0:
raise ValueError("`batch_size` must be evenly divisible by n_gpus!")
else:
device = torch.device("cpu")
print("Using device:", device)
# Instantiate Tacotron Model
print("\nInitialising Tacotron Model...\n")
model = Tacotron(embed_dims=hparams.tts_embed_dims,
num_chars=len(symbols),
encoder_dims=hparams.tts_encoder_dims,
decoder_dims=hparams.tts_decoder_dims,
n_mels=hparams.num_mels,
fft_bins=hparams.num_mels,
postnet_dims=hparams.tts_postnet_dims,
encoder_K=hparams.tts_encoder_K,
lstm_dims=hparams.tts_lstm_dims,
postnet_K=hparams.tts_postnet_K,
num_highways=hparams.tts_num_highways,
dropout=hparams.tts_dropout,
stop_threshold=hparams.tts_stop_threshold,
speaker_embedding_size=hparams.speaker_embedding_size).to(device)
# Initialize the optimizer
optimizer = optim.Adam(model.parameters())
# Load the weights
if force_restart or not weights_fpath.exists():
print("\nStarting the training of Tacotron from scratch\n")
model.save(weights_fpath)
# Embeddings metadata
char_embedding_fpath = meta_folder.joinpath("CharacterEmbeddings.tsv")
with open(char_embedding_fpath, "w", encoding="utf-8") as f:
for symbol in symbols:
if symbol == " ":
symbol = "\\s" # For visual purposes, swap space with \s
f.write("{}\n".format(symbol))
else:
print("\nLoading weights at %s" % weights_fpath)
model.load(weights_fpath, optimizer)
print("Tacotron weights loaded from step %d" % model.step)
# Initialize the dataset
metadata_fpath = syn_dir.joinpath("train.txt")
mel_dir = syn_dir.joinpath("mels")
embed_dir = syn_dir.joinpath("embeds")
dataset = SynthesizerDataset(metadata_fpath, mel_dir, embed_dir, hparams)
for i, session in enumerate(hparams.tts_schedule):
current_step = model.get_step()
r, lr, max_step, batch_size = session
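        # Each schedule entry is assumed to be (reduction factor r, learning rate, final step,
        # batch size); r controls how many mel frames the decoder emits per step.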
training_steps = max_step - current_step
# Do we need to change to the next session?
if current_step >= max_step:
# Are there no further sessions than the current one?
if i == len(hparams.tts_schedule) - 1:
# We have completed training. Save the model and exit
model.save(weights_fpath, optimizer)
break
else:
# There is a following session, go to it
continue
model.r = r
# Begin the training
simple_table([(f"Steps with r={r}", str(training_steps // 1000) + "k Steps"),
("Batch Size", batch_size),
("Learning Rate", lr),
("Outputs/Step (r)", model.r)])
for p in optimizer.param_groups:
p["lr"] = lr
collate_fn = partial(collate_synthesizer, r=r, hparams=hparams)
data_loader = DataLoader(dataset, batch_size, shuffle=True, num_workers=2, collate_fn=collate_fn)
total_iters = len(dataset)
steps_per_epoch = np.ceil(total_iters / batch_size).astype(np.int32)
epochs = np.ceil(training_steps / steps_per_epoch).astype(np.int32)
for epoch in range(1, epochs+1):
for i, (texts, mels, embeds, idx) in enumerate(data_loader, 1):
start_time = time.time()
# Generate stop tokens for training
stop = torch.ones(mels.shape[0], mels.shape[2])
for j, k in enumerate(idx):
stop[j, :int(dataset.metadata[k][4])-1] = 0
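                # dataset.metadata[k][4] is assumed to hold the unpadded mel frame count, so the
                # stop target is 0 up to the penultimate real frame and 1 from the final frame
                # onwards (covering the padded region).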
texts = texts.to(device)
mels = mels.to(device)
embeds = embeds.to(device)
stop = stop.to(device)
# Forward pass
# Parallelize model onto GPUS using workaround due to python bug
if device.type == "cuda" and torch.cuda.device_count() > 1:
m1_hat, m2_hat, attention, stop_pred = data_parallel_workaround(model, texts, mels, embeds)
else:
m1_hat, m2_hat, attention, stop_pred = model(texts, mels, embeds)
# Backward pass
m1_loss = F.mse_loss(m1_hat, mels) + F.l1_loss(m1_hat, mels)
m2_loss = F.mse_loss(m2_hat, mels)
stop_loss = F.binary_cross_entropy(stop_pred, stop)
loss = m1_loss + m2_loss + stop_loss
optimizer.zero_grad()
loss.backward()
if hparams.tts_clip_grad_norm is not None:
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.tts_clip_grad_norm)
if np.isnan(grad_norm.cpu()):
print("grad_norm was NaN!")
optimizer.step()
time_window.append(time.time() - start_time)
loss_window.append(loss.item())
step = model.get_step()
k = step // 1000
msg = f"| Epoch: {epoch}/{epochs} ({i}/{steps_per_epoch}) | Loss: {loss_window.average:#.4} | " \
f"{1./time_window.average:#.2} steps/s | Step: {k}k | "
stream(msg)
# Backup or save model as appropriate
if backup_every != 0 and step % backup_every == 0 :
backup_fpath = weights_fpath.parent / f"synthesizer_{k:06d}.pt"
model.save(backup_fpath, optimizer)
if save_every != 0 and step % save_every == 0 :
# Must save latest optimizer state to ensure that resuming training
# doesn't produce artifacts
model.save(weights_fpath, optimizer)
# Evaluate model to generate samples
epoch_eval = hparams.tts_eval_interval == -1 and i == steps_per_epoch # If epoch is done
step_eval = hparams.tts_eval_interval > 0 and step % hparams.tts_eval_interval == 0 # Every N steps
if epoch_eval or step_eval:
for sample_idx in range(hparams.tts_eval_num_samples):
# At most, generate samples equal to number in the batch
if sample_idx + 1 <= len(texts):
# Remove padding from mels using frame length in metadata
mel_length = int(dataset.metadata[idx[sample_idx]][4])
mel_prediction = np_now(m2_hat[sample_idx]).T[:mel_length]
target_spectrogram = np_now(mels[sample_idx]).T[:mel_length]
attention_len = mel_length // model.r
eval_model(attention=np_now(attention[sample_idx][:, :attention_len]),
mel_prediction=mel_prediction,
target_spectrogram=target_spectrogram,
input_seq=np_now(texts[sample_idx]),
step=step,
plot_dir=plot_dir,
mel_output_dir=mel_output_dir,
wav_dir=wav_dir,
sample_num=sample_idx + 1,
loss=loss,
hparams=hparams)
# Break out of loop to update training schedule
if step >= max_step:
break
# Add line break after every epoch
print("")
def eval_model(attention, mel_prediction, target_spectrogram, input_seq, step,
plot_dir, mel_output_dir, wav_dir, sample_num, loss, hparams):
# Save some results for evaluation
attention_path = str(plot_dir.joinpath("attention_step_{}_sample_{}".format(step, sample_num)))
save_attention(attention, attention_path)
# save predicted mel spectrogram to disk (debug)
mel_output_fpath = mel_output_dir.joinpath("mel-prediction-step-{}_sample_{}.npy".format(step, sample_num))
np.save(str(mel_output_fpath), mel_prediction, allow_pickle=False)
# save griffin lim inverted wav for debug (mel -> wav)
wav = audio.inv_mel_spectrogram(mel_prediction.T, hparams)
wav_fpath = wav_dir.joinpath("step-{}-wave-from-mel_sample_{}.wav".format(step, sample_num))
audio.save_wav(wav, str(wav_fpath), sr=hparams.sample_rate)
# save real and predicted mel-spectrogram plot to disk (control purposes)
spec_fpath = plot_dir.joinpath("step-{}-mel-spectrogram_sample_{}.png".format(step, sample_num))
title_str = "{}, {}, step={}, loss={:.5f}".format("Tacotron", time_string(), step, loss)
plot_spectrogram(mel_prediction, str(spec_fpath), title=title_str,
target_spectrogram=target_spectrogram,
max_len=target_spectrogram.size // hparams.num_mels)
print("Input at step {}: {}".format(step, sequence_to_text(input_seq)))
| 508391.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
"""
Main entry point for the benchmarking tool.
This module provides a command-line interface for running benchmarks using Typer.
It allows users to specify the path to an agent, the benchmark(s) to run, and other
options such as verbosity.
Functions
---------
get_agent : function
Dynamically imports and returns the default configuration agent from the given path.
main : function
The main function that runs the specified benchmarks with the given agent.
Outputs the results to the console.
Attributes
----------
__name__ : str
The standard boilerplate for invoking the main function when the script is executed.
"""
import importlib
import os.path
import sys
from typing import Annotated, Optional
import typer
from langchain.globals import set_llm_cache
from langchain_community.cache import SQLiteCache
from gpt_engineer.applications.cli.main import load_env_if_needed
from gpt_engineer.benchmark.bench_config import BenchConfig
from gpt_engineer.benchmark.benchmarks.load import get_benchmark
from gpt_engineer.benchmark.run import export_yaml_results, print_results, run
app = typer.Typer(
context_settings={"help_option_names": ["-h", "--help"]}
) # creates a CLI app
def get_agent(path):
"""
Dynamically imports and returns the default configuration agent from the given path.
Parameters
----------
path : str
The file path to the module containing the default configuration agent.
Returns
-------
BaseAgent
An instance of the imported default configuration agent.
"""
# Dynamically import the python module at path
sys.path.append(os.path.dirname(path))
agent_module = importlib.import_module(path.replace("/", ".").replace(".py", ""))
return agent_module.default_config_agent()
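# For example (with a hypothetical path), get_agent("agents/my_agent.py") appends "agents" to
# sys.path, imports the module "agents.my_agent" and returns the object produced by its
# default_config_agent() function.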
@app.command(
help="""
Run any benchmark(s) against the specified agent.
\b
Currently available benchmarks are: apps and mbpp
"""
)
def main(
path_to_agent: Annotated[
str,
typer.Argument(
help="python file that contains a function called 'default_config_agent'"
),
],
bench_config: Annotated[
str, typer.Argument(help="optional task name in benchmark")
] = os.path.join(os.path.dirname(__file__), "default_bench_config.toml"),
yaml_output: Annotated[
Optional[str],
typer.Option(help="print results for each task", show_default=False),
] = None,
verbose: Annotated[
Optional[bool],
typer.Option(help="print results for each task", show_default=False),
] = False,
use_cache: Annotated[
Optional[bool],
typer.Option(
help="Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.",
show_default=False,
),
] = True,
):
"""
The main function that runs the specified benchmarks with the given agent and outputs the results to the console.
Parameters
----------
path_to_agent : str
The file path to the Python module that contains a function called 'default_config_agent'.
bench_config : str, default=default_bench_config.toml
Configuration file for choosing which benchmark problems to run. See default config for more details.
yaml_output: Optional[str], default=None
Pass a path to a yaml file to have results written to file.
verbose : Optional[bool], default=False
A flag to indicate whether to print results for each task.
use_cache : Optional[bool], default=True
Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.
Returns
-------
None
"""
if use_cache:
set_llm_cache(SQLiteCache(database_path=".langchain.db"))
load_env_if_needed()
config = BenchConfig.from_toml(bench_config)
print("using config file: " + bench_config)
benchmarks = list()
benchmark_results = dict()
for specific_config_name in vars(config):
specific_config = getattr(config, specific_config_name)
if hasattr(specific_config, "active"):
if specific_config.active:
benchmarks.append(specific_config_name)
for benchmark_name in benchmarks:
benchmark = get_benchmark(benchmark_name, config)
if len(benchmark.tasks) == 0:
print(
benchmark_name
+ " was skipped, since no tasks are specified. Increase the number of tasks in the config file at: "
+ bench_config
)
continue
agent = get_agent(path_to_agent)
results = run(agent, benchmark, verbose=verbose)
print(
f"\n--- Results for agent {path_to_agent}, benchmark: {benchmark_name} ---"
)
print_results(results)
print()
benchmark_results[benchmark_name] = {
"detailed": [result.to_dict() for result in results]
}
if yaml_output is not None:
export_yaml_results(yaml_output, benchmark_results, config.to_dict())
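# Example invocation (hypothetical paths; the module entry point is an assumption):
#   python -m gpt_engineer.benchmark path/to/my_agent.py path/to/bench_config.toml --yaml-output results.yaml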
if __name__ == "__main__":
typer.run(main)
| 641969.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
# This is a websocket interpreter, TTS and STT disabled.
# It makes a websocket on a port that sends/receives LMC messages in *streaming* format.
### You MUST send a start and end flag with each message! For example: ###
"""
{"role": "user", "type": "message", "start": True})
{"role": "user", "type": "message", "content": "hi"})
{"role": "user", "type": "message", "end": True})
"""
import asyncio
import json
###
# from RealtimeTTS import TextToAudioStream, OpenAIEngine, CoquiEngine
# from RealtimeSTT import AudioToTextRecorder
# from beeper import Beeper
import time
import traceback
from typing import Any, Dict, List
from fastapi import FastAPI, Header, WebSocket
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from uvicorn import Config, Server
class Settings(BaseModel):
auto_run: bool
custom_instructions: str
model: str
class AsyncInterpreter:
def __init__(self, interpreter):
self.interpreter = interpreter
# STT
# self.stt = AudioToTextRecorder(use_microphone=False)
# self.stt.stop() # It needs this for some reason
# TTS
# if self.interpreter.tts == "coqui":
# engine = CoquiEngine()
# elif self.interpreter.tts == "openai":
# engine = OpenAIEngine()
# self.tts = TextToAudioStream(engine)
# Clock
# clock()
# self.beeper = Beeper()
# Startup sounds
# self.beeper.beep("Blow")
# self.tts.feed("Hi, how can I help you?")
# self.tts.play_async(on_audio_chunk=self.on_tts_chunk, muted=True)
self._input_queue = asyncio.Queue() # Queue that .input will shove things into
self._output_queue = asyncio.Queue() # Queue to put output chunks into
self._last_lmc_start_flag = None # Unix time of last LMC start flag received
self._in_keyboard_write_block = (
False # Tracks whether interpreter is trying to use the keyboard
)
# self.loop = asyncio.get_event_loop()
async def _add_to_queue(self, queue, item):
await queue.put(item)
async def clear_queue(self, queue):
while not queue.empty():
await queue.get()
async def clear_input_queue(self):
await self.clear_queue(self._input_queue)
async def clear_output_queue(self):
await self.clear_queue(self._output_queue)
async def input(self, chunk):
"""
Expects a chunk in streaming LMC format.
"""
if isinstance(chunk, bytes):
# It's probably a chunk of audio
# self.stt.feed_audio(chunk)
pass
else:
try:
chunk = json.loads(chunk)
except:
pass
if "start" in chunk:
# self.stt.start()
self._last_lmc_start_flag = time.time()
self.interpreter.computer.terminate()
# Stop any code execution... maybe we should make interpreter.stop()?
elif "end" in chunk:
asyncio.create_task(self.run())
else:
await self._add_to_queue(self._input_queue, chunk)
def add_to_output_queue_sync(self, chunk):
"""
Synchronous function to add a chunk to the output queue.
"""
asyncio.create_task(self._add_to_queue(self._output_queue, chunk))
async def run(self):
"""
Runs OI on the audio bytes submitted to the input. Will add streaming LMC chunks to the _output_queue.
"""
# self.beeper.start()
# self.stt.stop()
# message = self.stt.text()
# print("THE MESSAGE:", message)
input_queue = list(self._input_queue._queue)
message = [i for i in input_queue if i["type"] == "message"][0]["content"]
def generate(message):
last_lmc_start_flag = self._last_lmc_start_flag
# interpreter.messages = self.active_chat_messages
# print("🍀🍀🍀🍀GENERATING, using these messages: ", self.interpreter.messages)
print("passing this in:", message)
for chunk in self.interpreter.chat(message, display=False, stream=True):
if self._last_lmc_start_flag != last_lmc_start_flag:
# self.beeper.stop()
break
# self.add_to_output_queue_sync(chunk) # To send text, not just audio
content = chunk.get("content")
# Handle message blocks
if chunk.get("type") == "message":
self.add_to_output_queue_sync(
chunk.copy()
) # To send text, not just audio
# ^^^^^^^ MUST be a copy, otherwise the first chunk will get modified by OI >>while<< it's in the queue. Insane
if content:
# self.beeper.stop()
# Experimental: The AI voice sounds better with replacements like these, but it should happen at the TTS layer
# content = content.replace(". ", ". ... ").replace(", ", ", ... ").replace("!", "! ... ").replace("?", "? ... ")
yield content
# Handle code blocks
elif chunk.get("type") == "code":
pass
# if "start" in chunk:
# self.beeper.start()
# Experimental: If the AI wants to type, we should type immediately
# if (
# self.interpreter.messages[-1]
# .get("content", "")
# .startswith("computer.keyboard.write(")
# ):
# keyboard.controller.type(content)
# self._in_keyboard_write_block = True
# if "end" in chunk and self._in_keyboard_write_block:
# self._in_keyboard_write_block = False
# # (This will make it so it doesn't type twice when the block executes)
# if self.interpreter.messages[-1]["content"].startswith(
# "computer.keyboard.write("
# ):
# self.interpreter.messages[-1]["content"] = (
# "dummy_variable = ("
# + self.interpreter.messages[-1]["content"][
# len("computer.keyboard.write(") :
# ]
# )
# Send a completion signal
self.add_to_output_queue_sync(
{"role": "server", "type": "completion", "content": "DONE"}
)
# Feed generate to RealtimeTTS
# self.tts.feed(generate(message))
for _ in generate(message):
pass
# self.tts.play_async(on_audio_chunk=self.on_tts_chunk, muted=True)
async def output(self):
return await self._output_queue.get()
def server(interpreter, port=8000): # Default port is 8000 if not specified
async_interpreter = AsyncInterpreter(interpreter)
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"], # Allow all methods (GET, POST, etc.)
allow_headers=["*"], # Allow all headers
)
@app.post("/settings")
async def settings(payload: Dict[str, Any]):
for key, value in payload.items():
print("Updating interpreter settings with the following:")
print(key, value)
if key == "llm" and isinstance(value, dict):
for sub_key, sub_value in value.items():
setattr(async_interpreter.interpreter, sub_key, sub_value)
else:
setattr(async_interpreter.interpreter, key, value)
return {"status": "success"}
@app.websocket("/")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
try:
async def receive_input():
while True:
data = await websocket.receive()
print(data)
if isinstance(data, bytes):
await async_interpreter.input(data)
elif "text" in data:
await async_interpreter.input(data["text"])
elif data == {"type": "websocket.disconnect", "code": 1000}:
print("Websocket disconnected with code 1000.")
break
async def send_output():
while True:
output = await async_interpreter.output()
if isinstance(output, bytes):
# await websocket.send_bytes(output)
# we don't send out bytes rn, no TTS
pass
elif isinstance(output, dict):
await websocket.send_text(json.dumps(output))
await asyncio.gather(receive_input(), send_output())
except Exception as e:
print(f"WebSocket connection closed with exception: {e}")
traceback.print_exc()
finally:
await websocket.close()
config = Config(app, host="0.0.0.0", port=port)
interpreter.uvicorn_server = Server(config)
interpreter.uvicorn_server.run()
| 262477.py | [
"CWE-942: Permissive Cross-domain Policy with Untrusted Domains"
] |
#!/usr/bin/env python3
""" Handles command line calls to git """
import logging
import os
import sys
from subprocess import PIPE, Popen
logger = logging.getLogger(__name__)
class Git():
""" Handles calls to github """
def __init__(self) -> None:
logger.debug("Initializing: %s", self.__class__.__name__)
self._working_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
self._available = self._check_available()
logger.debug("Initialized: %s", self.__class__.__name__)
def _from_git(self, command: str) -> tuple[bool, list[str]]:
""" Execute a git command
Parameters
----------
command : str
The command to send to git
Returns
-------
success: bool
            ``True`` if the command successfully executed otherwise ``False``
list[str]
The output lines from stdout if there was no error, otherwise from stderr
"""
logger.debug("command: '%s'", command)
cmd = f"git {command}"
with Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self._working_dir) as proc:
stdout, stderr = proc.communicate()
retcode = proc.returncode
success = retcode == 0
lines = stdout.decode("utf-8", errors="replace").splitlines()
if not lines:
lines = stderr.decode("utf-8", errors="replace").splitlines()
logger.debug("command: '%s', returncode: %s, success: %s, lines: %s",
cmd, retcode, success, lines)
return success, lines
def _check_available(self) -> bool:
""" Check if git is available. Does a call to git status. If the process errors due to
folder ownership, attempts to add the folder to github safe folders list and tries
again
Returns
-------
bool
``True`` if git is available otherwise ``False``
"""
success, msg = self._from_git("status")
if success:
return True
config = next((line.strip() for line in msg if "add safe.directory" in line), None)
if not config:
return False
        success, _ = self._from_git(config.split("git ", 1)[-1])
        if not success:
            return False
        return self._from_git("status")[0]
@property
def status(self) -> list[str]:
""" Obtain the output of git status for tracked files only """
if not self._available:
return []
success, status = self._from_git("status -uno")
if not success or not status:
return []
return status
@property
def branch(self) -> str:
""" str: The git branch that is currently being used to execute Faceswap. """
status = next((line.strip() for line in self.status if "On branch" in line), "Not Found")
return status.replace("On branch ", "")
@property
def branches(self) -> list[str]:
""" list[str]: List of all available branches. """
if not self._available:
return []
success, branches = self._from_git("branch -a")
if not success or not branches:
return []
return branches
def update_remote(self) -> bool:
""" Update all branches to track remote
Returns
-------
bool
            ``True`` if update was successful otherwise ``False``
"""
if not self._available:
return False
return self._from_git("remote update")[0]
def pull(self) -> bool:
""" Pull the current branch
Returns
-------
bool
``True`` if pull is successful otherwise ``False``
"""
if not self._available:
return False
return self._from_git("pull")[0]
def checkout(self, branch: str) -> bool:
""" Checkout the requested branch
Parameters
----------
branch : str
The branch to checkout
Returns
-------
bool
            ``True`` if the branch was successfully checked out otherwise ``False``
"""
if not self._available:
return False
return self._from_git(f"checkout {branch}")[0]
def get_commits(self, count: int) -> list[str]:
""" Obtain the last commits to the repo
Parameters
----------
count : int
The last number of commits to obtain
Returns
-------
list[str]
list of commits, or empty list if none found
"""
if not self._available:
return []
success, commits = self._from_git(f"log --pretty=oneline --abbrev-commit -n {count}")
if not success or not commits:
return []
return commits
git = Git()
""" :class:`Git`: Handles calls to github """
| 513433.py | [
"CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"
] |
#!/usr/bin/env python3
""" Plugin loader for Faceswap extract, training and convert tasks """
from __future__ import annotations
import logging
import os
import typing as T
from importlib import import_module
if T.TYPE_CHECKING:
from collections.abc import Callable
from plugins.extract.detect._base import Detector
from plugins.extract.align._base import Aligner
from plugins.extract.mask._base import Masker
from plugins.extract.recognition._base import Identity
from plugins.train.model._base import ModelBase
from plugins.train.trainer._base import TrainerBase
logger = logging.getLogger(__name__)
class PluginLoader():
""" Retrieve, or get information on, Faceswap plugins
Return a specific plugin, list available plugins, or get the default plugin for a
task.
Example
-------
>>> from plugins.plugin_loader import PluginLoader
>>> align_plugins = PluginLoader.get_available_extractors('align')
>>> aligner = PluginLoader.get_aligner('cv2-dnn')
"""
@staticmethod
def get_detector(name: str, disable_logging: bool = False) -> type[Detector]:
""" Return requested detector plugin
Parameters
----------
name: str
The name of the requested detector plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.extract.detect` object:
An extraction detector plugin
"""
return PluginLoader._import("extract.detect", name, disable_logging)
@staticmethod
def get_aligner(name: str, disable_logging: bool = False) -> type[Aligner]:
""" Return requested aligner plugin
Parameters
----------
name: str
The name of the requested aligner plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.extract.align` object:
An extraction aligner plugin
"""
return PluginLoader._import("extract.align", name, disable_logging)
@staticmethod
def get_masker(name: str, disable_logging: bool = False) -> type[Masker]:
""" Return requested masker plugin
Parameters
----------
name: str
The name of the requested masker plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.extract.mask` object:
An extraction masker plugin
"""
return PluginLoader._import("extract.mask", name, disable_logging)
@staticmethod
def get_recognition(name: str, disable_logging: bool = False) -> type[Identity]:
""" Return requested recognition plugin
Parameters
----------
name: str
            The name of the requested recognition plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.extract.recognition` object:
An extraction recognition plugin
"""
return PluginLoader._import("extract.recognition", name, disable_logging)
@staticmethod
def get_model(name: str, disable_logging: bool = False) -> type[ModelBase]:
""" Return requested training model plugin
Parameters
----------
name: str
The name of the requested training model plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.train.model` object:
A training model plugin
"""
return PluginLoader._import("train.model", name, disable_logging)
@staticmethod
def get_trainer(name: str, disable_logging: bool = False) -> type[TrainerBase]:
""" Return requested training trainer plugin
Parameters
----------
name: str
The name of the requested training trainer plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.train.trainer` object:
A training trainer plugin
"""
return PluginLoader._import("train.trainer", name, disable_logging)
@staticmethod
def get_converter(category: str, name: str, disable_logging: bool = False) -> Callable:
""" Return requested converter plugin
Converters work slightly differently to other faceswap plugins. They are created to do a
specific task (e.g. color adjustment, mask blending etc.), so multiple plugins will be
loaded in the convert phase, rather than just one plugin for the other phases.
Parameters
----------
        category: str
            The converter category to load the plugin from (e.g. 'color', 'mask', 'scaling' or
            'writer')
        name: str
            The name of the requested converter plugin
disable_logging: bool, optional
Whether to disable the INFO log message that the plugin is being imported.
Default: `False`
Returns
-------
:class:`plugins.convert` object:
A converter sub plugin
"""
return PluginLoader._import(f"convert.{category}", name, disable_logging)
@staticmethod
def _import(attr: str, name: str, disable_logging: bool):
""" Import the plugin's module
Parameters
----------
        attr: str
            The dotted plugin category to import from (e.g. "extract.detect")
        name: str
            The name of the requested plugin
disable_logging: bool
Whether to disable the INFO log message that the plugin is being imported.
Returns
-------
:class:`plugin` object:
A plugin
"""
name = name.replace("-", "_")
ttl = attr.split(".")[-1].title()
if not disable_logging:
logger.info("Loading %s from %s plugin...", ttl, name.title())
attr = "model" if attr == "Trainer" else attr.lower()
mod = ".".join(("plugins", attr, name))
module = import_module(mod)
return getattr(module, ttl)
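    # For example, PluginLoader._import("extract.align", "cv2-dnn", True) converts the plugin
    # name to "cv2_dnn", imports "plugins.extract.align.cv2_dnn" (assuming such a module
    # exists) and returns its "Align" attribute, i.e. the title-cased final component of `attr`.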
@staticmethod
def get_available_extractors(extractor_type: T.Literal["align", "detect", "mask"],
add_none: bool = False,
extend_plugin: bool = False) -> list[str]:
""" Return a list of available extractors of the given type
Parameters
----------
extractor_type: {'align', 'detect', 'mask'}
The type of extractor to return the plugins for
add_none: bool, optional
Append "none" to the list of returned plugins. Default: False
extend_plugin: bool, optional
Some plugins have configuration options that mean that multiple 'pseudo-plugins'
can be generated based on their settings. An example of this is the bisenet-fp mask
which, whilst selected as 'bisenet-fp' can be stored as 'bisenet-fp-face' and
'bisenet-fp-head' depending on whether hair has been included in the mask or not.
``True`` will generate each pseudo-plugin, ``False`` will generate the original
plugin name. Default: ``False``
Returns
-------
list:
A list of the available extractor plugin names for the given type
"""
extractpath = os.path.join(os.path.dirname(__file__),
"extract",
extractor_type)
extractors = [item.name.replace(".py", "").replace("_", "-")
for item in os.scandir(extractpath)
if not item.name.startswith("_")
and not item.name.endswith("defaults.py")
and item.name.endswith(".py")]
extendable = ["bisenet-fp", "custom"]
if extend_plugin and extractor_type == "mask" and any(ext in extendable
for ext in extractors):
for msk in extendable:
extractors.remove(msk)
extractors.extend([f"{msk}_face", f"{msk}_head"])
extractors = sorted(extractors)
if add_none:
extractors.insert(0, "none")
return extractors
@staticmethod
def get_available_models() -> list[str]:
""" Return a list of available training models
Returns
-------
list:
A list of the available training model plugin names
"""
modelpath = os.path.join(os.path.dirname(__file__), "train", "model")
models = sorted(item.name.replace(".py", "").replace("_", "-")
for item in os.scandir(modelpath)
if not item.name.startswith("_")
and not item.name.endswith("defaults.py")
and item.name.endswith(".py"))
return models
@staticmethod
def get_default_model() -> str:
""" Return the default training model plugin name
Returns
-------
str:
The default faceswap training model
"""
models = PluginLoader.get_available_models()
return 'original' if 'original' in models else models[0]
@staticmethod
def get_available_convert_plugins(convert_category: str, add_none: bool = True) -> list[str]:
""" Return a list of available converter plugins in the given category
Parameters
----------
convert_category: {'color', 'mask', 'scaling', 'writer'}
The category of converter plugin to return the plugins for
add_none: bool, optional
Append "none" to the list of returned plugins. Default: True
Returns
-------
list
A list of the available converter plugin names in the given category
"""
convertpath = os.path.join(os.path.dirname(__file__),
"convert",
convert_category)
converters = sorted(item.name.replace(".py", "").replace("_", "-")
for item in os.scandir(convertpath)
if not item.name.startswith("_")
and not item.name.endswith("defaults.py")
and item.name.endswith(".py"))
if add_none:
converters.insert(0, "none")
return converters
| 335203.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
"""
Contains some simple tests.
The purpose of these tests is to detect crashes and hangs
but NOT to guarantee the correctness of the operations.
For this we want another set of testcases using pytest.
Due to my lazy coding, DON'T USE PATHS WITH BLANKS!
"""
import sys
from subprocess import check_call, CalledProcessError
import urllib
from urllib.request import urlretrieve
import os
from os.path import join as pathjoin, expanduser
FAIL_COUNT = 0
TEST_COUNT = 0
_COLORS = {
"FAIL": "\033[1;31m",
"OK": "\033[1;32m",
"STATUS": "\033[1;37m",
"BOLD": "\033[1m",
"ENDC": "\033[0m"
}
def print_colored(text, color="OK", bold=False):
""" Print colored text
This might not work on windows,
although travis runs windows stuff in git bash, so it might ?
"""
color = _COLORS.get(color, color)
fmt = '' if not bold else _COLORS['BOLD']
print(f"{color}{fmt}{text}{_COLORS['ENDC']}")
def print_ok(text):
""" Print ok in colored text """
print_colored(text, "OK", True)
def print_fail(text):
""" Print fail in colored text """
print_colored(text, "FAIL", True)
def print_status(text):
""" Print status in colored text """
print_colored(text, "STATUS", True)
def run_test(name, cmd):
""" run a test """
global FAIL_COUNT, TEST_COUNT # pylint:disable=global-statement
print_status(f"[?] running {name}")
print(f"Cmd: {' '.join(cmd)}")
TEST_COUNT += 1
try:
check_call(cmd)
print_ok("[+] Test success")
return True
except CalledProcessError as err:
print_fail(f"[-] Test failed with {err}")
FAIL_COUNT += 1
return False
def download_file(url, filename): # TODO: retry
""" Download a file from given url """
if os.path.isfile(filename):
print_status(f"[?] '{url}' already cached as '{filename}'")
return filename
try:
print_status(f"[?] Downloading '{url}' to '{filename}'")
video, _ = urlretrieve(url, filename)
return video
except urllib.error.URLError as err:
print_fail(f"[-] Failed downloading: {err}")
return None
def extract_args(detector, aligner, in_path, out_path, args=None):
""" Extraction command """
py_exe = sys.executable
_extract_args = (f"{py_exe} faceswap.py extract -i {in_path} -o {out_path} -D {detector} "
f"-A {aligner}")
if args:
_extract_args += f" {args}"
return _extract_args.split()
def train_args(model, model_path, faces, iterations=1, batchsize=2, extra_args=""):
""" Train command """
py_exe = sys.executable
args = (f"{py_exe} faceswap.py train -A {faces} -B {faces} -m {model_path} -t {model} "
f"-b {batchsize} -i {iterations} {extra_args}")
return args.split()
def convert_args(in_path, out_path, model_path, writer, args=None):
""" Convert command """
py_exe = sys.executable
conv_args = (f"{py_exe} faceswap.py convert -i {in_path} -o {out_path} -m {model_path} "
f"-w {writer}")
if args:
conv_args += f" {args}"
    return conv_args.split()  # Don't use paths with spaces ;)
def sort_args(in_path, out_path, sortby="face", groupby="hist"):
""" Sort command """
py_exe = sys.executable
_sort_args = (f"{py_exe} tools.py sort -i {in_path} -o {out_path} -s {sortby} -g {groupby} -k")
return _sort_args.split()
def set_train_config(value):
""" Update the mixed_precision and autoclip values to given value
Parameters
----------
value: bool
The value to set the config parameters to.
"""
old_val, new_val = ("False", "True") if value else ("True", "False")
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
train_ini = os.path.join(base_path, "config", "train.ini")
try:
cmd = ["sed", "-i", f"s/autoclip = {old_val}/autoclip = {new_val}/", train_ini]
check_call(cmd)
cmd = ["sed",
"-i",
f"s/mixed_precision = {old_val}/mixed_precision = {new_val}/",
train_ini]
check_call(cmd)
print_ok(f"Set autoclip and mixed_precision to `{new_val}`")
except CalledProcessError as err:
print_fail(f"[-] Test failed with {err}")
return False
def main():
""" Main testing script """
vid_src = "https://faceswap.dev/data/test.mp4"
img_src = "https://archive.org/download/GPN-2003-00070/GPN-2003-00070.jpg"
base_dir = pathjoin(expanduser("~"), "cache", "tests")
vid_base = pathjoin(base_dir, "vid")
img_base = pathjoin(base_dir, "imgs")
os.makedirs(vid_base, exist_ok=True)
os.makedirs(img_base, exist_ok=True)
py_exe = sys.executable
was_trained = False
vid_path = download_file(vid_src, pathjoin(vid_base, "test.mp4"))
if not vid_path:
print_fail("[-] Aborting")
sys.exit(1)
vid_extract = run_test(
"Extraction video with cv2-dnn detector and cv2-dnn aligner.",
extract_args("Cv2-Dnn", "Cv2-Dnn", vid_path, pathjoin(vid_base, "faces"))
)
img_path = download_file(img_src, pathjoin(img_base, "test_img.jpg"))
if not img_path:
print_fail("[-] Aborting")
sys.exit(1)
run_test(
"Extraction images with cv2-dnn detector and cv2-dnn aligner.",
extract_args("Cv2-Dnn", "Cv2-Dnn", img_base, pathjoin(img_base, "faces"))
)
if vid_extract:
run_test(
"Generate configs and test help output",
(
py_exe, "faceswap.py", "-h"
)
)
run_test(
"Sort faces.",
sort_args(
pathjoin(vid_base, "faces"), pathjoin(vid_base, "faces_sorted"),
sortby="face"
)
)
run_test(
"Rename sorted faces.",
(
py_exe, "tools.py", "alignments", "-j", "rename",
"-a", pathjoin(vid_base, "test_alignments.fsa"),
"-c", pathjoin(vid_base, "faces_sorted"),
)
)
set_train_config(True)
run_test(
"Train lightweight model for 1 iteration with WTL, AutoClip, MixedPrecion",
train_args("lightweight",
pathjoin(vid_base, "model"),
pathjoin(vid_base, "faces"),
iterations=1,
batchsize=1,
extra_args="-M"))
set_train_config(False)
was_trained = run_test(
"Train lightweight model for 1 iterations WITHOUT WTL, AutoClip, MixedPrecion",
train_args("lightweight",
pathjoin(vid_base, "model"),
pathjoin(vid_base, "faces"),
iterations=1,
batchsize=1))
if was_trained:
run_test(
"Convert video.",
convert_args(
vid_path, pathjoin(vid_base, "conv"),
pathjoin(vid_base, "model"), "ffmpeg"
)
)
run_test(
"Convert images.",
convert_args(
img_base, pathjoin(img_base, "conv"),
pathjoin(vid_base, "model"), "opencv"
)
)
if FAIL_COUNT == 0:
print_ok(f"[+] Failed {FAIL_COUNT}/{TEST_COUNT} tests.")
sys.exit(0)
else:
print_fail(f"[-] Failed {FAIL_COUNT}/{TEST_COUNT} tests.")
sys.exit(1)
if __name__ == '__main__':
main()
| 006394.py | [
"CWE-939: Improper Authorization in Handler for Custom URL Scheme"
] |
#!/usr/bin/env python
#__all__ = ['pptv_download', 'pptv_download_by_id']
from ..common import *
from ..extractor import VideoExtractor
import re
import time
import urllib
import random
import binascii
from xml.dom.minidom import parseString
def lshift(a, b):
return (a << b) & 0xffffffff
def rshift(a, b):
if a >= 0:
return a >> b
return (0x100000000 + a) >> b
def le32_pack(b_str):
result = 0
result |= b_str[0]
result |= (b_str[1] << 8)
result |= (b_str[2] << 16)
result |= (b_str[3] << 24)
return result
def tea_core(data, key_seg):
delta = 2654435769
d0 = le32_pack(data[:4])
d1 = le32_pack(data[4:8])
sum_ = 0
for rnd in range(32):
sum_ = (sum_ + delta) & 0xffffffff
p1 = (lshift(d1, 4) + key_seg[0]) & 0xffffffff
p2 = (d1 + sum_) & 0xffffffff
p3 = (rshift(d1, 5) + key_seg[1]) & 0xffffffff
mid_p = p1 ^ p2 ^ p3
d0 = (d0 + mid_p) & 0xffffffff
p4 = (lshift(d0, 4) + key_seg[2]) & 0xffffffff
p5 = (d0 + sum_) & 0xffffffff
p6 = (rshift(d0, 5) + key_seg[3]) & 0xffffffff
mid_p = p4 ^ p5 ^ p6
d1 = (d1 + mid_p) & 0xffffffff
return bytes(unpack_le32(d0) + unpack_le32(d1))
def ran_hex(size):
result = []
for i in range(size):
result.append(hex(int(15 * random.random()))[2:])
return ''.join(result)
def zpad(b_str, size):
size_diff = size - len(b_str)
return b_str + bytes(size_diff)
def gen_key(t):
key_seg = [1896220160,101056625, 100692230, 7407110]
t_s = hex(int(t))[2:].encode('utf8')
input_data = zpad(t_s, 16)
out = tea_core(input_data, key_seg)
return binascii.hexlify(out[:8]).decode('utf8') + ran_hex(16)
def unpack_le32(i32):
result = []
result.append(i32 & 0xff)
i32 = rshift(i32, 8)
result.append(i32 & 0xff)
i32 = rshift(i32, 8)
result.append(i32 & 0xff)
i32 = rshift(i32, 8)
result.append(i32 & 0xff)
return result
def get_elem(elem, tag):
return elem.getElementsByTagName(tag)
def get_attr(elem, attr):
return elem.getAttribute(attr)
def get_text(elem):
return elem.firstChild.nodeValue
def shift_time(time_str):
ts = time_str[:-4]
return time.mktime(time.strptime(ts)) - 60
def parse_pptv_xml(dom):
channel = get_elem(dom, 'channel')[0]
title = get_attr(channel, 'nm')
file_list = get_elem(channel, 'file')[0]
item_list = get_elem(file_list, 'item')
streams_cnt = len(item_list)
item_mlist = []
for item in item_list:
rid = get_attr(item, 'rid')
file_type = get_attr(item, 'ft')
size = get_attr(item, 'filesize')
width = get_attr(item, 'width')
height = get_attr(item, 'height')
bitrate = get_attr(item, 'bitrate')
res = '{}x{}@{}kbps'.format(width, height, bitrate)
item_meta = (file_type, rid, size, res)
item_mlist.append(item_meta)
dt_list = get_elem(dom, 'dt')
dragdata_list = get_elem(dom, 'dragdata')
stream_mlist = []
for dt in dt_list:
file_type = get_attr(dt, 'ft')
serv_time = get_text(get_elem(dt, 'st')[0])
expr_time = get_text(get_elem(dt, 'key')[0])
serv_addr = get_text(get_elem(dt, 'sh')[0])
stream_meta = (file_type, serv_addr, expr_time, serv_time)
stream_mlist.append(stream_meta)
segs_mlist = []
for dd in dragdata_list:
file_type = get_attr(dd, 'ft')
seg_list = get_elem(dd, 'sgm')
segs = []
segs_size = []
for seg in seg_list:
rid = get_attr(seg, 'rid')
size = get_attr(seg, 'fs')
segs.append(rid)
segs_size.append(size)
segs_meta = (file_type, segs, segs_size)
segs_mlist.append(segs_meta)
return title, item_mlist, stream_mlist, segs_mlist
# merges the 3 metadata lists
def merge_meta(item_mlist, stream_mlist, segs_mlist):
streams = {}
for i in range(len(segs_mlist)):
streams[str(i)] = {}
for item in item_mlist:
stream = streams[item[0]]
stream['rid'] = item[1]
stream['size'] = item[2]
stream['res'] = item[3]
for s in stream_mlist:
stream = streams[s[0]]
stream['serv_addr'] = s[1]
stream['expr_time'] = s[2]
stream['serv_time'] = s[3]
for seg in segs_mlist:
stream = streams[seg[0]]
stream['segs'] = seg[1]
stream['segs_size'] = seg[2]
return streams
def make_url(stream):
host = stream['serv_addr']
rid = stream['rid']
key = gen_key(shift_time(stream['serv_time']))
key_expr = stream['expr_time']
src = []
for i, seg in enumerate(stream['segs']):
url = 'http://{}/{}/{}?key={}&k={}'.format(host, i, rid, key, key_expr)
url += '&type=web.fpp'
src.append(url)
return src
class PPTV(VideoExtractor):
name = 'PPTV'
stream_types = [
{'itag': '4'},
{'itag': '3'},
{'itag': '2'},
{'itag': '1'},
{'itag': '0'},
]
def prepare(self, **kwargs):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/69.0.3497.100 Safari/537.36"
}
self.vid = match1(self.url, r'https?://sports.pptv.com/vod/(\d+)/*')
if self.url and not self.vid:
if not re.match(r'https?://v.pptv.com/show/(\w+)\.html', self.url):
                raise ValueError('Unknown url pattern')
page_content = get_content(self.url, headers)
self.vid = match1(page_content, r'webcfg\s*=\s*{"id":\s*(\d+)')
if not self.vid:
request = urllib.request.Request(self.url, headers=headers)
response = urllib.request.urlopen(request)
self.vid = match1(response.url, r'https?://sports.pptv.com/vod/(\d+)/*')
if not self.vid:
                raise ValueError('Cannot find id')
api_url = 'http://web-play.pptv.com/webplay3-0-{}.xml'.format(self.vid)
        api_url += '?type=web.fpp&param=type=web.fpp&version=4'
dom = parseString(get_content(api_url, headers))
self.title, m_items, m_streams, m_segs = parse_pptv_xml(dom)
xml_streams = merge_meta(m_items, m_streams, m_segs)
for stream_id in xml_streams:
stream_data = xml_streams[stream_id]
src = make_url(stream_data)
self.streams[stream_id] = {
'container': 'mp4',
'video_profile': stream_data['res'],
'size': int(stream_data['size']),
'src': src
}
site = PPTV()
#site_info = "PPTV.com"
#download = pptv_download
download = site.download_by_url
download_playlist = playlist_not_supported('pptv')
| 454489.py | [
"CWE-939: Improper Authorization in Handler for Custom URL Scheme"
] |
import json
import shutil
import traceback
from pathlib import Path
import numpy as np
from core import pathex
from core.cv2ex import *
from core.interact import interact as io
from core.leras import nn
from DFLIMG import *
from facelib import XSegNet, LandmarksProcessor, FaceType
import pickle
def apply_xseg(input_path, model_path):
if not input_path.exists():
raise ValueError(f'{input_path} not found. Please ensure it exists.')
if not model_path.exists():
raise ValueError(f'{model_path} not found. Please ensure it exists.')
face_type = None
model_dat = model_path / 'XSeg_data.dat'
if model_dat.exists():
dat = pickle.loads( model_dat.read_bytes() )
dat_options = dat.get('options', None)
if dat_options is not None:
face_type = dat_options.get('face_type', None)
if face_type is None:
face_type = io.input_str ("XSeg model face type", 'same', ['h','mf','f','wf','head','same'], help_message="Specify face type of trained XSeg model. For example if XSeg model trained as WF, but faceset is HEAD, specify WF to apply xseg only on WF part of HEAD. Default is 'same'").lower()
if face_type == 'same':
face_type = None
if face_type is not None:
face_type = {'h' : FaceType.HALF,
'mf' : FaceType.MID_FULL,
'f' : FaceType.FULL,
'wf' : FaceType.WHOLE_FACE,
'head' : FaceType.HEAD}[face_type]
io.log_info(f'Applying trained XSeg model to {input_path.name}/ folder.')
device_config = nn.DeviceConfig.ask_choose_device(choose_only_one=True)
nn.initialize(device_config)
xseg = XSegNet(name='XSeg',
load_weights=True,
weights_file_root=model_path,
data_format=nn.data_format,
raise_on_no_model_files=True)
xseg_res = xseg.get_resolution()
images_paths = pathex.get_image_paths(input_path, return_Path_class=True)
for filepath in io.progress_bar_generator(images_paths, "Processing"):
dflimg = DFLIMG.load(filepath)
if dflimg is None or not dflimg.has_data():
io.log_info(f'{filepath} is not a DFLIMG')
continue
img = cv2_imread(filepath).astype(np.float32) / 255.0
h,w,c = img.shape
img_face_type = FaceType.fromString( dflimg.get_face_type() )
if face_type is not None and img_face_type != face_type:
lmrks = dflimg.get_source_landmarks()
fmat = LandmarksProcessor.get_transform_mat(lmrks, w, face_type)
imat = LandmarksProcessor.get_transform_mat(lmrks, w, img_face_type)
g_p = LandmarksProcessor.transform_points (np.float32([(0,0),(w,0),(0,w) ]), fmat, True)
g_p2 = LandmarksProcessor.transform_points (g_p, imat)
mat = cv2.getAffineTransform( g_p2, np.float32([(0,0),(w,0),(0,w) ]) )
img = cv2.warpAffine(img, mat, (w, w), cv2.INTER_LANCZOS4)
img = cv2.resize(img, (xseg_res, xseg_res), interpolation=cv2.INTER_LANCZOS4)
else:
if w != xseg_res:
img = cv2.resize( img, (xseg_res,xseg_res), interpolation=cv2.INTER_LANCZOS4 )
if len(img.shape) == 2:
img = img[...,None]
mask = xseg.extract(img)
if face_type is not None and img_face_type != face_type:
mask = cv2.resize(mask, (w, w), interpolation=cv2.INTER_LANCZOS4)
            mask = cv2.warpAffine( mask, mat, (w,w), np.zeros( (h,w,c), dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4)
mask = cv2.resize(mask, (xseg_res, xseg_res), interpolation=cv2.INTER_LANCZOS4)
mask[mask < 0.5]=0
mask[mask >= 0.5]=1
dflimg.set_xseg_mask(mask)
dflimg.save()
def fetch_xseg(input_path):
if not input_path.exists():
raise ValueError(f'{input_path} not found. Please ensure it exists.')
output_path = input_path.parent / (input_path.name + '_xseg')
output_path.mkdir(exist_ok=True, parents=True)
io.log_info(f'Copying faces containing XSeg polygons to {output_path.name}/ folder.')
images_paths = pathex.get_image_paths(input_path, return_Path_class=True)
files_copied = []
for filepath in io.progress_bar_generator(images_paths, "Processing"):
dflimg = DFLIMG.load(filepath)
if dflimg is None or not dflimg.has_data():
io.log_info(f'{filepath} is not a DFLIMG')
continue
ie_polys = dflimg.get_seg_ie_polys()
if ie_polys.has_polys():
files_copied.append(filepath)
shutil.copy ( str(filepath), str(output_path / filepath.name) )
io.log_info(f'Files copied: {len(files_copied)}')
is_delete = io.input_bool (f"\r\nDelete original files?", True)
if is_delete:
for filepath in files_copied:
Path(filepath).unlink()
def remove_xseg(input_path):
if not input_path.exists():
raise ValueError(f'{input_path} not found. Please ensure it exists.')
io.log_info(f'Processing folder {input_path}')
io.log_info('!!! WARNING : APPLIED XSEG MASKS WILL BE REMOVED FROM THE FRAMES !!!')
io.log_info('!!! WARNING : APPLIED XSEG MASKS WILL BE REMOVED FROM THE FRAMES !!!')
io.log_info('!!! WARNING : APPLIED XSEG MASKS WILL BE REMOVED FROM THE FRAMES !!!')
io.input_str('Press enter to continue.')
images_paths = pathex.get_image_paths(input_path, return_Path_class=True)
files_processed = 0
for filepath in io.progress_bar_generator(images_paths, "Processing"):
dflimg = DFLIMG.load(filepath)
if dflimg is None or not dflimg.has_data():
io.log_info(f'{filepath} is not a DFLIMG')
continue
if dflimg.has_xseg_mask():
dflimg.set_xseg_mask(None)
dflimg.save()
files_processed += 1
io.log_info(f'Files processed: {files_processed}')
def remove_xseg_labels(input_path):
if not input_path.exists():
raise ValueError(f'{input_path} not found. Please ensure it exists.')
io.log_info(f'Processing folder {input_path}')
io.log_info('!!! WARNING : LABELED XSEG POLYGONS WILL BE REMOVED FROM THE FRAMES !!!')
io.log_info('!!! WARNING : LABELED XSEG POLYGONS WILL BE REMOVED FROM THE FRAMES !!!')
io.log_info('!!! WARNING : LABELED XSEG POLYGONS WILL BE REMOVED FROM THE FRAMES !!!')
io.input_str('Press enter to continue.')
images_paths = pathex.get_image_paths(input_path, return_Path_class=True)
files_processed = 0
for filepath in io.progress_bar_generator(images_paths, "Processing"):
dflimg = DFLIMG.load(filepath)
if dflimg is None or not dflimg.has_data():
io.log_info(f'{filepath} is not a DFLIMG')
continue
if dflimg.has_seg_ie_polys():
dflimg.set_seg_ie_polys(None)
dflimg.save()
files_processed += 1
io.log_info(f'Files processed: {files_processed}') | 494107.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/11 17:46
@Author : alexanderwu
@File : run_code.py
@Modified By: mashenquan, 2023/11/27.
1. Mark the location of Console logs in the PROMPT_TEMPLATE with markdown code-block formatting to enhance
the understanding for the LLM.
2. Fix bug: Add the "install dependency" operation.
3. Encapsulate the input of RunCode into RunCodeContext and encapsulate the output of RunCode into
RunCodeResult to standardize and unify parameter passing between WriteCode, RunCode, and DebugError.
4. According to section 2.2.3.5.7 of RFC 135, change the method of transferring file content
(code files, unit test files, log files) from using the message to using the file name.
5. Merged the `Config` class of send18:dev branch to take over the set/get operations of the Environment
class.
"""
import subprocess
from pathlib import Path
from typing import Tuple
from pydantic import Field
from metagpt.actions.action import Action
from metagpt.logs import logger
from metagpt.schema import RunCodeContext, RunCodeResult
from metagpt.utils.exceptions import handle_exception
PROMPT_TEMPLATE = """
Role: You are a senior development and qa engineer, your role is summarize the code running result.
If the running result does not include an error, you should explicitly approve the result.
On the other hand, if the running result indicates some error, you should point out which part, the development code or the test code, produces the error,
and give specific instructions on fixing the errors. Here is the code info:
{context}
Now you should begin your analysis
---
## instruction:
Please summarize the cause of the errors and give correction instruction
## File To Rewrite:
Determine the ONE file to rewrite in order to fix the error, for example, xyz.py, or test_xyz.py
## Status:
Determine if all of the code works fine, if so write PASS, else FAIL,
WRITE ONLY ONE WORD, PASS OR FAIL, IN THIS SECTION
## Send To:
Please write NoOne if there are no errors, Engineer if the errors are due to problematic development codes, else QaEngineer,
WRITE ONLY ONE WORD, NoOne OR Engineer OR QaEngineer, IN THIS SECTION.
---
You should fill in necessary instruction, status, send to, and finally return all content between the --- segment line.
"""
TEMPLATE_CONTEXT = """
## Development Code File Name
{code_file_name}
## Development Code
```python
{code}
```
## Test File Name
{test_file_name}
## Test Code
```python
{test_code}
```
## Running Command
{command}
## Running Output
standard output:
```text
{outs}
```
standard errors:
```text
{errs}
```
"""
class RunCode(Action):
name: str = "RunCode"
i_context: RunCodeContext = Field(default_factory=RunCodeContext)
@classmethod
async def run_text(cls, code) -> Tuple[str, str]:
try:
# We will document_store the result in this dictionary
namespace = {}
exec(code, namespace)
except Exception as e:
return "", str(e)
return namespace.get("result", ""), ""
async def run_script(self, working_directory, additional_python_paths=[], command=[]) -> Tuple[str, str]:
working_directory = str(working_directory)
additional_python_paths = [str(path) for path in additional_python_paths]
# Copy the current environment variables
env = self.context.new_environ()
# Modify the PYTHONPATH environment variable
additional_python_paths = [working_directory] + additional_python_paths
additional_python_paths = ":".join(additional_python_paths)
env["PYTHONPATH"] = additional_python_paths + ":" + env.get("PYTHONPATH", "")
RunCode._install_dependencies(working_directory=working_directory, env=env)
# Start the subprocess
process = subprocess.Popen(
command, cwd=working_directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
logger.info(" ".join(command))
try:
# Wait for the process to complete, with a timeout
stdout, stderr = process.communicate(timeout=10)
except subprocess.TimeoutExpired:
logger.info("The command did not complete within the given timeout.")
process.kill() # Kill the process if it times out
stdout, stderr = process.communicate()
return stdout.decode("utf-8"), stderr.decode("utf-8")
async def run(self, *args, **kwargs) -> RunCodeResult:
logger.info(f"Running {' '.join(self.i_context.command)}")
if self.i_context.mode == "script":
outs, errs = await self.run_script(
command=self.i_context.command,
working_directory=self.i_context.working_directory,
additional_python_paths=self.i_context.additional_python_paths,
)
elif self.i_context.mode == "text":
outs, errs = await self.run_text(code=self.i_context.code)
logger.info(f"{outs=}")
logger.info(f"{errs=}")
context = TEMPLATE_CONTEXT.format(
code=self.i_context.code,
code_file_name=self.i_context.code_filename,
test_code=self.i_context.test_code,
test_file_name=self.i_context.test_filename,
command=" ".join(self.i_context.command),
outs=outs[:500], # outs might be long but they are not important, truncate them to avoid token overflow
errs=errs[:10000], # truncate errors to avoid token overflow
)
prompt = PROMPT_TEMPLATE.format(context=context)
rsp = await self._aask(prompt)
return RunCodeResult(summary=rsp, stdout=outs, stderr=errs)
@staticmethod
@handle_exception(exception_type=subprocess.CalledProcessError)
def _install_via_subprocess(cmd, check, cwd, env):
return subprocess.run(cmd, check=check, cwd=cwd, env=env)
@staticmethod
def _install_requirements(working_directory, env):
file_path = Path(working_directory) / "requirements.txt"
if not file_path.exists():
return
if file_path.stat().st_size == 0:
return
install_command = ["python", "-m", "pip", "install", "-r", "requirements.txt"]
logger.info(" ".join(install_command))
RunCode._install_via_subprocess(install_command, check=True, cwd=working_directory, env=env)
@staticmethod
def _install_pytest(working_directory, env):
install_pytest_command = ["python", "-m", "pip", "install", "pytest"]
logger.info(" ".join(install_pytest_command))
RunCode._install_via_subprocess(install_pytest_command, check=True, cwd=working_directory, env=env)
@staticmethod
def _install_dependencies(working_directory, env):
RunCode._install_requirements(working_directory, env)
RunCode._install_pytest(working_directory, env)
| 173324.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
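Editorial note: the CWE-95 tag above points at `RunCode.run_text`, which passes model-generated text straight to `exec` inside the host process. The sketch below is not from the source; the payload string and the `run_text_in_subprocess` helper are illustrative only, showing the risk and one containment option (a child interpreter with a timeout, mirroring what `run_script` already does for scripts).

```python
import subprocess
import sys

# Hypothetical payload: if an LLM (or a prompt-injected user) produces this string,
# exec() in run_text would run it with the host's privileges.
malicious_code = "import shutil; shutil.rmtree('/tmp/important', ignore_errors=True)"

def run_text_in_subprocess(code: str, timeout: int = 10) -> tuple[str, str]:
    """Sketch: execute untrusted text in a child interpreter instead of the host process.

    This does not make the code safe (it still runs with the same OS permissions),
    but it keeps the host interpreter state intact and allows a hard timeout;
    real isolation would need a sandbox or container.
    """
    proc = subprocess.run(
        [sys.executable, "-c", code],
        capture_output=True,
        text=True,
        timeout=timeout,
    )
    return proc.stdout, proc.stderr

# Usage sketch (do not run the malicious payload above):
# outs, errs = run_text_in_subprocess("print(1 + 1)")
```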
# -*- coding: utf-8 -*-
# @Date : 12/23/2023 4:51 PM
# @Author : stellahong ([email protected])
# @Desc :
from __future__ import annotations
import asyncio
from typing import Any, List, Optional
from pydantic import BaseModel, ConfigDict, Field
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.provider.base_llm import BaseLLM
from metagpt.strategy.base import ThoughtNode, ThoughtTree
from metagpt.strategy.tot_schema import MethodSelect, Strategy, ThoughtSolverConfig
from metagpt.utils.common import CodeParser
OUTPUT_FORMAT = """
Each output should be strictly a list of nodes, in json format, like this:
```json
[
{
"node_id": str = "unique identifier for a solution, can be an ordinal",
"node_state_instruction": "specified sample of solution",
},
...
]
```
"""
class ThoughtSolverBase(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
thought_tree: Optional[ThoughtTree] = Field(default=None)
llm: BaseLLM = Field(default_factory=LLM, exclude=True)
config: ThoughtSolverConfig = Field(default_factory=ThoughtSolverConfig)
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self.llm.use_system_prompt = False
async def solve(self, init_prompt):
"""
Solve method for subclasses to implement.
"""
raise NotImplementedError("Subclasses must implement the solve method")
async def generate_thoughts(self, current_state="", current_node=None) -> List[ThoughtNode]:
"""
Generate children thoughts based on the current state.
Args:
current_state (str): The current state for which thoughts are generated.
current_node (ThoughtNode): The current node in the thought tree.
Returns:
List[ThoughtNode]: List of nodes representing the generated thoughts.
"""
state_prompt = self.config.parser.propose(
current_state=current_state, **{"n_generate_sample": self.config.n_generate_sample}
)
rsp = await self.llm.aask(msg=state_prompt + "\n" + OUTPUT_FORMAT)
thoughts = CodeParser.parse_code(block="", text=rsp)
# NOTE: eval() on raw LLM output is the eval-injection weakness (CWE-95) this file is tagged with
thoughts = eval(thoughts)
# fixme: guard against the model ignoring the format and generating too many nodes
# valid_thoughts = [_node for idx, _node in enumerate(thoughts) if idx < self.n_generate_sample]
return self.thought_tree.update_node(thoughts, current_node=current_node)
async def evaluate_node(self, node, parent_value) -> None:
"""
Evaluate a node and update its status and value.
Args:
node (ThoughtNode): The node to be evaluated.
parent_value (float): The parent node's value.
Returns:
None
"""
eval_prompt = self.config.parser.value(input=node.name, **{"node_id": node.id})
evaluation = await self.llm.aask(msg=eval_prompt)
value = self.config.evaluator(evaluation, **{"node_id": node.id})
status = self.config.evaluator.status_verify(value)
node.update_valid_status(status=status)
# accumulate the score along the path
node.update_value(parent_value + value)
def select_nodes(self, thought_nodes: List[ThoughtNode]) -> List[ThoughtNode]:
"""
Select nodes based on the configured selection method.
Args:
thought_nodes (List[ThoughtNode]): List of nodes to be selected.
Returns:
List[ThoughtNode]: List of selected nodes.
"""
# nodes to be selected
nodes = []
if self.config.method_select == MethodSelect.SAMPLE:
raise NotImplementedError
elif self.config.method_select == MethodSelect.GREEDY:
nodes = sorted(thought_nodes, key=lambda x: x.value, reverse=True)[: self.config.n_select_sample]
for node in thought_nodes:
if node not in nodes:
node.parent = None  # detach the node from the tree
return nodes
def update_solution(self):
"""
Select the result with the highest score.
Returns:
- List[ThoughtNode]: List of nodes representing the best solution.
- List[str]: List of node names forming the best solution path.
"""
best_node = max(self.thought_tree.all_nodes, key=lambda x: x.value, default=None)
best_solution_path = self.thought_tree.parse_node_path(best_node)
return [best_node], best_solution_path
class BFSSolver(ThoughtSolverBase):
async def solve(self, init_prompt=""):
"""
Solve the problem using Breadth-First Search (BFS) strategy.
Args:
init_prompt (str): The initial prompt for the solver.
Returns:
List[str]: The best solution path obtained through BFS.
"""
root = ThoughtNode(init_prompt)
self.thought_tree = ThoughtTree(root)
current_nodes = [root]
for step in range(self.config.max_steps):
solutions = await self._bfs_build(current_nodes)
selected_nodes = self.select_nodes(solutions)
current_nodes = selected_nodes
self.thought_tree.show()
best_solution, best_solution_path = self.update_solution()
logger.info(f"best solution is: {best_solution_path}")
return best_solution_path
async def _bfs_build(self, current_nodes):
"""
Build the thought tree using Breadth-First Search (BFS) strategy.
Args:
current_nodes (List[ThoughtNode]): Current nodes to expand.
Returns:
List[ThoughtNode]: The solutions obtained after expanding the current nodes.
"""
tasks = []
for node in current_nodes:
current_state = self.config.parser(node.name)
current_value = node.value
tasks.append(self.generate_and_evaluate_nodes(current_state, current_value, node))
thought_nodes_list = await asyncio.gather(*tasks)
solutions = [child_node for thought_nodes in thought_nodes_list for child_node in thought_nodes]
return solutions
async def generate_and_evaluate_nodes(self, current_state, current_value, node):
thought_nodes = await self.generate_thoughts(current_state, current_node=node)
await asyncio.gather(
*(self.evaluate_node(child_node, parent_value=current_value) for child_node in thought_nodes)
)
return thought_nodes
class DFSSolver(ThoughtSolverBase):
async def _dfs(self, root_node):
"""
Perform Depth-First Search (DFS) on the thought tree.
Args:
root_node (ThoughtNode): The root node of the thought tree.
Returns:
List[str]: The solution path obtained through DFS.
"""
impossible_state_cnt = 0
node = root_node
for step in range(self.config.max_steps):
current_state = self.config.parser(node.name)
current_value = node.value
thought_nodes = await self.generate_thoughts(current_state, current_node=node)
await self.evaluate_node(thought_nodes[0], parent_value=current_value)
if thought_nodes[0].valid_status is False:
impossible_state_cnt += 1
if impossible_state_cnt >= 2:
logger.info("impossible state reached, break")
break
node = thought_nodes[0]
_solution_path = self.thought_tree.parse_node_path(node)
self.thought_tree.show()
return _solution_path
async def solve(self, init_prompt="", root=ThoughtNode("")):
"""
Solve the problem using Depth-First Search (DFS) strategy.
Args:
init_prompt (str): The initial prompt for the solver.
Returns:
List[str]: The best solution path obtained through DFS.
"""
root = ThoughtNode(init_prompt)
self.thought_tree = ThoughtTree(root)
for n in range(self.config.n_solution_sample):
# fixme: backtracking is needed; when the current node is unusable, fall back to its parent and generate new nodes to keep exploring
await self._dfs(root)
best_solution, best_solution_path = self.update_solution()
logger.info(f"best solution is: {best_solution_path}")
return best_solution_path
class MCTSSolver(ThoughtSolverBase):
async def solve(self, init_prompt=""):
raise NotImplementedError
class TreeofThought(BaseModel):
config: ThoughtSolverConfig = Field(default_factory=ThoughtSolverConfig)
solver: ThoughtSolverBase = Field(default_factory=ThoughtSolverBase)
strategy: Strategy = Field(default=Strategy.BFS)
class Config:
arbitrary_types_allowed = True
def __init__(self, **kwargs: Any):
super().__init__(**kwargs)
self._initialize_solver(self.strategy)
def _initialize_solver(self, strategy):
"""
Initialize the solver based on the chosen strategy.
Args:
strategy (Strategy): The strategy to use for solving.
Returns:
ThoughtSolverBase: An instance of the appropriate solver.
"""
if strategy == Strategy.BFS:
self.solver = BFSSolver(config=self.config)
elif strategy == Strategy.DFS:
self.solver = DFSSolver(config=self.config)
elif strategy == Strategy.MCTS:
self.solver = MCTSSolver(config=self.config)
else:
raise NotImplementedError(f"Invalid strategy: {strategy}, only support BFS/DFS/MCTS currently!")
async def solve(self, init_prompt=""):
"""
Solve the problem using the specified strategy.
Args:
init_prompt (str): The initial prompt for the solver.
strategy (str): The strategy to use for solving.
Returns:
Any: The solution obtained using the selected strategy.
"""
return await self.solver.solve(init_prompt)
| 387722.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
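Editorial note: in `ThoughtSolverBase.generate_thoughts` above, the model is asked for JSON (see `OUTPUT_FORMAT`) but the reply is materialized with `eval`, which is what the CWE-95 tag refers to. A hedged sketch of the safer parse, assuming the reply really is the JSON list the prompt requests; the helper name is illustrative.

```python
import json

def parse_thoughts(raw_text: str) -> list:
    """Sketch: parse the node list with json.loads instead of eval.

    json.loads only builds plain data structures, so a hostile reply cannot
    execute code; malformed output raises ValueError instead.
    """
    thoughts = json.loads(raw_text)
    if not isinstance(thoughts, list):
        raise ValueError("expected a JSON list of nodes")
    for node in thoughts:
        if not isinstance(node, dict) or not {"node_id", "node_state_instruction"} <= node.keys():
            raise ValueError(f"node missing required keys: {node!r}")
    return thoughts

# Example:
# parse_thoughts('[{"node_id": "1", "node_state_instruction": "try x = 2"}]')
```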
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/4/29 16:19
@Author : alexanderwu
@File : test_common.py
@Modified by: mashenquan, 2023/11/21. Add unit tests.
"""
import importlib
import os
import platform
import uuid
from pathlib import Path
from typing import Any, Set
import pytest
from pydantic import BaseModel
from metagpt.actions import RunCode
from metagpt.const import get_metagpt_root
from metagpt.roles.tutorial_assistant import TutorialAssistant
from metagpt.schema import Message
from metagpt.utils.common import (
NoMoneyException,
OutputParser,
any_to_str,
any_to_str_set,
aread,
awrite,
check_cmd_exists,
concat_namespace,
import_class_inst,
parse_recipient,
print_members,
read_file_block,
read_json_file,
require_python_version,
split_namespace,
)
class TestGetProjectRoot:
def change_etc_dir(self):
# current_directory = Path.cwd()
abs_root = "/etc"
os.chdir(abs_root)
def test_get_project_root(self):
project_root = get_metagpt_root()
src_path = project_root / "metagpt"
assert src_path.exists()
def test_get_root_exception(self):
self.change_etc_dir()
project_root = get_metagpt_root()
assert project_root
def test_any_to_str(self):
class Input(BaseModel):
x: Any = None
want: str
inputs = [
Input(x=TutorialAssistant, want="metagpt.roles.tutorial_assistant.TutorialAssistant"),
Input(x=TutorialAssistant(), want="metagpt.roles.tutorial_assistant.TutorialAssistant"),
Input(x=RunCode, want="metagpt.actions.run_code.RunCode"),
Input(x=RunCode(), want="metagpt.actions.run_code.RunCode"),
Input(x=Message, want="metagpt.schema.Message"),
Input(x=Message(content=""), want="metagpt.schema.Message"),
Input(x="A", want="A"),
]
for i in inputs:
v = any_to_str(i.x)
assert v == i.want
def test_any_to_str_set(self):
class Input(BaseModel):
x: Any = None
want: Set
inputs = [
Input(
x=[TutorialAssistant, RunCode(), "a"],
want={"metagpt.roles.tutorial_assistant.TutorialAssistant", "metagpt.actions.run_code.RunCode", "a"},
),
Input(
x={TutorialAssistant, "a"},
want={"metagpt.roles.tutorial_assistant.TutorialAssistant", "a"},
),
Input(
x=(TutorialAssistant, RunCode(), "a"),
want={"metagpt.roles.tutorial_assistant.TutorialAssistant", "metagpt.actions.run_code.RunCode", "a"},
),
Input(
x={"a": TutorialAssistant, "b": RunCode(), "c": "a"},
want={"a", "metagpt.roles.tutorial_assistant.TutorialAssistant", "metagpt.actions.run_code.RunCode"},
),
]
for i in inputs:
v = any_to_str_set(i.x)
assert v == i.want
def test_check_cmd_exists(self):
class Input(BaseModel):
command: str
platform: str
inputs = [
{"command": "cat", "platform": "linux"},
{"command": "ls", "platform": "linux"},
{"command": "mspaint", "platform": "windows"},
]
plat = "windows" if platform.system().lower() == "windows" else "linux"
for i in inputs:
seed = Input(**i)
result = check_cmd_exists(seed.command)
if plat == seed.platform:
assert result == 0
else:
assert result != 0
@pytest.mark.parametrize(("filename", "want"), [("1.md", "File list"), ("2.md", "Language"), ("3.md", "# TODOs")])
@pytest.mark.asyncio
async def test_parse_data_exception(self, filename, want):
pathname = Path(__file__).parent.parent.parent / "data/output_parser" / filename
assert pathname.exists()
data = await aread(filename=pathname)
result = OutputParser.parse_data(data=data)
assert want in result
@pytest.mark.parametrize(
("ver", "want", "err"), [((1, 2, 3, 4), False, True), ((2, 3, 9), True, False), ((3, 10, 18), False, False)]
)
def test_require_python_version(self, ver, want, err):
try:
res = require_python_version(ver)
assert res == want
except ValueError:
assert err
def test_no_money_exception(self):
val = NoMoneyException(3.10)
assert "Amount required:" in str(val)
@pytest.mark.parametrize("module_path", ["tests.metagpt.utils.test_common"])
def test_print_members(self, module_path):
module = importlib.import_module(module_path)
with pytest.raises(Exception) as info:
print_members(module)
assert info is None
@pytest.mark.parametrize(
("words", "want"), [("", ""), ("## Send To: Engineer", "Engineer"), ("Send To: \nNone", "None")]
)
def test_parse_recipient(self, words, want):
res = parse_recipient(words)
assert want == res
def test_concat_namespace(self):
assert concat_namespace("a", "b", "c") == "a:b:c"
assert concat_namespace("a", "b", "c", "e") == "a:b:c:e"
assert concat_namespace("a", "b", "c", "e", "f") == "a:b:c:e:f"
@pytest.mark.parametrize(
("val", "want"),
[
(
"tests/metagpt/test_role.py:test_react:Input:subscription",
["tests/metagpt/test_role.py", "test_react", "Input", "subscription"],
),
(
"tests/metagpt/test_role.py:test_react:Input:goal",
["tests/metagpt/test_role.py", "test_react", "Input", "goal"],
),
],
)
def test_split_namespace(self, val, want):
res = split_namespace(val, maxsplit=-1)
assert res == want
def test_read_json_file(self):
assert read_json_file(str(Path(__file__).parent / "../../data/ut_writer/yft_swaggerApi.json"), encoding="utf-8")
with pytest.raises(FileNotFoundError):
read_json_file("not_exists_file", encoding="utf-8")
with pytest.raises(ValueError):
read_json_file(__file__, encoding="utf-8")
def test_import_class_inst(self):
rc = import_class_inst("RunCode", "metagpt.actions.run_code", name="X")
assert rc.name == "X"
@pytest.mark.asyncio
async def test_read_file_block(self):
assert await read_file_block(filename=__file__, lineno=6, end_lineno=6) == "@File : test_common.py\n"
@pytest.mark.asyncio
async def test_read_write(self):
pathname = Path(__file__).parent / f"../../../workspace/unittest/{uuid.uuid4().hex}" / "test.tmp"
await awrite(pathname, "ABC")
data = await aread(pathname)
assert data == "ABC"
pathname.unlink(missing_ok=True)
@pytest.mark.asyncio
async def test_read_write_error_charset(self):
pathname = Path(__file__).parent / f"../../../workspace/unittest/{uuid.uuid4().hex}" / "test.txt"
content = "中国abc123\u27f6"
await awrite(filename=pathname, data=content)
data = await aread(filename=pathname)
assert data == content
content = "GB18030 是中国国家标准局发布的新一代中文字符集标准,是 GBK 的升级版,支持更广泛的字符范围。"
await awrite(filename=pathname, data=content, encoding="gb2312")
data = await aread(filename=pathname, encoding="utf-8")
assert data == content
if __name__ == "__main__":
pytest.main([__file__, "-s"])
| 082256.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
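Editorial note: the CWE-706 tag on this test file relates to helpers such as `import_class_inst`, which resolve a class from caller-supplied module and class names. A minimal sketch of constraining such dynamic resolution with an allow-list; the registry contents and helper name below are illustrative, not part of the library.

```python
import importlib

# Illustrative allow-list: only names we expect to be resolvable dynamically.
ALLOWED_CLASSES = {
    ("metagpt.actions.run_code", "RunCode"),
}

def import_class_checked(class_name: str, module_name: str):
    """Sketch: refuse to resolve anything outside the allow-list before importing."""
    if (module_name, class_name) not in ALLOWED_CLASSES:
        raise ValueError(f"refusing to import {module_name}.{class_name}")
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# Usage sketch, mirroring the test above:
# cls = import_class_checked("RunCode", "metagpt.actions.run_code")
# rc = cls(name="X")
```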
from __future__ import annotations
import importlib
import sys
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
from pandas.util._exceptions import find_stack_level
from pandas.util.version import Version
if TYPE_CHECKING:
import types
# Update install.rst, actions-310-minimum_versions.yaml,
# deps_minimum.toml & pyproject.toml when updating versions!
VERSIONS = {
"adbc-driver-postgresql": "0.10.0",
"adbc-driver-sqlite": "0.8.0",
"bs4": "4.11.2",
"blosc": "1.21.3",
"bottleneck": "1.3.6",
"fastparquet": "2023.10.0",
"fsspec": "2022.11.0",
"html5lib": "1.1",
"hypothesis": "6.84.0",
"gcsfs": "2022.11.0",
"jinja2": "3.1.2",
"lxml.etree": "4.9.2",
"matplotlib": "3.6.3",
"numba": "0.56.4",
"numexpr": "2.8.4",
"odfpy": "1.4.1",
"openpyxl": "3.1.0",
"psycopg2": "2.9.6", # (dt dec pq3 ext lo64)
"pymysql": "1.0.2",
"pyarrow": "10.0.1",
"pyreadstat": "1.2.0",
"pytest": "7.3.2",
"python-calamine": "0.1.7",
"pytz": "2023.4",
"pyxlsb": "1.0.10",
"s3fs": "2022.11.0",
"scipy": "1.10.0",
"sqlalchemy": "2.0.0",
"tables": "3.8.0",
"tabulate": "0.9.0",
"xarray": "2022.12.0",
"xlrd": "2.0.1",
"xlsxwriter": "3.0.5",
"zstandard": "0.19.0",
"tzdata": "2022.7",
"qtpy": "2.3.0",
"pyqt5": "5.15.9",
}
# A mapping from import name to package name (on PyPI) for packages where
# these two names are different.
INSTALL_MAPPING = {
"bs4": "beautifulsoup4",
"bottleneck": "Bottleneck",
"jinja2": "Jinja2",
"lxml.etree": "lxml",
"odf": "odfpy",
"python_calamine": "python-calamine",
"sqlalchemy": "SQLAlchemy",
"tables": "pytables",
}
def get_version(module: types.ModuleType) -> str:
version = getattr(module, "__version__", None)
if version is None:
raise ImportError(f"Can't determine version for {module.__name__}")
if module.__name__ == "psycopg2":
# psycopg2 appends " (dt dec pq3 ext lo64)" to its version
version = version.split()[0]
return version
@overload
def import_optional_dependency(
name: str,
extra: str = ...,
min_version: str | None = ...,
*,
errors: Literal["raise"] = ...,
) -> types.ModuleType: ...
@overload
def import_optional_dependency(
name: str,
extra: str = ...,
min_version: str | None = ...,
*,
errors: Literal["warn", "ignore"],
) -> types.ModuleType | None: ...
def import_optional_dependency(
name: str,
extra: str = "",
min_version: str | None = None,
*,
errors: Literal["raise", "warn", "ignore"] = "raise",
) -> types.ModuleType | None:
"""
Import an optional dependency.
By default, if a dependency is missing an ImportError with a nice
message will be raised. If a dependency is present, but too old,
we raise.
Parameters
----------
name : str
The module name.
extra : str
Additional text to include in the ImportError message.
errors : str {'raise', 'warn', 'ignore'}
What to do when a dependency is not found or its version is too old.
* raise : Raise an ImportError
* warn : Only applicable when a module's version is too old.
Warns that the version is too old and returns None
* ignore: If the module is not installed, return None, otherwise,
return the module, even if the version is too old.
It's expected that users validate the version locally when
using ``errors="ignore"`` (see. ``io/html.py``)
min_version : str, default None
Specify a minimum version that is different from the global pandas
minimum version required.
Returns
-------
maybe_module : Optional[ModuleType]
The imported module, when found and the version is correct.
None is returned when the package is not found and `errors`
is ``'ignore'``, or when the package's version is too old and `errors`
is ``'warn'`` or ``'ignore'``.
"""
assert errors in {"warn", "raise", "ignore"}
package_name = INSTALL_MAPPING.get(name)
install_name = package_name if package_name is not None else name
msg = (
f"Missing optional dependency '{install_name}'. {extra} "
f"Use pip or conda to install {install_name}."
)
try:
module = importlib.import_module(name)
except ImportError as err:
if errors == "raise":
raise ImportError(msg) from err
return None
# Handle submodules: if we have submodule, grab parent module from sys.modules
parent = name.split(".")[0]
if parent != name:
install_name = parent
module_to_get = sys.modules[install_name]
else:
module_to_get = module
minimum_version = min_version if min_version is not None else VERSIONS.get(parent)
if minimum_version:
version = get_version(module_to_get)
if version and Version(version) < Version(minimum_version):
msg = (
f"Pandas requires version '{minimum_version}' or newer of '{parent}' "
f"(version '{version}' currently installed)."
)
if errors == "warn":
warnings.warn(
msg,
UserWarning,
stacklevel=find_stack_level(),
)
return None
elif errors == "raise":
raise ImportError(msg)
else:
return None
return module
| 946124.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
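Editorial note: for reference, a short usage sketch of `import_optional_dependency` as defined above. The module names come from the `VERSIONS` table; the `extra` message text is made up for illustration.

```python
# Raise a helpful ImportError if beautifulsoup4 is missing or older than the pinned minimum.
bs4 = import_optional_dependency("bs4", extra="HTML parsing requires beautifulsoup4.")

# Return None instead of raising; a warning is emitted only when the installed version is too old.
matplotlib = import_optional_dependency("matplotlib", errors="warn")
if matplotlib is None:
    print("plotting support disabled")
```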
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
def test_repr_with_unicode_data():
with pd.option_context("display.encoding", "UTF-8"):
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\" not in repr(index) # we don't want unicode-escaped
def test_repr_roundtrip_raises():
mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
eval(repr(mi))
def test_unicode_string_with_unicode():
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
str(idx)
def test_repr_max_seq_item_setting(idx):
# GH10182
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
class TestRepr:
def test_unicode_repr_issues(self):
levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])]
codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
repr(index.get_level_values(1))
def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
with pd.option_context("display.max_seq_items", 6):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
def test_repr(self, idx):
result = idx[:1].__repr__()
expected = """\
MultiIndex([('foo', 'one')],
names=['first', 'second'])"""
assert result == expected
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
with pd.option_context("display.max_seq_items", 5):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
...
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'], length=6)"""
assert result == expected
# display.max_seq_items == 1
with pd.option_context("display.max_seq_items", 1):
result = idx.__repr__()
expected = """\
MultiIndex([...
('qux', 'two')],
names=['first', ...], length=6)"""
assert result == expected
def test_rjust(self):
n = 1000
ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n))
dti = pd.date_range("2000-01-01", freq="s", periods=n * 2)
mi = MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=["a", "b", "dti"])
result = mi[:1].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi[::500].__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:08:20'),
('abc', 10, '2000-01-01 00:16:40'),
('abc', 10, '2000-01-01 00:25:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:00:01'),
( 'a', 9, '2000-01-01 00:00:02'),
( 'a', 9, '2000-01-01 00:00:03'),
( 'a', 9, '2000-01-01 00:00:04'),
( 'a', 9, '2000-01-01 00:00:05'),
( 'a', 9, '2000-01-01 00:00:06'),
( 'a', 9, '2000-01-01 00:00:07'),
( 'a', 9, '2000-01-01 00:00:08'),
( 'a', 9, '2000-01-01 00:00:09'),
...
('abc', 10, '2000-01-01 00:33:10'),
('abc', 10, '2000-01-01 00:33:11'),
('abc', 10, '2000-01-01 00:33:12'),
('abc', 10, '2000-01-01 00:33:13'),
('abc', 10, '2000-01-01 00:33:14'),
('abc', 10, '2000-01-01 00:33:15'),
('abc', 10, '2000-01-01 00:33:16'),
('abc', 10, '2000-01-01 00:33:17'),
('abc', 10, '2000-01-01 00:33:18'),
('abc', 10, '2000-01-01 00:33:19')],
names=['a', 'b', 'dti'], length=2000)"""
assert result == expected
def test_tuple_width(self):
n = 1000
ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n))
dti = pd.date_range("2000-01-01", freq="s", periods=n * 2)
levels = [ci, ci.codes + 9, dti, dti, dti]
names = ["a", "b", "dti_1", "dti_2", "dti_3"]
mi = MultiIndex.from_arrays(levels, names=names)
result = mi[:1].__repr__()
expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa: E501
assert result == expected
result = mi[:10].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),
...
('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),
('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),
('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),
('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),
('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),
('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),
('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),
('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
assert result == expected
def test_multiindex_long_element(self):
# Non-regression test towards GH#52960
data = MultiIndex.from_tuples([("c" * 62,)])
expected = (
"MultiIndex([('cccccccccccccccccccccccccccccccccccccccc"
"cccccccccccccccccccccc',)],\n )"
)
assert str(data) == expected
| 222094.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
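Editorial note: the CWE-95 tag here comes from `test_repr_roundtrip_raises`, which feeds `repr(mi)` back through `eval`. That is acceptable inside a test that controls its own input, but as a general round-trip mechanism it evaluates arbitrary text. A hedged sketch of rebuilding the same index from data instead:

```python
from pandas import MultiIndex

mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"])

# Reconstruct the index explicitly rather than evaluating its repr text.
rebuilt = MultiIndex.from_tuples(list(mi), names=mi.names)
assert rebuilt.equals(mi)
```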
"""
self-contained to write legacy storage pickle files
To use this script, create an environment where you want to
generate pickles, say it's for 0.20.3, with your pandas clone
in ~/pandas
. activate pandas_0.20.3
cd ~/pandas/pandas
$ python -m tests.io.generate_legacy_storage_files \
tests/io/data/legacy_pickle/0.20.3/ pickle
This script generates a storage file for the current arch, system,
and python version
pandas version: 0.20.3
output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
storage format: pickle
created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle
The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with main). These are then compared.
If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp). Then we have to conditionally execute
in the generate_legacy_storage_files.py to make it
run under the older AND the newer version.
"""
from datetime import timedelta
import os
import pickle
import platform as pl
import sys
# Remove script directory from path, otherwise Python will try to
# import the JSON test directory as the json module
sys.path.pop(0)
import numpy as np
import pandas
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
NaT,
Period,
RangeIndex,
Series,
Timestamp,
bdate_range,
date_range,
interval_range,
period_range,
timedelta_range,
)
from pandas.arrays import SparseArray
from pandas.tseries.offsets import (
FY5253,
BusinessDay,
BusinessHour,
CustomBusinessDay,
DateOffset,
Day,
Easter,
Hour,
LastWeekOfMonth,
Minute,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
SemiMonthBegin,
SemiMonthEnd,
Week,
WeekOfMonth,
YearBegin,
YearEnd,
)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = Series(SparseArray(arr, kind="block"))
bseries.name = "bseries"
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range("1/1/2011", periods=len(arr))
bseries = Series(SparseArray(arr, kind="block"), index=date_index)
bseries.name = "btsseries"
return bseries
def _create_sp_frame():
nan = np.nan
data = {
"A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
"B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
"C": np.arange(10).astype(np.int64),
"D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
}
dates = bdate_range("1/1/2011", periods=10)
return DataFrame(data, index=dates).apply(SparseArray)
def create_pickle_data():
"""create the pickle data"""
data = {
"A": [0.0, 1.0, 2.0, 3.0, np.nan],
"B": [0, 1, 0, 1, 0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": date_range("1/1/2009", periods=5),
"E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
}
scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}
index = {
"int": Index(np.arange(10)),
"date": date_range("20130101", periods=10),
"period": period_range("2013-01-01", freq="M", periods=10),
"float": Index(np.arange(10, dtype=np.float64)),
"uint": Index(np.arange(10, dtype=np.uint64)),
"timedelta": timedelta_range("00:00:00", freq="30min", periods=10),
}
index["range"] = RangeIndex(10)
index["interval"] = interval_range(0, periods=10)
mi = {
"reg2": MultiIndex.from_tuples(
tuple(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
),
names=["first", "second"],
)
}
series = {
"float": Series(data["A"]),
"int": Series(data["B"]),
"mixed": Series(data["E"]),
"ts": Series(
np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
),
"mi": Series(
np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
),
),
"dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
"cat": Series(Categorical(["foo", "bar", "baz"])),
"dt": Series(date_range("20130101", periods=5)),
"dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
"period": Series([Period("2000Q1")] * 5),
}
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list("ABCDA")
frame = {
"float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
"int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
"mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
"mi": DataFrame(
{"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(
zip(
*[
["bar", "bar", "baz", "baz", "baz"],
["one", "two", "one", "two", "three"],
]
)
),
names=["first", "second"],
),
),
"dup": DataFrame(
np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
),
"cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
"cat_and_float": DataFrame(
{
"A": Categorical(["foo", "bar", "baz"]),
"B": np.arange(3).astype(np.int64),
}
),
"mixed_dup": mixed_dup_df,
"dt_mixed_tzs": DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
},
index=range(5),
),
"dt_mixed2_tzs": DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
"C": Timestamp("20130603", tz="UTC"),
},
index=range(5),
),
}
cat = {
"int8": Categorical(list("abcdefg")),
"int16": Categorical(np.arange(1000)),
"int32": Categorical(np.arange(10000)),
}
timestamp = {
"normal": Timestamp("2011-01-01"),
"nat": NaT,
"tz": Timestamp("2011-01-01", tz="US/Eastern"),
}
off = {
"DateOffset": DateOffset(years=1),
"DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
"BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
"BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
"CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
"SemiMonthBegin": SemiMonthBegin(day_of_month=9),
"SemiMonthEnd": SemiMonthEnd(day_of_month=24),
"MonthBegin": MonthBegin(1),
"MonthEnd": MonthEnd(1),
"QuarterBegin": QuarterBegin(1),
"QuarterEnd": QuarterEnd(1),
"Day": Day(1),
"YearBegin": YearBegin(1),
"YearEnd": YearEnd(1),
"Week": Week(1),
"Week_Tues": Week(2, normalize=False, weekday=1),
"WeekOfMonth": WeekOfMonth(week=3, weekday=4),
"LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
"FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
"Easter": Easter(),
"Hour": Hour(1),
"Minute": Minute(1),
}
return {
"series": series,
"frame": frame,
"index": index,
"scalars": scalars,
"mi": mi,
"sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
"sp_frame": {"float": _create_sp_frame()},
"cat": cat,
"timestamp": timestamp,
"offsets": off,
}
def platform_name():
return "_".join(
[
str(pandas.__version__),
str(pl.machine()),
str(pl.system().lower()),
str(pl.python_version()),
]
)
def write_legacy_pickles(output_dir):
version = pandas.__version__
print(
"This script generates a storage file for the current arch, system, "
"and python version"
)
print(f" pandas version: {version}")
print(f" output dir : {output_dir}")
print(" storage format: pickle")
pth = f"{platform_name()}.pickle"
with open(os.path.join(output_dir, pth), "wb") as fh:
pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL)
print(f"created pickle file: {pth}")
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, "")
if not 3 <= len(sys.argv) <= 4:
sys.exit(
"Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
)
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if storage_type == "pickle":
write_legacy_pickles(output_dir=output_dir)
else:
sys.exit("storage_type must be one of {'pickle'}")
if __name__ == "__main__":
write_legacy_file()
| 426331.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
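Editorial note: the legacy files written here are later loaded back with pickle (via test_pickle.py, per the docstring), hence the CWE-502 tag: unpickling attacker-controlled bytes can execute code via `__reduce__`. Below is a minimal sketch of the failure mode and of a restricted unpickler; the `Exploit` class and the allow-list contents are illustrative only.

```python
import io
import pickle

class Exploit:
    # pickle calls __reduce__ when serializing; on load, the returned callable is invoked.
    def __reduce__(self):
        return (print, ("arbitrary code would run here",))

payload = pickle.dumps(Exploit())
pickle.loads(payload)  # prints the message: deserialization executed a call we chose

class RestrictedUnpickler(pickle.Unpickler):
    """Sketch: only allow a small, explicit set of globals to be resolved."""
    ALLOWED = {("builtins", "range")}  # illustrative allow-list

    def find_class(self, module, name):
        if (module, name) in self.ALLOWED:
            return super().find_class(module, name)
        raise pickle.UnpicklingError(f"global {module}.{name} is forbidden")

# RestrictedUnpickler(io.BytesIO(payload)).load()  # would raise UnpicklingError
```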
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import time
import cv2
import numpy as np
import paddle
from tqdm import tqdm
from paddleslim.common import load_config as load_slim_config
from paddleslim.common import get_logger
sys.path.append("../../../")
from ppocr.data import build_dataloader
from ppocr.postprocess import build_post_process
from ppocr.metrics import build_metric
from paddle.inference import create_predictor, PrecisionType
from paddle.inference import Config as PredictConfig
logger = get_logger(__name__, level=logging.INFO)
def find_images_with_bounding_size(dataset: paddle.io.Dataset):
max_length_index = -1
max_width_index = -1
min_length_index = -1
min_width_index = -1
max_length = float("-inf")
max_width = float("-inf")
min_length = float("inf")
min_width = float("inf")
for idx, data in enumerate(dataset):
image = np.array(data[0])
h, w = image.shape[-2:]
if h > max_length:
max_length = h
max_length_index = idx
if w > max_width:
max_width = w
max_width_index = idx
if h < min_length:
min_length = h
min_length_index = idx
if w < min_width:
min_width = w
min_width_index = idx
print(f"Found max image length: {max_length}, index: {max_length_index}")
print(f"Found max image width: {max_width}, index: {max_width_index}")
print(f"Found min image length: {min_length}, index: {min_length_index}")
print(f"Found min image width: {min_width}, index: {min_width_index}")
return paddle.io.Subset(
dataset, [max_width_index, max_length_index, min_width_index, min_length_index]
)
def load_predictor(args):
"""
load predictor func
"""
rerun_flag = False
model_file = os.path.join(args.model_path, args.model_filename)
params_file = os.path.join(args.model_path, args.params_filename)
pred_cfg = PredictConfig(model_file, params_file)
pred_cfg.enable_memory_optim()
pred_cfg.switch_ir_optim(True)
if args.device == "GPU":
pred_cfg.enable_use_gpu(100, 0)
else:
pred_cfg.disable_gpu()
pred_cfg.set_cpu_math_library_num_threads(args.cpu_threads)
if args.use_mkldnn:
pred_cfg.enable_mkldnn()
if args.precision == "int8":
pred_cfg.enable_mkldnn_int8({"conv2d"})
if global_config["model_type"] == "rec":
# delete pass which influence the accuracy, please refer to https://github.com/PaddlePaddle/Paddle/issues/55290
pred_cfg.delete_pass("fc_mkldnn_pass")
pred_cfg.delete_pass("fc_act_mkldnn_fuse_pass")
if args.use_trt:
# To collect the dynamic shapes of inputs for TensorRT engine
dynamic_shape_file = os.path.join(args.model_path, "dynamic_shape.txt")
if os.path.exists(dynamic_shape_file):
pred_cfg.enable_tuned_tensorrt_dynamic_shape(dynamic_shape_file, True)
print("trt set dynamic shape done!")
precision_map = {
"fp16": PrecisionType.Half,
"fp32": PrecisionType.Float32,
"int8": PrecisionType.Int8,
}
if (
args.precision == "int8"
and "ppocrv4_det_server_qat_dist.yaml" in args.config_path
):
# Use the following settings only when the hardware is a Tesla V100. If you are using
# a RTX 3090, use the settings in the else branch.
pred_cfg.enable_tensorrt_engine(
workspace_size=1 << 30,
max_batch_size=1,
min_subgraph_size=30,
precision_mode=precision_map[args.precision],
use_static=True,
use_calib_mode=False,
)
pred_cfg.exp_disable_tensorrt_ops(["elementwise_add"])
else:
pred_cfg.enable_tensorrt_engine(
workspace_size=1 << 30,
max_batch_size=1,
min_subgraph_size=4,
precision_mode=precision_map[args.precision],
use_static=True,
use_calib_mode=False,
)
else:
# pred_cfg.disable_gpu()
# pred_cfg.set_cpu_math_library_num_threads(24)
pred_cfg.collect_shape_range_info(dynamic_shape_file)
print("Start collect dynamic shape...")
rerun_flag = True
predictor = create_predictor(pred_cfg)
return predictor, rerun_flag
def eval(args):
"""
eval mIoU func
"""
# DataLoader need run on cpu
paddle.set_device("cpu")
devices = paddle.device.get_device().split(":")[0]
val_loader = build_dataloader(all_config, "Eval", devices, logger)
post_process_class = build_post_process(all_config["PostProcess"], global_config)
eval_class = build_metric(all_config["Metric"])
model_type = global_config["model_type"]
predictor, rerun_flag = load_predictor(args)
if rerun_flag:
eval_dataset = find_images_with_bounding_size(val_loader.dataset)
batch_sampler = paddle.io.BatchSampler(
eval_dataset, batch_size=1, shuffle=False, drop_last=False
)
val_loader = paddle.io.DataLoader(
eval_dataset, batch_sampler=batch_sampler, num_workers=4, return_list=True
)
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
sample_nums = len(val_loader)
predict_time = 0.0
time_min = float("inf")
time_max = float("-inf")
print("Start evaluating ( total_iters: {}).".format(sample_nums))
for batch_id, batch in enumerate(val_loader):
images = np.array(batch[0])
batch_numpy = []
for item in batch:
batch_numpy.append(np.array(item))
# ori_shape = np.array(batch_numpy).shape[-2:]
input_handle.reshape(images.shape)
input_handle.copy_from_cpu(images)
start_time = time.time()
predictor.run()
preds = output_handle.copy_to_cpu()
end_time = time.time()
timed = end_time - start_time
time_min = min(time_min, timed)
time_max = max(time_max, timed)
predict_time += timed
if model_type == "det":
preds_map = {"maps": preds}
post_result = post_process_class(preds_map, batch_numpy[1])
eval_class(post_result, batch_numpy)
elif model_type == "rec":
post_result = post_process_class(preds, batch_numpy[1])
eval_class(post_result, batch_numpy)
if rerun_flag:
if batch_id == 3:
print(
"***** Collect dynamic shape done, Please rerun the program to get correct results. *****"
)
return
if batch_id % 100 == 0:
print("Eval iter:", batch_id)
sys.stdout.flush()
metric = eval_class.get_metric()
time_avg = predict_time / sample_nums
print(
"[Benchmark] Inference time(ms): min={}, max={}, avg={}".format(
round(time_min * 1000, 2),
round(time_max * 1000, 1),
round(time_avg * 1000, 1),
)
)
for k, v in metric.items():
print("{}:{}".format(k, v))
sys.stdout.flush()
def main():
global all_config, global_config
all_config = load_slim_config(args.config_path)
global_config = all_config["Global"]
eval(args)
if __name__ == "__main__":
paddle.enable_static()
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, help="inference model filepath")
parser.add_argument(
"--config_path",
type=str,
default="./configs/ppocrv3_det_qat_dist.yaml",
help="path of compression strategy config.",
)
parser.add_argument(
"--model_filename",
type=str,
default="inference.pdmodel",
help="model file name",
)
parser.add_argument(
"--params_filename",
type=str,
default="inference.pdiparams",
help="params file name",
)
parser.add_argument(
"--device",
type=str,
default="GPU",
choices=["CPU", "GPU"],
help="Choose the device you want to run, it can be: CPU/GPU, default is GPU",
)
parser.add_argument(
"--precision",
type=str,
default="fp32",
choices=["fp32", "fp16", "int8"],
help="The precision of inference. It can be 'fp32', 'fp16' or 'int8'. Default is 'fp16'.",
)
parser.add_argument(
"--use_trt",
type=bool,
default=False,
help="Whether to use tensorrt engine or not.",
)
parser.add_argument(
"--use_mkldnn", type=bool, default=False, help="Whether use mkldnn or not."
)
parser.add_argument(
"--cpu_threads", type=int, default=10, help="Num of cpu threads."
)
args = parser.parse_args()
main()
| 938513.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
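Editorial note: one sharp edge in the CLI above is that `--use_trt` and `--use_mkldnn` are declared with `type=bool`, and `bool("False")` is `True`, so any non-empty value enables the flag. The sketch below is the more conventional argparse pattern under the assumption that both flags should default to off; it is not taken from the source.

```python
import argparse

parser = argparse.ArgumentParser()
# store_true flags: present means True, absent means False, no surprising string casts.
parser.add_argument("--use_trt", action="store_true", help="Use the TensorRT engine.")
parser.add_argument("--use_mkldnn", action="store_true", help="Use MKL-DNN.")

print(parser.parse_args(["--use_trt"]).use_trt)  # True
print(parser.parse_args([]).use_trt)             # False
print(bool("False"))                             # True: why type=bool misleads
```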
# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/lukas-blecher/LaTeX-OCR/blob/main/pix2tex/dataset/dataset.py
"""
import numpy as np
import cv2
import math
import os
import json
import pickle
import random
import traceback
import paddle
from paddle.io import Dataset
from .imaug.label_ops import LatexOCRLabelEncode
from .imaug import transform, create_operators
class LaTeXOCRDataSet(Dataset):
def __init__(self, config, mode, logger, seed=None):
super(LaTeXOCRDataSet, self).__init__()
self.logger = logger
self.mode = mode.lower()
global_config = config["Global"]
dataset_config = config[mode]["dataset"]
loader_config = config[mode]["loader"]
pkl_path = dataset_config.pop("data")
self.data_dir = dataset_config["data_dir"]
self.min_dimensions = dataset_config.pop("min_dimensions")
self.max_dimensions = dataset_config.pop("max_dimensions")
self.batchsize = dataset_config.pop("batch_size_per_pair")
self.keep_smaller_batches = dataset_config.pop("keep_smaller_batches")
self.max_seq_len = global_config.pop("max_seq_len")
self.rec_char_dict_path = global_config.pop("rec_char_dict_path")
self.tokenizer = LatexOCRLabelEncode(self.rec_char_dict_path)
# NOTE: pickle.load deserializes whatever the pkl file contains; only load trusted
# dataset metadata here (the CWE-502 weakness tagged below).
with open(pkl_path, "rb") as file:
data = pickle.load(file)
temp = {}
for k in data:
if (
self.min_dimensions[0] <= k[0] <= self.max_dimensions[0]
and self.min_dimensions[1] <= k[1] <= self.max_dimensions[1]
):
temp[k] = data[k]
self.data = temp
self.do_shuffle = loader_config["shuffle"]
self.seed = seed
if self.mode == "train" and self.do_shuffle:
random.seed(self.seed)
self.pairs = []
for k in self.data:
info = np.array(self.data[k], dtype=object)
p = (
paddle.randperm(len(info))
if self.mode == "train" and self.do_shuffle
else paddle.arange(len(info))
)
for i in range(0, len(info), self.batchsize):
batch = info[p[i : i + self.batchsize]]
if len(batch.shape) == 1:
batch = batch[None, :]
if len(batch) < self.batchsize and not self.keep_smaller_batches:
continue
self.pairs.append(batch)
if self.do_shuffle:
self.pairs = np.random.permutation(np.array(self.pairs, dtype=object))
else:
self.pairs = np.array(self.pairs, dtype=object)
self.size = len(self.pairs)
self.set_epoch_as_seed(self.seed, dataset_config)
self.ops = create_operators(dataset_config["transforms"], global_config)
self.ext_op_transform_idx = dataset_config.get("ext_op_transform_idx", 2)
self.need_reset = True
def set_epoch_as_seed(self, seed, dataset_config):
if self.mode == "train":
try:
border_map_id = [
index
for index, dictionary in enumerate(dataset_config["transforms"])
if "MakeBorderMap" in dictionary
][0]
shrink_map_id = [
index
for index, dictionary in enumerate(dataset_config["transforms"])
if "MakeShrinkMap" in dictionary
][0]
dataset_config["transforms"][border_map_id]["MakeBorderMap"][
"epoch"
] = (seed if seed is not None else 0)
dataset_config["transforms"][shrink_map_id]["MakeShrinkMap"][
"epoch"
] = (seed if seed is not None else 0)
except Exception as E:
print(E)
return
def shuffle_data_random(self):
random.seed(self.seed)
random.shuffle(self.data_lines)
return
def __getitem__(self, idx):
batch = self.pairs[idx]
eqs, ims = batch.T
try:
max_width, max_height, max_length = 0, 0, 0
images_transform = []
for file_name in ims:
img_path = os.path.join(self.data_dir, file_name)
data = {
"img_path": img_path,
}
with open(data["img_path"], "rb") as f:
img = f.read()
data["image"] = img
item = transform(data, self.ops)
images_transform.append(np.array(item[0]))
image_concat = np.concatenate(images_transform, axis=0)[:, np.newaxis, :, :]
images_transform = image_concat.astype(np.float32)
labels, attention_mask, max_length = self.tokenizer(list(eqs))
if self.max_seq_len < max_length:
rnd_idx = (
np.random.randint(self.__len__())
if self.mode == "train"
else (idx + 1) % self.__len__()
)
return self.__getitem__(rnd_idx)
return (images_transform, labels, attention_mask)
except Exception:
self.logger.error(
"When parsing line {}, error happened with msg: {}".format(
data["img_path"], traceback.format_exc()
)
)
outs = None
if outs is None:
# during evaluation, we should fix the idx to get same results for many times of evaluation.
rnd_idx = (
np.random.randint(self.__len__())
if self.mode == "train"
else (idx + 1) % self.__len__()
)
return self.__getitem__(rnd_idx)
return outs
def __len__(self):
return self.size
| 116843.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
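Editorial note: since `LaTeXOCRDataSet` unpickles whatever `dataset_config["data"]` points at (the CWE-502 tag above), one lightweight mitigation is to pin the expected digest of the metadata file in the config and verify it before loading. The helper and the digest value below are placeholders, not part of the source.

```python
import hashlib
import pickle

EXPECTED_SHA256 = "0" * 64  # placeholder: record the real digest when the pkl is produced

def load_trusted_pkl(pkl_path: str):
    """Sketch: refuse to unpickle a metadata file whose digest does not match."""
    with open(pkl_path, "rb") as f:
        blob = f.read()
    digest = hashlib.sha256(blob).hexdigest()
    if digest != EXPECTED_SHA256:
        raise ValueError(f"unexpected digest {digest} for {pkl_path}")
    return pickle.loads(blob)
```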
import torch
import ldm_patched.modules.clip_vision
import safetensors.torch as sf
import ldm_patched.modules.model_management as model_management
import ldm_patched.ldm.modules.attention as attention
from extras.resampler import Resampler
from ldm_patched.modules.model_patcher import ModelPatcher
from modules.core import numpy_to_pytorch
from modules.ops import use_patched_ops
from ldm_patched.modules.ops import manual_cast
SD_V12_CHANNELS = [320] * 4 + [640] * 4 + [1280] * 4 + [1280] * 6 + [640] * 6 + [320] * 6 + [1280] * 2
SD_XL_CHANNELS = [640] * 8 + [1280] * 40 + [1280] * 60 + [640] * 12 + [1280] * 20
def sdp(q, k, v, extra_options):
return attention.optimized_attention(q, k, v, heads=extra_options["n_heads"], mask=None)
class ImageProjModel(torch.nn.Module):
def __init__(self, cross_attention_dim=1024, clip_embeddings_dim=1024, clip_extra_context_tokens=4):
super().__init__()
self.cross_attention_dim = cross_attention_dim
self.clip_extra_context_tokens = clip_extra_context_tokens
self.proj = torch.nn.Linear(clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
self.norm = torch.nn.LayerNorm(cross_attention_dim)
def forward(self, image_embeds):
embeds = image_embeds
clip_extra_context_tokens = self.proj(embeds).reshape(-1, self.clip_extra_context_tokens,
self.cross_attention_dim)
clip_extra_context_tokens = self.norm(clip_extra_context_tokens)
return clip_extra_context_tokens
class To_KV(torch.nn.Module):
def __init__(self, cross_attention_dim):
super().__init__()
channels = SD_XL_CHANNELS if cross_attention_dim == 2048 else SD_V12_CHANNELS
self.to_kvs = torch.nn.ModuleList(
[torch.nn.Linear(cross_attention_dim, channel, bias=False) for channel in channels])
def load_state_dict_ordered(self, sd):
state_dict = []
for i in range(4096):
for k in ['k', 'v']:
key = f'{i}.to_{k}_ip.weight'
if key in sd:
state_dict.append(sd[key])
for i, v in enumerate(state_dict):
self.to_kvs[i].weight = torch.nn.Parameter(v, requires_grad=False)
class IPAdapterModel(torch.nn.Module):
def __init__(self, state_dict, plus, cross_attention_dim=768, clip_embeddings_dim=1024, clip_extra_context_tokens=4,
sdxl_plus=False):
super().__init__()
self.plus = plus
if self.plus:
self.image_proj_model = Resampler(
dim=1280 if sdxl_plus else cross_attention_dim,
depth=4,
dim_head=64,
heads=20 if sdxl_plus else 12,
num_queries=clip_extra_context_tokens,
embedding_dim=clip_embeddings_dim,
output_dim=cross_attention_dim,
ff_mult=4
)
else:
self.image_proj_model = ImageProjModel(
cross_attention_dim=cross_attention_dim,
clip_embeddings_dim=clip_embeddings_dim,
clip_extra_context_tokens=clip_extra_context_tokens
)
self.image_proj_model.load_state_dict(state_dict["image_proj"])
self.ip_layers = To_KV(cross_attention_dim)
self.ip_layers.load_state_dict_ordered(state_dict["ip_adapter"])
clip_vision: ldm_patched.modules.clip_vision.ClipVisionModel = None
ip_negative: torch.Tensor = None
ip_adapters: dict = {}
def load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path):
global clip_vision, ip_negative, ip_adapters
if clip_vision is None and isinstance(clip_vision_path, str):
clip_vision = ldm_patched.modules.clip_vision.load(clip_vision_path)
if ip_negative is None and isinstance(ip_negative_path, str):
ip_negative = sf.load_file(ip_negative_path)['data']
if not isinstance(ip_adapter_path, str) or ip_adapter_path in ip_adapters:
return
load_device = model_management.get_torch_device()
offload_device = torch.device('cpu')
use_fp16 = model_management.should_use_fp16(device=load_device)
ip_state_dict = torch.load(ip_adapter_path, map_location="cpu", weights_only=True)
plus = "latents" in ip_state_dict["image_proj"]
cross_attention_dim = ip_state_dict["ip_adapter"]["1.to_k_ip.weight"].shape[1]
sdxl = cross_attention_dim == 2048
sdxl_plus = sdxl and plus
if plus:
clip_extra_context_tokens = ip_state_dict["image_proj"]["latents"].shape[1]
clip_embeddings_dim = ip_state_dict["image_proj"]["latents"].shape[2]
else:
clip_extra_context_tokens = ip_state_dict["image_proj"]["proj.weight"].shape[0] // cross_attention_dim
clip_embeddings_dim = None
with use_patched_ops(manual_cast):
ip_adapter = IPAdapterModel(
ip_state_dict,
plus=plus,
cross_attention_dim=cross_attention_dim,
clip_embeddings_dim=clip_embeddings_dim,
clip_extra_context_tokens=clip_extra_context_tokens,
sdxl_plus=sdxl_plus
)
ip_adapter.sdxl = sdxl
ip_adapter.load_device = load_device
ip_adapter.offload_device = offload_device
ip_adapter.dtype = torch.float16 if use_fp16 else torch.float32
ip_adapter.to(offload_device, dtype=ip_adapter.dtype)
image_proj_model = ModelPatcher(model=ip_adapter.image_proj_model, load_device=load_device,
offload_device=offload_device)
ip_layers = ModelPatcher(model=ip_adapter.ip_layers, load_device=load_device,
offload_device=offload_device)
ip_adapters[ip_adapter_path] = dict(
ip_adapter=ip_adapter,
image_proj_model=image_proj_model,
ip_layers=ip_layers,
ip_unconds=None
)
return
@torch.no_grad()
@torch.inference_mode()
def clip_preprocess(image):
mean = torch.tensor([0.48145466, 0.4578275, 0.40821073], device=image.device, dtype=image.dtype).view([1, 3, 1, 1])
std = torch.tensor([0.26862954, 0.26130258, 0.27577711], device=image.device, dtype=image.dtype).view([1, 3, 1, 1])
image = image.movedim(-1, 1)
# https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75
B, C, H, W = image.shape
assert H == 224 and W == 224
return (image - mean) / std
@torch.no_grad()
@torch.inference_mode()
def preprocess(img, ip_adapter_path):
global ip_adapters
entry = ip_adapters[ip_adapter_path]
ldm_patched.modules.model_management.load_model_gpu(clip_vision.patcher)
pixel_values = clip_preprocess(numpy_to_pytorch(img).to(clip_vision.load_device))
outputs = clip_vision.model(pixel_values=pixel_values, output_hidden_states=True)
ip_adapter = entry['ip_adapter']
ip_layers = entry['ip_layers']
image_proj_model = entry['image_proj_model']
ip_unconds = entry['ip_unconds']
if ip_adapter.plus:
cond = outputs.hidden_states[-2]
else:
cond = outputs.image_embeds
cond = cond.to(device=ip_adapter.load_device, dtype=ip_adapter.dtype)
ldm_patched.modules.model_management.load_model_gpu(image_proj_model)
cond = image_proj_model.model(cond).to(device=ip_adapter.load_device, dtype=ip_adapter.dtype)
ldm_patched.modules.model_management.load_model_gpu(ip_layers)
if ip_unconds is None:
uncond = ip_negative.to(device=ip_adapter.load_device, dtype=ip_adapter.dtype)
ip_unconds = [m(uncond).cpu() for m in ip_layers.model.to_kvs]
entry['ip_unconds'] = ip_unconds
ip_conds = [m(cond).cpu() for m in ip_layers.model.to_kvs]
return ip_conds, ip_unconds
@torch.no_grad()
@torch.inference_mode()
def patch_model(model, tasks):
new_model = model.clone()
def make_attn_patcher(ip_index):
def patcher(n, context_attn2, value_attn2, extra_options):
org_dtype = n.dtype
current_step = float(model.model.diffusion_model.current_step.detach().cpu().numpy()[0])
cond_or_uncond = extra_options['cond_or_uncond']
q = n
k = [context_attn2]
v = [value_attn2]
b, _, _ = q.shape
for (cs, ucs), cn_stop, cn_weight in tasks:
if current_step < cn_stop:
ip_k_c = cs[ip_index * 2].to(q)
ip_v_c = cs[ip_index * 2 + 1].to(q)
ip_k_uc = ucs[ip_index * 2].to(q)
ip_v_uc = ucs[ip_index * 2 + 1].to(q)
ip_k = torch.cat([(ip_k_c, ip_k_uc)[i] for i in cond_or_uncond], dim=0)
ip_v = torch.cat([(ip_v_c, ip_v_uc)[i] for i in cond_or_uncond], dim=0)
# Midjourney's attention formulation of image prompt (non-official reimplementation)
# Written by Lvmin Zhang at Stanford University, 2023 Dec
# For non-commercial use only - if you use this in commercial project then
# probably it has some intellectual property issues.
# Contact [email protected] if you are not sure.
# Below is the sensitive part with potential intellectual property issues.
ip_v_mean = torch.mean(ip_v, dim=1, keepdim=True)
ip_v_offset = ip_v - ip_v_mean
B, F, C = ip_k.shape
channel_penalty = float(C) / 1280.0
weight = cn_weight * channel_penalty
ip_k = ip_k * weight
ip_v = ip_v_offset + ip_v_mean * weight
k.append(ip_k)
v.append(ip_v)
k = torch.cat(k, dim=1)
v = torch.cat(v, dim=1)
out = sdp(q, k, v, extra_options)
return out.to(dtype=org_dtype)
return patcher
def set_model_patch_replace(model, number, key):
to = model.model_options["transformer_options"]
if "patches_replace" not in to:
to["patches_replace"] = {}
if "attn2" not in to["patches_replace"]:
to["patches_replace"]["attn2"] = {}
if key not in to["patches_replace"]["attn2"]:
to["patches_replace"]["attn2"][key] = make_attn_patcher(number)
number = 0
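    # The index pattern below appears to enumerate the cross-attention (attn2) layers of an
    # SDXL-style UNet (an assumption based on the ranges, not stated here): input blocks 4 and 5
    # carry 2 transformer layers each, input blocks 7 and 8 carry 10, output blocks 0-2 carry 10
    # while 3-5 carry 2, and the middle block carries 10.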
for id in [4, 5, 7, 8]:
block_indices = range(2) if id in [4, 5] else range(10)
for index in block_indices:
set_model_patch_replace(new_model, number, ("input", id, index))
number += 1
for id in range(6):
block_indices = range(2) if id in [3, 4, 5] else range(10)
for index in block_indices:
set_model_patch_replace(new_model, number, ("output", id, index))
number += 1
for index in range(10):
set_model_patch_replace(new_model, number, ("middle", 0, index))
number += 1
return new_model
| 791267.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import torch
from ldm_patched.ldm.modules.diffusionmodules.util import make_beta_schedule
import math
import numpy as np
class EPS:
def calculate_input(self, sigma, noise):
sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
return noise / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
def calculate_denoised(self, sigma, model_output, model_input):
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
return model_input - model_output * sigma
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
if max_denoise:
noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
else:
noise = noise * sigma
noise += latent_image
return noise
def inverse_noise_scaling(self, sigma, latent):
return latent
class V_PREDICTION(EPS):
def calculate_denoised(self, sigma, model_output, model_input):
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) - model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
class EDM(V_PREDICTION):
def calculate_denoised(self, sigma, model_output, model_input):
sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
return model_input * self.sigma_data ** 2 / (sigma ** 2 + self.sigma_data ** 2) + model_output * sigma * self.sigma_data / (sigma ** 2 + self.sigma_data ** 2) ** 0.5
class ModelSamplingDiscrete(torch.nn.Module):
def __init__(self, model_config=None):
super().__init__()
if model_config is not None:
sampling_settings = model_config.sampling_settings
else:
sampling_settings = {}
beta_schedule = sampling_settings.get("beta_schedule", "linear")
linear_start = sampling_settings.get("linear_start", 0.00085)
linear_end = sampling_settings.get("linear_end", 0.012)
self._register_schedule(given_betas=None, beta_schedule=beta_schedule, timesteps=1000, linear_start=linear_start, linear_end=linear_end, cosine_s=8e-3)
self.sigma_data = 1.0
def _register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if given_betas is not None:
betas = given_betas
else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.linear_start = linear_start
self.linear_end = linear_end
# self.register_buffer('betas', torch.tensor(betas, dtype=torch.float32))
# self.register_buffer('alphas_cumprod', torch.tensor(alphas_cumprod, dtype=torch.float32))
# self.register_buffer('alphas_cumprod_prev', torch.tensor(alphas_cumprod_prev, dtype=torch.float32))
sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
self.set_sigmas(sigmas)
self.set_alphas_cumprod(alphas_cumprod.float())
def set_sigmas(self, sigmas):
self.register_buffer('sigmas', sigmas.float())
self.register_buffer('log_sigmas', sigmas.log().float())
def set_alphas_cumprod(self, alphas_cumprod):
self.register_buffer("alphas_cumprod", alphas_cumprod.float())
@property
def sigma_min(self):
return self.sigmas[0]
@property
def sigma_max(self):
return self.sigmas[-1]
def timestep(self, sigma):
log_sigma = sigma.log()
dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device)
def sigma(self, timestep):
t = torch.clamp(timestep.float().to(self.log_sigmas.device), min=0, max=(len(self.sigmas) - 1))
low_idx = t.floor().long()
high_idx = t.ceil().long()
w = t.frac()
log_sigma = (1 - w) * self.log_sigmas[low_idx] + w * self.log_sigmas[high_idx]
return log_sigma.exp().to(timestep.device)
def percent_to_sigma(self, percent):
if percent <= 0.0:
return 999999999.9
if percent >= 1.0:
return 0.0
percent = 1.0 - percent
return self.sigma(torch.tensor(percent * 999.0)).item()
class ModelSamplingContinuousEDM(torch.nn.Module):
def __init__(self, model_config=None):
super().__init__()
if model_config is not None:
sampling_settings = model_config.sampling_settings
else:
sampling_settings = {}
sigma_min = sampling_settings.get("sigma_min", 0.002)
sigma_max = sampling_settings.get("sigma_max", 120.0)
sigma_data = sampling_settings.get("sigma_data", 1.0)
self.set_parameters(sigma_min, sigma_max, sigma_data)
def set_parameters(self, sigma_min, sigma_max, sigma_data):
self.sigma_data = sigma_data
sigmas = torch.linspace(math.log(sigma_min), math.log(sigma_max), 1000).exp()
self.register_buffer('sigmas', sigmas) #for compatibility with some schedulers
self.register_buffer('log_sigmas', sigmas.log())
@property
def sigma_min(self):
return self.sigmas[0]
@property
def sigma_max(self):
return self.sigmas[-1]
def timestep(self, sigma):
return 0.25 * sigma.log()
def sigma(self, timestep):
return (timestep / 0.25).exp()
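    # timestep() and sigma() are exact inverses: sigma(timestep(s)) = exp(4 * 0.25 * log(s)) = s,
    # e.g. s = 2.0 -> timestep ~= 0.1733 -> sigma(0.1733) ~= 2.0.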
def percent_to_sigma(self, percent):
if percent <= 0.0:
return 999999999.9
if percent >= 1.0:
return 0.0
percent = 1.0 - percent
log_sigma_min = math.log(self.sigma_min)
return math.exp((math.log(self.sigma_max) - log_sigma_min) * percent + log_sigma_min)
class StableCascadeSampling(ModelSamplingDiscrete):
def __init__(self, model_config=None):
super().__init__()
if model_config is not None:
sampling_settings = model_config.sampling_settings
else:
sampling_settings = {}
self.set_parameters(sampling_settings.get("shift", 1.0))
def set_parameters(self, shift=1.0, cosine_s=8e-3):
self.shift = shift
self.cosine_s = torch.tensor(cosine_s)
self._init_alpha_cumprod = torch.cos(self.cosine_s / (1 + self.cosine_s) * torch.pi * 0.5) ** 2
#This part is just for compatibility with some schedulers in the codebase
self.num_timesteps = 10000
sigmas = torch.empty((self.num_timesteps), dtype=torch.float32)
for x in range(self.num_timesteps):
t = (x + 1) / self.num_timesteps
sigmas[x] = self.sigma(t)
self.set_sigmas(sigmas)
def sigma(self, timestep):
alpha_cumprod = (torch.cos((timestep + self.cosine_s) / (1 + self.cosine_s) * torch.pi * 0.5) ** 2 / self._init_alpha_cumprod)
if self.shift != 1.0:
var = alpha_cumprod
logSNR = (var/(1-var)).log()
logSNR += 2 * torch.log(1.0 / torch.tensor(self.shift))
alpha_cumprod = logSNR.sigmoid()
alpha_cumprod = alpha_cumprod.clamp(0.0001, 0.9999)
return ((1 - alpha_cumprod) / alpha_cumprod) ** 0.5
def timestep(self, sigma):
var = 1 / ((sigma * sigma) + 1)
var = var.clamp(0, 1.0)
s, min_var = self.cosine_s.to(var.device), self._init_alpha_cumprod.to(var.device)
t = (((var * min_var) ** 0.5).acos() / (torch.pi * 0.5)) * (1 + s) - s
return t
def percent_to_sigma(self, percent):
if percent <= 0.0:
return 999999999.9
if percent >= 1.0:
return 0.0
percent = 1.0 - percent
return self.sigma(torch.tensor(percent)) | 032394.py | [
"Unknown"
] |
import os
import einops
import torch
import numpy as np
import ldm_patched.modules.model_management
import ldm_patched.modules.model_detection
import ldm_patched.modules.model_patcher
import ldm_patched.modules.utils
import ldm_patched.modules.controlnet
import modules.sample_hijack
import ldm_patched.modules.samplers
import ldm_patched.modules.latent_formats
from ldm_patched.modules.sd import load_checkpoint_guess_config
from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, VAEEncodeTiled, VAEDecodeTiled, \
ControlNetApplyAdvanced
from ldm_patched.contrib.external_freelunch import FreeU_V2
from ldm_patched.modules.sample import prepare_mask
from modules.lora import match_lora
from modules.util import get_file_from_folder_list
from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip
from modules.config import path_embeddings
from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete, ModelSamplingContinuousEDM
opEmptyLatentImage = EmptyLatentImage()
opVAEDecode = VAEDecode()
opVAEEncode = VAEEncode()
opVAEDecodeTiled = VAEDecodeTiled()
opVAEEncodeTiled = VAEEncodeTiled()
opControlNetApplyAdvanced = ControlNetApplyAdvanced()
opFreeU = FreeU_V2()
opModelSamplingDiscrete = ModelSamplingDiscrete()
opModelSamplingContinuousEDM = ModelSamplingContinuousEDM()
class StableDiffusionModel:
def __init__(self, unet=None, vae=None, clip=None, clip_vision=None, filename=None, vae_filename=None):
self.unet = unet
self.vae = vae
self.clip = clip
self.clip_vision = clip_vision
self.filename = filename
self.vae_filename = vae_filename
self.unet_with_lora = unet
self.clip_with_lora = clip
self.visited_loras = ''
self.lora_key_map_unet = {}
self.lora_key_map_clip = {}
if self.unet is not None:
self.lora_key_map_unet = model_lora_keys_unet(self.unet.model, self.lora_key_map_unet)
self.lora_key_map_unet.update({x: x for x in self.unet.model.state_dict().keys()})
if self.clip is not None:
self.lora_key_map_clip = model_lora_keys_clip(self.clip.cond_stage_model, self.lora_key_map_clip)
self.lora_key_map_clip.update({x: x for x in self.clip.cond_stage_model.state_dict().keys()})
@torch.no_grad()
@torch.inference_mode()
def refresh_loras(self, loras):
assert isinstance(loras, list)
if self.visited_loras == str(loras):
return
self.visited_loras = str(loras)
if self.unet is None:
return
print(f'Request to load LoRAs {str(loras)} for model [{self.filename}].')
loras_to_load = []
for filename, weight in loras:
if filename == 'None':
continue
if os.path.exists(filename):
lora_filename = filename
else:
lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras)
if not os.path.exists(lora_filename):
print(f'Lora file not found: {lora_filename}')
continue
loras_to_load.append((lora_filename, weight))
self.unet_with_lora = self.unet.clone() if self.unet is not None else None
self.clip_with_lora = self.clip.clone() if self.clip is not None else None
for lora_filename, weight in loras_to_load:
lora_unmatch = ldm_patched.modules.utils.load_torch_file(lora_filename, safe_load=False)
lora_unet, lora_unmatch = match_lora(lora_unmatch, self.lora_key_map_unet)
lora_clip, lora_unmatch = match_lora(lora_unmatch, self.lora_key_map_clip)
if len(lora_unmatch) > 12:
# model mismatch
continue
if len(lora_unmatch) > 0:
print(f'Loaded LoRA [{lora_filename}] for model [{self.filename}] '
f'with unmatched keys {list(lora_unmatch.keys())}')
if self.unet_with_lora is not None and len(lora_unet) > 0:
loaded_keys = self.unet_with_lora.add_patches(lora_unet, weight)
print(f'Loaded LoRA [{lora_filename}] for UNet [{self.filename}] '
f'with {len(loaded_keys)} keys at weight {weight}.')
for item in lora_unet:
if item not in loaded_keys:
print("UNet LoRA key skipped: ", item)
if self.clip_with_lora is not None and len(lora_clip) > 0:
loaded_keys = self.clip_with_lora.add_patches(lora_clip, weight)
print(f'Loaded LoRA [{lora_filename}] for CLIP [{self.filename}] '
f'with {len(loaded_keys)} keys at weight {weight}.')
for item in lora_clip:
if item not in loaded_keys:
print("CLIP LoRA key skipped: ", item)
@torch.no_grad()
@torch.inference_mode()
def apply_freeu(model, b1, b2, s1, s2):
return opFreeU.patch(model=model, b1=b1, b2=b2, s1=s1, s2=s2)[0]
@torch.no_grad()
@torch.inference_mode()
def load_controlnet(ckpt_filename):
return ldm_patched.modules.controlnet.load_controlnet(ckpt_filename)
@torch.no_grad()
@torch.inference_mode()
def apply_controlnet(positive, negative, control_net, image, strength, start_percent, end_percent):
return opControlNetApplyAdvanced.apply_controlnet(positive=positive, negative=negative, control_net=control_net,
image=image, strength=strength, start_percent=start_percent, end_percent=end_percent)
@torch.no_grad()
@torch.inference_mode()
def load_model(ckpt_filename, vae_filename=None):
unet, clip, vae, vae_filename, clip_vision = load_checkpoint_guess_config(ckpt_filename, embedding_directory=path_embeddings,
vae_filename_param=vae_filename)
return StableDiffusionModel(unet=unet, clip=clip, vae=vae, clip_vision=clip_vision, filename=ckpt_filename, vae_filename=vae_filename)
@torch.no_grad()
@torch.inference_mode()
def generate_empty_latent(width=1024, height=1024, batch_size=1):
return opEmptyLatentImage.generate(width=width, height=height, batch_size=batch_size)[0]
@torch.no_grad()
@torch.inference_mode()
def decode_vae(vae, latent_image, tiled=False):
if tiled:
return opVAEDecodeTiled.decode(samples=latent_image, vae=vae, tile_size=512)[0]
else:
return opVAEDecode.decode(samples=latent_image, vae=vae)[0]
@torch.no_grad()
@torch.inference_mode()
def encode_vae(vae, pixels, tiled=False):
if tiled:
return opVAEEncodeTiled.encode(pixels=pixels, vae=vae, tile_size=512)[0]
else:
return opVAEEncode.encode(pixels=pixels, vae=vae)[0]
@torch.no_grad()
@torch.inference_mode()
def encode_vae_inpaint(vae, pixels, mask):
assert mask.ndim == 3 and pixels.ndim == 4
assert mask.shape[-1] == pixels.shape[-2]
assert mask.shape[-2] == pixels.shape[-3]
w = mask.round()[..., None]
pixels = pixels * (1 - w) + 0.5 * w
latent = vae.encode(pixels)
B, C, H, W = latent.shape
latent_mask = mask[:, None, :, :]
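    # Upscale the mask to pixel resolution (latents are 8x downsampled), then max-pool it back down
    # so that any masked pixel marks its whole 8x8 latent cell as masked.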
latent_mask = torch.nn.functional.interpolate(latent_mask, size=(H * 8, W * 8), mode="bilinear").round()
latent_mask = torch.nn.functional.max_pool2d(latent_mask, (8, 8)).round().to(latent)
return latent, latent_mask
class VAEApprox(torch.nn.Module):
def __init__(self):
super(VAEApprox, self).__init__()
self.conv1 = torch.nn.Conv2d(4, 8, (7, 7))
self.conv2 = torch.nn.Conv2d(8, 16, (5, 5))
self.conv3 = torch.nn.Conv2d(16, 32, (3, 3))
self.conv4 = torch.nn.Conv2d(32, 64, (3, 3))
self.conv5 = torch.nn.Conv2d(64, 32, (3, 3))
self.conv6 = torch.nn.Conv2d(32, 16, (3, 3))
self.conv7 = torch.nn.Conv2d(16, 8, (3, 3))
self.conv8 = torch.nn.Conv2d(8, 3, (3, 3))
self.current_type = None
def forward(self, x):
extra = 11
x = torch.nn.functional.interpolate(x, (x.shape[2] * 2, x.shape[3] * 2))
x = torch.nn.functional.pad(x, (extra, extra, extra, extra))
for layer in [self.conv1, self.conv2, self.conv3, self.conv4, self.conv5, self.conv6, self.conv7, self.conv8]:
x = layer(x)
x = torch.nn.functional.leaky_relu(x, 0.1)
return x
VAE_approx_models = {}
@torch.no_grad()
@torch.inference_mode()
def get_previewer(model):
global VAE_approx_models
from modules.config import path_vae_approx
is_sdxl = isinstance(model.model.latent_format, ldm_patched.modules.latent_formats.SDXL)
vae_approx_filename = os.path.join(path_vae_approx, 'xlvaeapp.pth' if is_sdxl else 'vaeapp_sd15.pth')
if vae_approx_filename in VAE_approx_models:
VAE_approx_model = VAE_approx_models[vae_approx_filename]
else:
sd = torch.load(vae_approx_filename, map_location='cpu', weights_only=True)
VAE_approx_model = VAEApprox()
VAE_approx_model.load_state_dict(sd)
del sd
VAE_approx_model.eval()
if ldm_patched.modules.model_management.should_use_fp16():
VAE_approx_model.half()
VAE_approx_model.current_type = torch.float16
else:
VAE_approx_model.float()
VAE_approx_model.current_type = torch.float32
VAE_approx_model.to(ldm_patched.modules.model_management.get_torch_device())
VAE_approx_models[vae_approx_filename] = VAE_approx_model
@torch.no_grad()
@torch.inference_mode()
def preview_function(x0, step, total_steps):
with torch.no_grad():
x_sample = x0.to(VAE_approx_model.current_type)
x_sample = VAE_approx_model(x_sample) * 127.5 + 127.5
x_sample = einops.rearrange(x_sample, 'b c h w -> b h w c')[0]
x_sample = x_sample.cpu().numpy().clip(0, 255).astype(np.uint8)
return x_sample
return preview_function
@torch.no_grad()
@torch.inference_mode()
def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
force_full_denoise=False, callback_function=None, refiner=None, refiner_switch=-1,
previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None, disable_preview=False):
if sigmas is not None:
sigmas = sigmas.clone().to(ldm_patched.modules.model_management.get_torch_device())
latent_image = latent["samples"]
if disable_noise:
noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
else:
batch_inds = latent["batch_index"] if "batch_index" in latent else None
noise = ldm_patched.modules.sample.prepare_noise(latent_image, seed, batch_inds)
if isinstance(noise_mean, torch.Tensor):
noise = noise + noise_mean - torch.mean(noise, dim=1, keepdim=True)
noise_mask = None
if "noise_mask" in latent:
noise_mask = latent["noise_mask"]
previewer = get_previewer(model)
if previewer_start is None:
previewer_start = 0
if previewer_end is None:
previewer_end = steps
def callback(step, x0, x, total_steps):
ldm_patched.modules.model_management.throw_exception_if_processing_interrupted()
y = None
if previewer is not None and not disable_preview:
y = previewer(x0, previewer_start + step, previewer_end)
if callback_function is not None:
callback_function(previewer_start + step, x0, x, previewer_end, y)
disable_pbar = False
modules.sample_hijack.current_refiner = refiner
modules.sample_hijack.refiner_switch_step = refiner_switch
ldm_patched.modules.samplers.sample = modules.sample_hijack.sample_hacked
try:
samples = ldm_patched.modules.sample.sample(model,
noise, steps, cfg, sampler_name, scheduler,
positive, negative, latent_image,
denoise=denoise, disable_noise=disable_noise,
start_step=start_step,
last_step=last_step,
force_full_denoise=force_full_denoise, noise_mask=noise_mask,
callback=callback,
disable_pbar=disable_pbar, seed=seed, sigmas=sigmas)
out = latent.copy()
out["samples"] = samples
finally:
modules.sample_hijack.current_refiner = None
return out
@torch.no_grad()
@torch.inference_mode()
def pytorch_to_numpy(x):
return [np.clip(255. * y.cpu().numpy(), 0, 255).astype(np.uint8) for y in x]
@torch.no_grad()
@torch.inference_mode()
def numpy_to_pytorch(x):
y = x.astype(np.float32) / 255.0
y = y[None]
y = np.ascontiguousarray(y.copy())
y = torch.from_numpy(y).float()
return y
| 192634.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import time
from abc import abstractmethod
from typing import List, Tuple
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import CLIPImageProcessor, CLIPVisionModel
from extensions.multimodal.abstract_pipeline import AbstractMultimodalPipeline
from modules import shared
from modules.logging_colors import logger
from modules.text_generation import encode
def expand2square(pil_img: Image.Image, background_color: Tuple[int, int, int]) -> Image.Image:
width, height = pil_img.size
if width == height:
return pil_img
elif width > height:
result = Image.new(pil_img.mode, (width, width), background_color)
result.paste(pil_img, (0, (width - height) // 2))
return result
else:
result = Image.new(pil_img.mode, (height, height), background_color)
result.paste(pil_img, ((height - width) // 2, 0))
return result
class LLaVA_v0_Pipeline(AbstractMultimodalPipeline):
CLIP_REPO = "openai/clip-vit-large-patch14"
def __init__(self, params: dict) -> None:
super().__init__()
self.clip_device = self._get_device("vision_device", params)
self.clip_dtype = self._get_dtype("vision_bits", params)
self.projector_device = self._get_device("projector_device", params)
self.projector_dtype = self._get_dtype("projector_bits", params)
self.image_processor, self.vision_tower, self.mm_projector = self._load_models()
def _load_models(self):
start_ts = time.time()
logger.info(f"LLaVA - Loading CLIP from {self.CLIP_REPO} as {self.clip_dtype} on {self.clip_device}...")
image_processor = CLIPImageProcessor.from_pretrained(self.CLIP_REPO, torch_dtype=self.clip_dtype)
vision_tower = CLIPVisionModel.from_pretrained(self.CLIP_REPO, torch_dtype=self.clip_dtype).to(self.clip_device)
logger.info(f"LLaVA - Loading projector from {self.llava_projector_repo()} as {self.projector_dtype} on {self.projector_device}...")
projector_path = hf_hub_download(self.llava_projector_repo(), self.llava_projector_filename())
mm_projector = self.build_mm_projector()
projector_data = torch.load(projector_path)
projector_data = {k[19:]: v for k, v in projector_data.items() if k.startswith('model.mm_projector.')}
mm_projector.load_state_dict(projector_data)
mm_projector = mm_projector.to(self.projector_device)
logger.info(f"LLaVA supporting models loaded, took {time.time() - start_ts:.2f} seconds")
return image_processor, vision_tower, mm_projector
def build_mm_projector(self) -> torch.nn.Module:
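        # A 2-element shape builds a single linear projector (original LLaVA); longer shapes build an
        # MLP with GELU activations between the linear layers (the LLaVA v1.5 style projector).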
projector_shape = self.llava_projector_shape()
if len(projector_shape) == 2:
return torch.nn.Linear(*projector_shape)
else:
modules = []
modules.append(torch.nn.Linear(projector_shape[0], projector_shape[1]))
for i in range(2, len(projector_shape)):
modules.append(torch.nn.GELU())
modules.append(torch.nn.Linear(projector_shape[i-1], projector_shape[i]))
return torch.nn.Sequential(*modules)
@staticmethod
def image_start() -> str:
return "<im_start>"
@staticmethod
def image_end() -> str:
return "<im_end>"
@staticmethod
def num_image_embeds() -> int:
return 256
@staticmethod
def embed_tokens(input_ids: torch.Tensor) -> torch.Tensor:
for attr in ['', 'model', 'model.model', 'model.model.model']:
tmp = getattr(shared.model, attr, None) if attr != '' else shared.model
if tmp is not None and hasattr(tmp, 'embed_tokens'):
func = tmp.embed_tokens
break
else:
raise ValueError('The embed_tokens method has not been found for this loader.')
return func(input_ids).to(shared.model.device, dtype=shared.model.dtype)
@staticmethod
def placeholder_embeddings() -> torch.Tensor:
return LLaVA_v0_Pipeline.embed_tokens(encode("<im_patch>"*256, add_bos_token=False)[0])
def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
images = self.image_processor(images, return_tensors='pt')['pixel_values']
images = images.to(self.clip_device, dtype=self.clip_dtype)
with torch.no_grad():
image_forward_outs = self.vision_tower(images, output_hidden_states=True)
select_hidden_state_layer = -2
select_hidden_state = image_forward_outs.hidden_states[select_hidden_state_layer]
image_features = select_hidden_state[:, 1:].to(self.projector_device, dtype=self.projector_dtype)
image_features = self.mm_projector(image_features)
return image_features.to(shared.model.device, dtype=shared.model.dtype)
@staticmethod
@abstractmethod
def llava_projector_repo() -> str:
pass
@staticmethod
@abstractmethod
def llava_projector_filename() -> str:
pass
@staticmethod
@abstractmethod
def llava_projector_shape() -> Tuple[int, int]:
pass
class LLaVA_v0_13B_Pipeline(LLaVA_v0_Pipeline):
def __init__(self, params: dict) -> None:
super().__init__(params)
@staticmethod
def name() -> str:
return "llava-13b"
@staticmethod
def placeholder_token_id() -> int:
return 32000
@staticmethod
def llava_projector_shape() -> Tuple[int, int]:
return (1024, 5120)
@staticmethod
def llava_projector_filename() -> str:
return "mm_projector.bin"
@staticmethod
def llava_projector_repo() -> str:
return "liuhaotian/LLaVA-13b-delta-v0"
class LLaVA_v0_7B_Pipeline(LLaVA_v0_Pipeline):
def __init__(self, params: dict) -> None:
super().__init__(params)
@staticmethod
def name() -> str:
return "llava-7b"
@staticmethod
def placeholder_token_id() -> int:
return 32001
@staticmethod
def llava_projector_shape() -> Tuple[int, int]:
return (1024, 4096)
@staticmethod
def llava_projector_filename() -> str:
return "mm_projector.bin"
@staticmethod
def llava_projector_repo() -> str:
return "liuhaotian/LLaVA-7b-delta-v0"
class LLaVA_LLaMA_2_13B_Pipeline(LLaVA_v0_13B_Pipeline):
def __init__(self, params: dict) -> None:
super().__init__(params)
@staticmethod
def name() -> str:
return "llava-llama-2-13b"
@staticmethod
def placeholder_token_id() -> int:
return 0
@staticmethod
def llava_projector_repo() -> str:
return "liuhaotian/llava-llama-2-13b-chat-lightning-preview"
@staticmethod
def image_start() -> str:
return ""
@staticmethod
def image_end() -> str:
return ""
@staticmethod
def placeholder_embeddings() -> torch.Tensor:
return LLaVA_v0_Pipeline.embed_tokens(encode("<unk>"*256, add_bos_token=False)[0])
class LLaVA_v1_5_13B_Pipeline(LLaVA_v0_13B_Pipeline):
CLIP_REPO = "openai/clip-vit-large-patch14-336"
def __init__(self, params: dict) -> None:
super().__init__(params)
@staticmethod
def name() -> str:
return "llava-v1.5-13b"
@staticmethod
def llava_projector_shape() -> Tuple[int, int]:
return (1024, 5120, 5120)
@staticmethod
def placeholder_token_id() -> int:
return 0
@staticmethod
def llava_projector_repo() -> str:
return "liuhaotian/llava-v1.5-13b"
@staticmethod
def image_start() -> str:
return ""
@staticmethod
def image_end() -> str:
return ""
@staticmethod
def num_image_embeds() -> int:
return 576
def embed_images(self, images: List[Image.Image]) -> torch.Tensor:
# pad it to square first
images = [
expand2square(image, tuple(int(x*255) for x in self.image_processor.image_mean))
for image in images
]
return super().embed_images(images)
@staticmethod
def placeholder_embeddings() -> torch.Tensor:
return LLaVA_v0_Pipeline.embed_tokens(encode("<unk>"*576, add_bos_token=False)[0])
class LLaVA_v1_5_7B_Pipeline(LLaVA_v1_5_13B_Pipeline):
@staticmethod
def name() -> str:
return "llava-v1.5-7b"
@staticmethod
def llava_projector_shape() -> Tuple[int, int]:
return (1024, 4096, 4096)
@staticmethod
def llava_projector_repo() -> str:
return "liuhaotian/llava-v1.5-7b" | 490297.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import asyncio
import io
import json
import logging
import os
import traceback
from collections import deque
from threading import Thread
import speech_recognition as sr
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse
from pydub import AudioSegment
from sse_starlette import EventSourceResponse
import extensions.openai.completions as OAIcompletions
import extensions.openai.embeddings as OAIembeddings
import extensions.openai.images as OAIimages
import extensions.openai.logits as OAIlogits
import extensions.openai.models as OAImodels
import extensions.openai.moderations as OAImoderations
from extensions.openai.errors import ServiceUnavailableError
from extensions.openai.tokens import token_count, token_decode, token_encode
from extensions.openai.utils import _start_cloudflared
from modules import shared
from modules.logging_colors import logger
from modules.models import unload_model
from modules.text_generation import stop_everything_event
from .typing import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatPromptResponse,
CompletionRequest,
CompletionResponse,
DecodeRequest,
DecodeResponse,
EmbeddingsRequest,
EmbeddingsResponse,
EncodeRequest,
EncodeResponse,
LoadLorasRequest,
LoadModelRequest,
LogitsRequest,
LogitsResponse,
LoraListResponse,
ModelInfoResponse,
ModelListResponse,
TokenCountResponse,
to_dict
)
params = {
'embedding_device': 'cpu',
'embedding_model': 'sentence-transformers/all-mpnet-base-v2',
'sd_webui_url': '',
'debug': 0
}
streaming_semaphore = asyncio.Semaphore(1)
def verify_api_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.api_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
def verify_admin_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.admin_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
app = FastAPI()
check_key = [Depends(verify_api_key)]
check_admin_key = [Depends(verify_admin_key)]
# Configure CORS settings to allow all origins, methods, and headers
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
@app.options("/", dependencies=check_key)
async def options_route():
return JSONResponse(content="OK")
@app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key)
async def openai_completions(request: Request, request_data: CompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
async def generator():
async with streaming_semaphore:
response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy)
for resp in response:
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
return EventSourceResponse(generator()) # SSE streaming
else:
response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy)
return JSONResponse(response)
@app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key)
async def openai_chat_completions(request: Request, request_data: ChatCompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
async def generator():
async with streaming_semaphore:
response = OAIcompletions.stream_chat_completions(to_dict(request_data), is_legacy=is_legacy)
for resp in response:
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
return EventSourceResponse(generator()) # SSE streaming
else:
response = OAIcompletions.chat_completions(to_dict(request_data), is_legacy=is_legacy)
return JSONResponse(response)
@app.get("/v1/models", dependencies=check_key)
@app.get("/v1/models/{model}", dependencies=check_key)
async def handle_models(request: Request):
path = request.url.path
is_list = request.url.path.split('?')[0].split('#')[0] == '/v1/models'
if is_list:
response = OAImodels.list_dummy_models()
else:
model_name = path[len('/v1/models/'):]
response = OAImodels.model_info_dict(model_name)
return JSONResponse(response)
@app.get('/v1/billing/usage', dependencies=check_key)
def handle_billing_usage():
'''
Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31
'''
return JSONResponse(content={"total_usage": 0})
@app.post('/v1/audio/transcriptions', dependencies=check_key)
async def handle_audio_transcription(request: Request):
r = sr.Recognizer()
form = await request.form()
    audio_file = await form["file"].read()
    # pydub expects a path or a file-like object, so wrap the raw upload bytes before decoding
    audio_data = AudioSegment.from_file(io.BytesIO(audio_file))
# Convert AudioSegment to raw data
raw_data = audio_data.raw_data
# Create AudioData object
audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width)
    whisper_language = form.get('language', None)
    whisper_model = form.get('model', 'tiny')  # Use the model from the form data if it exists, otherwise default to tiny
transcription = {"text": ""}
try:
transcription["text"] = r.recognize_whisper(audio_data, language=whisper_language, model=whisper_model)
except sr.UnknownValueError:
print("Whisper could not understand audio")
transcription["text"] = "Whisper could not understand audio UnknownValueError"
except sr.RequestError as e:
print("Could not request results from Whisper", e)
transcription["text"] = "Whisper could not understand audio RequestError"
return JSONResponse(content=transcription)
@app.post('/v1/images/generations', dependencies=check_key)
async def handle_image_generation(request: Request):
if not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')):
raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.")
body = await request.json()
prompt = body['prompt']
size = body.get('size', '1024x1024')
response_format = body.get('response_format', 'url') # or b64_json
n = body.get('n', 1) # ignore the batch limits of max 10
response = await OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n)
return JSONResponse(response)
@app.post("/v1/embeddings", response_model=EmbeddingsResponse, dependencies=check_key)
async def handle_embeddings(request: Request, request_data: EmbeddingsRequest):
input = request_data.input
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
if type(input) is str:
input = [input]
response = OAIembeddings.embeddings(input, request_data.encoding_format)
return JSONResponse(response)
@app.post("/v1/moderations", dependencies=check_key)
async def handle_moderations(request: Request):
body = await request.json()
input = body["input"]
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
response = OAImoderations.moderations(input)
return JSONResponse(response)
@app.post("/v1/internal/encode", response_model=EncodeResponse, dependencies=check_key)
async def handle_token_encode(request_data: EncodeRequest):
response = token_encode(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/decode", response_model=DecodeResponse, dependencies=check_key)
async def handle_token_decode(request_data: DecodeRequest):
response = token_decode(request_data.tokens)
return JSONResponse(response)
@app.post("/v1/internal/token-count", response_model=TokenCountResponse, dependencies=check_key)
async def handle_token_count(request_data: EncodeRequest):
response = token_count(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key)
async def handle_logits(request_data: LogitsRequest):
'''
Given a prompt, returns the top 50 most likely logits as a dict.
The keys are the tokens, and the values are the probabilities.
'''
response = OAIlogits._get_next_logits(to_dict(request_data))
return JSONResponse(response)
@app.post('/v1/internal/chat-prompt', response_model=ChatPromptResponse, dependencies=check_key)
async def handle_chat_prompt(request: Request, request_data: ChatCompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
generator = OAIcompletions.chat_completions_common(to_dict(request_data), is_legacy=is_legacy, prompt_only=True)
response = deque(generator, maxlen=1).pop()
return JSONResponse(response)
@app.post("/v1/internal/stop-generation", dependencies=check_key)
async def handle_stop_generation(request: Request):
stop_everything_event()
return JSONResponse(content="OK")
@app.get("/v1/internal/model/info", response_model=ModelInfoResponse, dependencies=check_key)
async def handle_model_info():
payload = OAImodels.get_current_model_info()
return JSONResponse(content=payload)
@app.get("/v1/internal/model/list", response_model=ModelListResponse, dependencies=check_admin_key)
async def handle_list_models():
payload = OAImodels.list_models()
return JSONResponse(content=payload)
@app.post("/v1/internal/model/load", dependencies=check_admin_key)
async def handle_load_model(request_data: LoadModelRequest):
'''
This endpoint is experimental and may change in the future.
The "args" parameter can be used to modify flags like "--load-in-4bit"
or "--n-gpu-layers" before loading a model. Example:
```
"args": {
"load_in_4bit": true,
"n_gpu_layers": 12
}
```
Note that those settings will remain after loading the model. So you
may need to change them back to load a second model.
The "settings" parameter is also a dict but with keys for the
shared.settings object. It can be used to modify the default instruction
template like this:
```
"settings": {
"instruction_template": "Alpaca"
}
```
'''
try:
OAImodels._load_model(to_dict(request_data))
return JSONResponse(content="OK")
except:
traceback.print_exc()
return HTTPException(status_code=400, detail="Failed to load the model.")
@app.post("/v1/internal/model/unload", dependencies=check_admin_key)
async def handle_unload_model():
unload_model()
@app.get("/v1/internal/lora/list", response_model=LoraListResponse, dependencies=check_admin_key)
async def handle_list_loras():
response = OAImodels.list_loras()
return JSONResponse(content=response)
@app.post("/v1/internal/lora/load", dependencies=check_admin_key)
async def handle_load_loras(request_data: LoadLorasRequest):
try:
OAImodels.load_loras(request_data.lora_names)
return JSONResponse(content="OK")
except:
traceback.print_exc()
return HTTPException(status_code=400, detail="Failed to apply the LoRA(s).")
@app.post("/v1/internal/lora/unload", dependencies=check_admin_key)
async def handle_unload_loras():
OAImodels.unload_all_loras()
return JSONResponse(content="OK")
def run_server():
server_addr = '0.0.0.0' if shared.args.listen else '127.0.0.1'
port = int(os.environ.get('OPENEDAI_PORT', shared.args.api_port))
ssl_certfile = os.environ.get('OPENEDAI_CERT_PATH', shared.args.ssl_certfile)
ssl_keyfile = os.environ.get('OPENEDAI_KEY_PATH', shared.args.ssl_keyfile)
if shared.args.public_api:
def on_start(public_url: str):
logger.info(f'OpenAI-compatible API URL:\n\n{public_url}\n')
_start_cloudflared(port, shared.args.public_api_id, max_attempts=3, on_start=on_start)
else:
if ssl_keyfile and ssl_certfile:
logger.info(f'OpenAI-compatible API URL:\n\nhttps://{server_addr}:{port}\n')
else:
logger.info(f'OpenAI-compatible API URL:\n\nhttp://{server_addr}:{port}\n')
if shared.args.api_key:
if not shared.args.admin_key:
shared.args.admin_key = shared.args.api_key
logger.info(f'OpenAI API key:\n\n{shared.args.api_key}\n')
if shared.args.admin_key and shared.args.admin_key != shared.args.api_key:
logger.info(f'OpenAI API admin key (for loading/unloading models):\n\n{shared.args.admin_key}\n')
logging.getLogger("uvicorn.error").propagate = False
uvicorn.run(app, host=server_addr, port=port, ssl_certfile=ssl_certfile, ssl_keyfile=ssl_keyfile)
def setup():
if shared.args.nowebui:
run_server()
else:
Thread(target=run_server, daemon=True).start()
| 204197.py | [
"CWE-942: Permissive Cross-domain Policy with Untrusted Domains"
] |
import importlib
import traceback
from functools import partial
from inspect import signature
import gradio as gr
import extensions
import modules.shared as shared
from modules.logging_colors import logger
state = {}
available_extensions = []
setup_called = set()
def apply_settings(extension, name):
if not hasattr(extension, 'params'):
return
for param in extension.params:
_id = f"{name}-{param}"
shared.default_settings[_id] = extension.params[param]
if _id in shared.settings:
extension.params[param] = shared.settings[_id]
def load_extensions():
global state, setup_called
state = {}
for i, name in enumerate(shared.args.extensions):
if name in available_extensions:
if name != 'api':
logger.info(f'Loading the extension "{name}"')
try:
try:
extension = importlib.import_module(f"extensions.{name}.script")
except ModuleNotFoundError:
logger.error(f"Could not import the requirements for '{name}'. Make sure to install the requirements for the extension.\n\n* To install requirements for all available extensions, launch the\n update_wizard script for your OS and choose the B option.\n\n* To install the requirements for this extension alone, launch the\n cmd script for your OS and paste the following command in the\n terminal window that appears:\n\nLinux / Mac:\n\npip install -r extensions/{name}/requirements.txt --upgrade\n\nWindows:\n\npip install -r extensions\\{name}\\requirements.txt --upgrade\n")
raise
# Only run setup() and apply settings from settings.yaml once
if extension not in setup_called:
apply_settings(extension, name)
if hasattr(extension, "setup"):
extension.setup()
setup_called.add(extension)
state[name] = [True, i]
except:
logger.error(f'Failed to load the extension "{name}".')
traceback.print_exc()
# This iterator returns the extensions in the order specified in the command-line
def iterator():
for name in sorted(state, key=lambda x: state[x][1]):
if state[name][0]:
yield getattr(extensions, name).script, name
# Extension functions that map string -> string
def _apply_string_extensions(function_name, text, state, is_chat=False):
for extension, _ in iterator():
if hasattr(extension, function_name):
func = getattr(extension, function_name)
# Handle old extensions without the 'state' arg or
# the 'is_chat' kwarg
count = 0
has_chat = False
for k in signature(func).parameters:
if k == 'is_chat':
has_chat = True
else:
count += 1
if count == 2:
args = [text, state]
else:
args = [text]
if has_chat:
kwargs = {'is_chat': is_chat}
else:
kwargs = {}
text = func(*args, **kwargs)
return text
# Extension functions that map string -> string
def _apply_chat_input_extensions(text, visible_text, state):
for extension, _ in iterator():
if hasattr(extension, 'chat_input_modifier'):
text, visible_text = extension.chat_input_modifier(text, visible_text, state)
return text, visible_text
# custom_generate_chat_prompt handling - currently only the first one will work
def _apply_custom_generate_chat_prompt(text, state, **kwargs):
for extension, _ in iterator():
if hasattr(extension, 'custom_generate_chat_prompt'):
return extension.custom_generate_chat_prompt(text, state, **kwargs)
return None
# Extension that modifies the input parameters before they are used
def _apply_state_modifier_extensions(state):
for extension, _ in iterator():
if hasattr(extension, "state_modifier"):
state = getattr(extension, "state_modifier")(state)
return state
# Extension that modifies the chat history before it is used
def _apply_history_modifier_extensions(history):
for extension, _ in iterator():
if hasattr(extension, "history_modifier"):
history = getattr(extension, "history_modifier")(history)
return history
# Extension functions that override the default tokenizer output - The order of execution is not defined
def _apply_tokenizer_extensions(function_name, state, prompt, input_ids, input_embeds):
for extension, _ in iterator():
if hasattr(extension, function_name):
prompt, input_ids, input_embeds = getattr(extension, function_name)(state, prompt, input_ids, input_embeds)
return prompt, input_ids, input_embeds
# Allow extensions to add their own logits processors to the stack being run.
# Each extension would call `processor_list.append({their LogitsProcessor}())`.
def _apply_logits_processor_extensions(function_name, processor_list, input_ids):
for extension, _ in iterator():
if hasattr(extension, function_name):
result = getattr(extension, function_name)(processor_list, input_ids)
if type(result) is list:
processor_list = result
return processor_list
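# A hypothetical sketch of the extension-side hook consumed above (not part of this module): an
# extension's script.py could define, for example,
#
#   from transformers import LogitsProcessor
#
#   class MyBiasProcessor(LogitsProcessor):
#       def __call__(self, input_ids, scores):
#           # adjust and return the scores tensor here
#           return scores
#
#   def logits_processor_modifier(processor_list, input_ids):
#       processor_list.append(MyBiasProcessor())
#       return processor_list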
# Get prompt length in tokens after applying extension functions which override the default tokenizer output
# currently only the first one will work
def _apply_custom_tokenized_length(prompt):
for extension, _ in iterator():
if hasattr(extension, 'custom_tokenized_length'):
return getattr(extension, 'custom_tokenized_length')(prompt)
return None
# Custom generate reply handling - currently only the first one will work
def _apply_custom_generate_reply():
for extension, _ in iterator():
if hasattr(extension, 'custom_generate_reply'):
return getattr(extension, 'custom_generate_reply')
return None
def _apply_custom_css():
all_css = ''
for extension, _ in iterator():
if hasattr(extension, 'custom_css'):
all_css += getattr(extension, 'custom_css')()
return all_css
def _apply_custom_js():
all_js = ''
for extension, _ in iterator():
if hasattr(extension, 'custom_js'):
all_js += getattr(extension, 'custom_js')()
return all_js
def create_extensions_block():
to_display = []
for extension, name in iterator():
if hasattr(extension, "ui") and not (hasattr(extension, 'params') and extension.params.get('is_tab', False)):
to_display.append((extension, name))
# Creating the extension ui elements
if len(to_display) > 0:
with gr.Column(elem_id="extensions"):
for row in to_display:
extension, _ = row
extension.ui()
def create_extensions_tabs():
for extension, name in iterator():
if hasattr(extension, "ui") and (hasattr(extension, 'params') and extension.params.get('is_tab', False)):
display_name = getattr(extension, 'params', {}).get('display_name', name)
with gr.Tab(display_name, elem_classes="extension-tab"):
extension.ui()
EXTENSION_MAP = {
"input": partial(_apply_string_extensions, "input_modifier"),
"output": partial(_apply_string_extensions, "output_modifier"),
"chat_input": _apply_chat_input_extensions,
"state": _apply_state_modifier_extensions,
"history": _apply_history_modifier_extensions,
"bot_prefix": partial(_apply_string_extensions, "bot_prefix_modifier"),
"tokenizer": partial(_apply_tokenizer_extensions, "tokenizer_modifier"),
'logits_processor': partial(_apply_logits_processor_extensions, 'logits_processor_modifier'),
"custom_generate_chat_prompt": _apply_custom_generate_chat_prompt,
"custom_generate_reply": _apply_custom_generate_reply,
"tokenized_length": _apply_custom_tokenized_length,
"css": _apply_custom_css,
"js": _apply_custom_js
}
def apply_extensions(typ, *args, **kwargs):
if typ not in EXTENSION_MAP:
raise ValueError(f"Invalid extension type {typ}")
return EXTENSION_MAP[typ](*args, **kwargs)
| 813380.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
import argparse
import os
from threading import Lock
from typing import Generator, List, Optional
import torch
import uvicorn
from coati.models import generate_streaming
from coati.quant import llama_load_quant, low_resource_init
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address
from sse_starlette.sse import EventSourceResponse
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from utils import ChatPromptProcessor, Dialogue, LockedIterator, load_json, update_model_kwargs_fn
MAX_LEN = 512
running_lock = Lock()
class GenerationTaskReq(BaseModel):
max_new_tokens: int = Field(gt=0, le=512, example=64)
history: List[Dialogue] = Field(min_items=1)
top_k: Optional[int] = Field(default=None, gt=0, example=50)
top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5)
temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7)
repetition_penalty: Optional[float] = Field(default=None, gt=1.0, example=1.2)
limiter = Limiter(key_func=get_remote_address)
app = FastAPI()
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
# set CORS
origin_spec_from_env = os.environ.get("CORS_ORIGIN", None)
if origin_spec_from_env is not None:
# allow CORS from the specified origins
    origins = origin_spec_from_env.split(",")
else:
# allow CORS from all origins
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
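# Example (illustrative values): CORS_ORIGIN="https://chat.example.com,https://admin.example.com"
# restricts browsers to those two origins; leaving it unset keeps the permissive "*" default above.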
def generate_streamingly(prompt, max_length, max_new_tokens, top_k, top_p, temperature):
input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"]
# TODO(ver217): streaming generation does not support repetition_penalty now
model_kwargs = {
"max_new_tokens": max_new_tokens,
"early_stopping": True,
"top_k": top_k,
"top_p": top_p,
"temperature": temperature,
"prepare_inputs_fn": None,
"update_model_kwargs_fn": update_model_kwargs_fn,
}
is_first_word = True
generator = LockedIterator(
generate_streaming(model, input_ids, tokenizer, max_length, **model_kwargs), running_lock
)
for output in generator:
output = output.cpu()
tokens = tokenizer.convert_ids_to_tokens(output, skip_special_tokens=True)
current_sub_tokens = []
for token in tokens:
if token in tokenizer.all_special_tokens:
continue
current_sub_tokens.append(token)
if current_sub_tokens:
out_string = tokenizer.sp_model.decode(current_sub_tokens)
if is_first_word:
out_string = out_string.lstrip()
is_first_word = False
elif current_sub_tokens[0].startswith("▁"):
# whitespace will be ignored by the frontend
out_string = " " + out_string
yield out_string
async def event_generator(request: Request, generator: Generator):
while True:
if await request.is_disconnected():
break
try:
yield {"event": "generate", "data": next(generator)}
except StopIteration:
yield {"event": "end", "data": ""}
break
@app.post("/generate/stream")
@limiter.limit("1/second")
def generate(data: GenerationTaskReq, request: Request):
prompt = prompt_processor.preprocess_prompt(data.history)
event_source = event_generator(
request,
        # GenerationTaskReq defines no max_length field, so cap generation at the module-level MAX_LEN
        generate_streamingly(prompt, MAX_LEN, data.max_new_tokens, data.top_k, data.top_p, data.temperature),
)
return EventSourceResponse(event_source)
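# A usage sketch for the streaming endpoint above (illustrative values; the history entries follow the
# Dialogue schema imported from utils, so the field names shown are assumptions):
#
#   curl -N -X POST http://127.0.0.1:7070/generate/stream \
#     -H "Content-Type: application/json" \
#     -d '{"max_new_tokens": 64, "top_k": 50, "top_p": 0.5, "temperature": 0.7,
#          "history": [{"instruction": "Hello", "response": ""}]}'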
@app.post("/generate")
@limiter.limit("1/second")
def generate_no_stream(data: GenerationTaskReq, request: Request):
prompt = prompt_processor.preprocess_prompt(data.history, data.max_new_tokens)
if prompt_processor.has_censored_words(prompt):
return prompt_processor.SAFE_RESPONSE
inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors="pt").items()}
with running_lock:
output = model.generate(**inputs, **data.dict(exclude={"history"}))
output = output.cpu()
prompt_len = inputs["input_ids"].size(1)
response = output[0, prompt_len:]
out_string = tokenizer.decode(response, skip_special_tokens=True)
out_string = prompt_processor.postprocess_output(out_string)
if prompt_processor.has_censored_words(out_string):
return prompt_processor.SAFE_RESPONSE
return out_string
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"pretrained",
help="Path to pretrained model. Can be a local path or a model name from the HuggingFace model hub.",
)
parser.add_argument(
"--tokenizer_path",
help="Path to pretrained tokenizer. Can be a local path or a model name from the HuggingFace model hub.",
default=None,
)
parser.add_argument(
"--quant",
choices=["8bit", "4bit"],
default=None,
help="Quantization mode. Default: None (no quantization, fp16).",
)
parser.add_argument(
"--gptq_checkpoint",
default=None,
help="Path to GPTQ checkpoint. This is only useful when quantization mode is 4bit. Default: None.",
)
parser.add_argument(
"--gptq_group_size",
type=int,
default=128,
help="Group size for GPTQ. This is only useful when quantization mode is 4bit. Default: 128.",
)
parser.add_argument("--http_host", default="0.0.0.0")
parser.add_argument("--http_port", type=int, default=7070)
parser.add_argument(
"--profanity_file",
default=None,
help="Path to profanity words list. It should be a JSON file containing a list of words.",
)
args = parser.parse_args()
if args.quant == "4bit":
assert args.gptq_checkpoint is not None, "Please specify a GPTQ checkpoint."
if args.tokenizer_path is None:
args.tokenizer_path = args.pretrained
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_path, local_files_only=True)
if args.profanity_file is not None:
censored_words = load_json(args.profanity_file)
else:
censored_words = []
prompt_processor = ChatPromptProcessor(censored_words=censored_words)
if args.quant == "4bit":
with low_resource_init():
config = AutoConfig.from_pretrained(args.pretrained)
            model = AutoModelForCausalLM.from_config(config)
model = llama_load_quant(model, args.gptq_checkpoint, 4, args.gptq_group_size)
model.cuda()
else:
model = AutoModelForCausalLM.from_pretrained(
args.pretrained,
load_in_8bit=(args.quant == "8bit"),
torch_dtype=torch.float16,
device_map="auto",
local_files_only=True,
)
if args.quant != "8bit":
model.half() # seems to fix bugs for some users.
model.eval()
config = uvicorn.Config(app, host=args.http_host, port=args.http_port)
server = uvicorn.Server(config=config)
server.run()
"""
python server.py /home/lcyab/data/models/experiments5/checkpoint/experiment5-2023-10-20-21-53-51/modeling/ --tokenizer_path /mnt/vepfs/lcxyc/leaderboard_models/Colossal-LLaMA-2-7b-base/
"""
| 041528.py | [
"CWE-942: Permissive Cross-domain Policy with Untrusted Domains"
] |
"""
API and LLM wrapper class for running LLMs locally
Usage:
import os
model_path = os.environ.get("ZH_MODEL_PATH")
model_name = "chatglm2"
colossal_api = ColossalAPI(model_name, model_path)
llm = ColossalLLM(n=1, api=colossal_api)
TEST_PROMPT_CHATGLM="续写文章:惊蛰一过,春寒加剧。先是料料峭峭,继而雨季开始,"
logger.info(llm(TEST_PROMPT_CHATGLM, max_new_tokens=100), verbose=True)
"""
from typing import Any, List, Mapping, Optional
import torch
from colossalqa.local.utils import get_response, post_http_request
from colossalqa.mylogging import get_logger
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from transformers import AutoModelForCausalLM, AutoTokenizer
logger = get_logger()
class ColossalAPI:
"""
API for calling LLM.generate
"""
__instances = dict()
def __init__(self, model_type: str, model_path: str, ckpt_path: str = None) -> None:
"""
Configure model
"""
if model_type + model_path + (ckpt_path or "") in ColossalAPI.__instances:
return
else:
ColossalAPI.__instances[model_type + model_path + (ckpt_path or "")] = self
self.model_type = model_type
self.model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path)
self.model.load_state_dict(state_dict)
self.model.to(torch.cuda.current_device())
# Configure tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model.eval()
@staticmethod
def get_api(model_type: str, model_path: str, ckpt_path: str = None):
if model_type + model_path + (ckpt_path or "") in ColossalAPI.__instances:
return ColossalAPI.__instances[model_type + model_path + (ckpt_path or "")]
else:
return ColossalAPI(model_type, model_path, ckpt_path)
def generate(self, input: str, **kwargs) -> str:
"""
Generate response given the prompt
Args:
input: input string
**kwargs: language model keyword type arguments, such as top_k, top_p, temperature, max_new_tokens...
Returns:
output: output string
"""
if self.model_type in ["chatglm", "chatglm2"]:
inputs = {
k: v.to(torch.cuda.current_device()) for k, v in self.tokenizer(input, return_tensors="pt").items()
}
else:
inputs = {
"input_ids": self.tokenizer(input, return_tensors="pt")["input_ids"].to(torch.cuda.current_device())
}
output = self.model.generate(**inputs, **kwargs)
output = output.cpu()
prompt_len = inputs["input_ids"].size(1)
response = output[0, prompt_len:]
output = self.tokenizer.decode(response, skip_special_tokens=True)
return output
class VllmAPI:
def __init__(self, host: str = "localhost", port: int = 8077) -> None:
        # Configure the API for a model served over HTTP
self.host = host
self.port = port
self.url = f"http://{self.host}:{self.port}/generate"
def generate(self, input: str, **kwargs):
output = get_response(post_http_request(input, self.url, **kwargs))[0]
return output[len(input) :]
class ColossalLLM(LLM):
"""
Langchain LLM wrapper for a local LLM
"""
n: int
api: Any
kwargs = {"max_new_tokens": 100}
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
logger.info(f"kwargs:{kwargs}\nstop:{stop}\nprompt:{prompt}", verbose=self.verbose)
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
generate_args = {k: kwargs[k] for k in kwargs if k not in ["stop", "n"]}
out = self.api.generate(prompt, **generate_args)
if isinstance(stop, list) and len(stop) != 0:
for stopping_words in stop:
if stopping_words in out:
out = out.split(stopping_words)[0]
logger.info(f"{prompt}{out}", verbose=self.verbose)
return out
@property
def _identifying_params(self) -> Mapping[str, int]:
"""Get the identifying parameters."""
return {"n": self.n}
def get_token_ids(self, text: str) -> List[int]:
"""Return the ordered ids of the tokens in a text.
Args:
text: The string input to tokenize.
Returns:
A list of ids corresponding to the tokens in the text, in order they occur
in the text.
"""
# use the colossal llm's tokenizer instead of langchain's cached GPT2 tokenizer
return self.api.tokenizer.encode(text)
class VllmLLM(LLM):
"""
Langchain LLM wrapper for a local LLM
"""
n: int
api: Any
kwargs = {"max_new_tokens": 100}
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
for k in self.kwargs:
if k not in kwargs:
kwargs[k] = self.kwargs[k]
logger.info(f"kwargs:{kwargs}\nstop:{stop}\nprompt:{prompt}", verbose=self.verbose)
generate_args = {k: kwargs[k] for k in kwargs if k in ["n", "max_tokens", "temperature", "stream"]}
out = self.api.generate(prompt, **generate_args)
        if isinstance(stop, list) and len(stop) != 0:
for stopping_words in stop:
if stopping_words in out:
out = out.split(stopping_words)[0]
logger.info(f"{prompt}{out}", verbose=self.verbose)
return out
def set_host_port(self, host: str = "localhost", port: int = 8077, **kwargs) -> None:
if "max_tokens" not in kwargs:
kwargs["max_tokens"] = 100
self.kwargs = kwargs
self.api = VllmAPI(host=host, port=port)
@property
def _identifying_params(self) -> Mapping[str, int]:
"""Get the identifying parameters."""
return {"n": self.n}
| 693973.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
import linecache
import os
import sys
import traceback
import warnings
from pathlib import Path
from typing import Any, Dict, Optional, Union
import torch
import torch.fx
import torch.nn as nn
from torch.fx.graph import PythonCode
try:
from torch.fx.graph import _PyTreeCodeGen
SUPPORT_PT_CODEGEN = True
except ImportError:
SUPPORT_PT_CODEGEN = False
from torch.fx.graph_module import _exec_with_source, _forward_from_src
from torch.nn.modules.module import _addindent
# This is a copy of torch.fx.graph_module._WrappedCall.
# It should be removed when we stop supporting torch < 1.12.0.
class _WrappedCall:
def __init__(self, cls, cls_call):
self.cls = cls
self.cls_call = cls_call
# Previously, if an error occurred when valid
# symbolically-traced code was run with an invalid input, the
# user would see the source of the error as coming from
    # `File "<eval_with_key_N>"`, where N is some number. We use
# this function to generate a more informative error message. We
# return the traceback itself, a message explaining that the
# error occurred in a traced Module's generated forward
# function, and five lines of context surrounding the faulty
# line
@staticmethod
def _generate_error_message(frame_summary: traceback.FrameSummary) -> str:
# auxiliary variables (for readability)
err_lineno = frame_summary.lineno
assert err_lineno is not None
line = frame_summary.line
assert line is not None
err_line_len = len(line)
all_src_lines = linecache.getlines(frame_summary.filename)
# constituent substrings of the error message
tb_repr = traceback.format_exc()
custom_msg = (
"Call using an FX-traced Module, "
f"line {err_lineno} of the traced Module's "
"generated forward function:"
)
before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
marker = "~" * err_line_len + "~~~ <--- HERE"
err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2])
# joined message
return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])
def __call__(self, obj, *args, **kwargs):
try:
if self.cls_call is not None:
return self.cls_call(obj, *args, **kwargs)
else:
return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
except Exception as e:
assert e.__traceback__
topmost_framesummary: traceback.FrameSummary = traceback.StackSummary.extract(
traceback.walk_tb(e.__traceback__)
)[
-1
] # type: ignore[arg-type]
if "eval_with_key" in topmost_framesummary.filename:
print(_WrappedCall._generate_error_message(topmost_framesummary), file=sys.stderr)
raise e.with_traceback(None)
else:
raise e
class ColoGraphModule(torch.fx.GraphModule):
"""
    ColoGraphModule is an nn.Module generated from an fx.Graph.
    ColoGraphModule has a ``graph`` attribute, as well as ``code`` and ``forward``
attributes generated from that ``graph``.
The difference between ``ColoGraphModule`` and ``torch.fx.GraphModule`` is that
``ColoGraphModule`` has a ``bind()`` function to bind customized functions
(i.e. activation checkpoint) to ``code`` of ``nn.Module``. If you want to use
specific features in Colossal-AI that are not supported by ``torch.fx.GraphModule``,
you can use ``ColoGraphModule`` instead.
``colossalai.fx.symbolic_trace()`` will return a ``ColoGraphModule`` as default.
.. warning::
When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically
regenerated. However, if you edit the contents of the ``graph`` without reassigning
the ``graph`` attribute itself, you must call ``recompile()`` to update the generated
code.
"""
def __init__(
self, root: Union[torch.nn.Module, Dict[str, Any]], graph: torch.fx.Graph, class_name: str = "GraphModule"
):
super().__init__(root, graph, class_name)
def bind(self, ckpt_def, globals):
"""Bind function needed for correctly execute ``GraphModule.forward()``
We need to bind checkpoint functions to ``ColoGraphModule`` so that we could
correctly execute ``GraphModule.forward()``
Args:
ckpt_def (List[str]): definition before the forward function
globals (Dict[str, Any]): global variables
"""
ckpt_code = "\n".join(ckpt_def)
globals_copy = globals.copy()
_exec_with_source(ckpt_code, globals_copy)
func_list = [func for func in globals_copy.keys() if "checkpoint" in func or "pack" in func]
for func in func_list:
tmp_func = globals_copy[func]
setattr(self, func, tmp_func.__get__(self, self.__class__))
del globals_copy[func]
def recompile(self) -> PythonCode:
"""
Recompile this GraphModule from its ``graph`` attribute. This should be
called after editing the contained ``graph``, otherwise the generated
code of this ``GraphModule`` will be out of date.
"""
if SUPPORT_PT_CODEGEN and isinstance(self._graph._codegen, _PyTreeCodeGen):
self._in_spec = self._graph._codegen.pytree_info.in_spec
self._out_spec = self._graph._codegen.pytree_info.out_spec
python_code = self._graph.python_code(root_module="self")
self._code = python_code.src
# To split ckpt functions code and forward code
_code_list = self._code.split("\n")
_fwd_def = [item for item in _code_list if "def forward" in item][0]
_fwd_idx = _code_list.index(_fwd_def)
ckpt_def = _code_list[:_fwd_idx]
self._code = "\n".join(_code_list[_fwd_idx:])
self.bind(ckpt_def, python_code.globals)
cls = type(self)
cls.forward = _forward_from_src(self._code, python_code.globals)
# Determine whether this class explicitly defines a __call__ implementation
# to wrap. If it does, save it in order to have wrapped_call invoke it.
# If it does not, wrapped_call can use a dynamic call to super() instead.
# In most cases, super().__call__ should be torch.nn.Module.__call__.
# We do not want to hold a reference to Module.__call__ here; doing so will
# bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
cls_call = cls.__call__ if "__call__" in vars(cls) else None
if "_wrapped_call" not in vars(cls):
cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined]
def call_wrapped(self, *args, **kwargs):
return self._wrapped_call(self, *args, **kwargs)
cls.__call__ = call_wrapped
# reset self._code to original src, otherwise to_folder will be wrong
self._code = python_code.src
return python_code
def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"):
"""Dumps out module to ``folder`` with ``module_name`` so that it can be
imported with ``from <folder> import <module_name>``
Args:
folder (Union[str, os.PathLike]): The folder to write the code out to
module_name (str): Top-level name to use for the ``Module`` while
writing out the code
"""
folder = Path(folder)
Path(folder).mkdir(exist_ok=True)
torch.save(self.state_dict(), folder / "state_dict.pt")
tab = " " * 4
# we add import colossalai here
model_str = f"""
import torch
from torch.nn import *
import colossalai
class {module_name}(torch.nn.Module):
def __init__(self):
super().__init__()
"""
def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:
safe_reprs = [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
if type(module) in safe_reprs:
return f"{module.__repr__()}"
else:
return None
blobified_modules = []
for module_name, module in self.named_children():
module_str = _gen_model_repr(module_name, module)
if module_str is None:
module_file = folder / f"{module_name}.pt"
torch.save(module, module_file)
blobified_modules.append(module_name)
module_repr = module.__repr__().replace("\r", " ").replace("\n", " ")
module_str = f"torch.load(r'{module_file}') # {module_repr}"
model_str += f"{tab*2}self.{module_name} = {module_str}\n"
for buffer_name, buffer in self._buffers.items():
if buffer is None:
continue
model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n"
for param_name, param in self._parameters.items():
if param is None:
continue
model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n"
model_str += f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n"
model_str += f"{_addindent(self.code, 4)}\n"
module_file = folder / "module.py"
module_file.write_text(model_str)
init_file = folder / "__init__.py"
init_file.write_text("from .module import *")
if len(blobified_modules) > 0:
warnings.warn(
"Was not able to save the following children modules as reprs -"
f"saved as pickled files instead: {blobified_modules}"
)
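# Minimal workflow sketch, assuming `colossalai.fx.symbolic_trace` is importable as described
# in the class docstring; the toy model and output folder are placeholders.
def _example_colo_graph_module_usage():
    from colossalai.fx import symbolic_trace  # assumed import path per the docstring
    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    gm = symbolic_trace(model)  # returns a ColoGraphModule by default
    gm.recompile()  # regenerates `code` and `forward` after graph edits
    gm.to_folder("exported_module", "FxModule")  # writes module.py, __init__.py and state_dict.pt
    return gm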
| 514256.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
from __future__ import annotations
import logging
from collections.abc import Iterable
from typing import NotRequired, TypedDict
from django.db.models.query import QuerySet
from django.http import Http404, HttpResponse, StreamingHttpResponse
from rest_framework.request import Request
from rest_framework.response import Response
from symbolic.debuginfo import normalize_debug_id
from symbolic.exceptions import SymbolicError
from sentry import ratelimits
from sentry.api.api_owners import ApiOwner
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.project import ProjectEndpoint, ProjectReleasePermission
from sentry.api.endpoints.debug_files import has_download_permission
from sentry.api.serializers import serialize
from sentry.auth.system import is_system_auth
from sentry.debug_files.artifact_bundles import (
MAX_BUNDLES_QUERY,
query_artifact_bundles_containing_file,
)
from sentry.lang.native.sources import get_internal_artifact_lookup_source_url
from sentry.models.artifactbundle import NULL_STRING, ArtifactBundle
from sentry.models.distribution import Distribution
from sentry.models.project import Project
from sentry.models.release import Release
from sentry.models.releasefile import ReleaseFile
from sentry.utils import metrics
logger = logging.getLogger("sentry.api")
# The marker for "release" bundles
RELEASE_BUNDLE_TYPE = "release.bundle"
# The number of files returned by the `get_releasefiles` query
MAX_RELEASEFILES_QUERY = 10
class _Artifact(TypedDict):
id: str
type: str
url: str
resolved_with: str
abs_path: NotRequired[str]
headers: NotRequired[dict[str, object]]
@region_silo_endpoint
class ProjectArtifactLookupEndpoint(ProjectEndpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectReleasePermission,)
def download_file(self, download_id, project: Project):
split = download_id.split("/")
if len(split) < 2:
raise Http404
ty, ty_id, *_rest = split
rate_limited = ratelimits.backend.is_limited(
project=project,
key=f"rl:ArtifactLookupEndpoint:download:{download_id}:{project.id}",
limit=10,
)
if rate_limited:
logger.info(
"notification.rate_limited",
extra={"project_id": project.id, "file_id": download_id},
)
return HttpResponse({"Too many download requests"}, status=429)
file_m: ArtifactBundle | ReleaseFile | None = None
if ty == "artifact_bundle":
file_m = (
ArtifactBundle.objects.filter(
id=ty_id,
projectartifactbundle__project_id=project.id,
)
.select_related("file")
.first()
)
metrics.incr("sourcemaps.download.artifact_bundle")
elif ty == "release_file":
# NOTE: `ReleaseFile` does have a `project_id`, but that seems to
# be always empty, so using the `organization_id` instead.
file_m = (
ReleaseFile.objects.filter(id=ty_id, organization_id=project.organization.id)
.select_related("file")
.first()
)
metrics.incr("sourcemaps.download.release_file")
if file_m is None:
raise Http404
file = file_m.file
try:
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b""), content_type="application/octet-stream"
)
response["Content-Length"] = file.size
response["Content-Disposition"] = f'attachment; filename="{file.name}"'
return response
except OSError:
raise Http404
def get(self, request: Request, project: Project) -> Response:
"""
List a Project's Individual Artifacts or Bundles
````````````````````````````````````````
Retrieve a list of individual artifacts or artifact bundles for a given project.
:pparam string organization_id_or_slug: the id or slug of the organization to query.
:pparam string project_id_or_slug: the id or slug of the project to query.
:qparam string debug_id: if set, will query and return the artifact
bundle that matches the given `debug_id`.
:qparam string url: if set, will query and return all the individual
artifacts, or artifact bundles that contain files
that match the `url`. This is using a substring-match.
:qparam string release: used in conjunction with `url`.
:qparam string dist: used in conjunction with `url`.
:auth: required
"""
if (download_id := request.GET.get("download")) is not None:
if has_download_permission(request, project):
return self.download_file(download_id, project)
else:
return Response(status=403)
debug_id = request.GET.get("debug_id")
try:
debug_id = normalize_debug_id(debug_id)
except SymbolicError:
pass
url = request.GET.get("url") or NULL_STRING
release_name = request.GET.get("release") or NULL_STRING
dist_name = request.GET.get("dist") or NULL_STRING
# First query all the files:
# We first do that using the `ArtifactBundle` infrastructure.
artifact_bundles = query_artifact_bundles_containing_file(
project, release_name, dist_name, url, debug_id
)
all_bundles: dict[str, str] = {
f"artifact_bundle/{bundle_id}": resolved for bundle_id, resolved in artifact_bundles
}
# If no `ArtifactBundle`s were found matching the file, we fall back to
# looking up the file using the legacy `ReleaseFile` infrastructure.
individual_files: Iterable[ReleaseFile] = []
if not artifact_bundles:
release, dist = try_resolve_release_dist(project, release_name, dist_name)
if release:
metrics.incr("sourcemaps.lookup.release_file")
for releasefile_id in get_legacy_release_bundles(release, dist):
all_bundles[f"release_file/{releasefile_id}"] = "release-old"
individual_files = get_legacy_releasefile_by_file_url(release, dist, url)
# Then: Construct our response
url_constructor = UrlConstructor(request, project)
found_artifacts: list[_Artifact] = []
for download_id, resolved_with in all_bundles.items():
found_artifacts.append(
{
"id": download_id,
"type": "bundle",
"url": url_constructor.url_for_file_id(download_id),
"resolved_with": resolved_with,
}
)
for release_file in individual_files:
download_id = f"release_file/{release_file.id}"
found_artifacts.append(
{
"id": download_id,
"type": "file",
"url": url_constructor.url_for_file_id(download_id),
# The `name` is the url/abs_path of the file,
# as in: `"~/path/to/file.min.js"`.
"abs_path": release_file.name,
# These headers should ideally include the `Sourcemap` reference
"headers": release_file.file.headers,
"resolved_with": "release-old",
}
)
# make sure we have a stable sort order for tests
def natural_sort(key: str) -> tuple[str, int]:
split = key.split("/")
if len(split) > 1:
ty, ty_id = split
return (ty, int(ty_id))
else:
return ("", int(split[0]))
found_artifacts.sort(key=lambda x: natural_sort(x["id"]))
# NOTE: We do not paginate this response, as we have very tight limits on all the individual queries.
return Response(serialize(found_artifacts, request.user))
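# Client-side sketch of the lookup described in the endpoint docstring; the base URL, slugs,
# token and route name are placeholders/assumptions, not confirmed by this module.
def _example_artifact_lookup_request():
    import requests  # local import keeps the sketch self-contained
    url = "https://sentry.example.com/api/0/projects/my-org/my-project/artifact-lookup/"
    params = {"url": "~/static/js/app.min.js", "release": "my-release@1.0.0"}
    headers = {"Authorization": "Bearer <token>"}  # placeholder auth token
    return requests.get(url, params=params, headers=headers, timeout=10)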
def try_resolve_release_dist(
project: Project, release_name: str, dist_name: str
) -> tuple[Release | None, Distribution | None]:
release = None
dist = None
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=release_name,
)
# We cannot query for dist without a release anyway
if dist_name:
dist = Distribution.objects.get(release=release, name=dist_name)
except (Release.DoesNotExist, Distribution.DoesNotExist):
pass
except Exception:
logger.exception("Failed to read")
return release, dist
def get_legacy_release_bundles(release: Release, dist: Distribution | None) -> set[int]:
return set(
ReleaseFile.objects.filter(
release_id=release.id,
dist_id=dist.id if dist else None,
# a `ReleaseFile` with `0` artifacts represents a release archive,
# see the comment above the definition of `artifact_count`.
artifact_count=0,
# similarly the special `type` is also used for release archives.
file__type=RELEASE_BUNDLE_TYPE,
).values_list("id", flat=True)
# TODO: this `order_by` might be incredibly slow
# we want to have a hard limit on the returned bundles here. and we would
# want to pick the most recently uploaded ones. that should mostly be
# relevant for customers that upload multiple bundles, or are uploading
# newer files for existing releases. In that case the symbolication is
# already degraded, so meh...
# .order_by("-file__timestamp")
[:MAX_BUNDLES_QUERY]
)
def get_legacy_releasefile_by_file_url(
release: Release, dist: Distribution | None, url: str
) -> QuerySet[ReleaseFile]:
# Exclude files which are also present in archive:
return (
ReleaseFile.public_objects.filter(
release_id=release.id,
dist_id=dist.id if dist else None,
)
.exclude(artifact_count=0)
.select_related("file")
).filter(name__icontains=url)[:MAX_RELEASEFILES_QUERY]
class UrlConstructor:
def __init__(self, request: Request, project: Project):
if is_system_auth(request.auth):
self.base_url = get_internal_artifact_lookup_source_url(project)
else:
self.base_url = request.build_absolute_uri(request.path)
def url_for_file_id(self, download_id: str) -> str:
# NOTE: Returning a self-route that requires authentication (via Bearer token)
# is not really forward compatible with a pre-signed URL that does not
# require any authentication or headers whatsoever.
# This also requires a workaround in Symbolicator, as its generic http
# downloader blocks "internal" IPs, whereas the internal Sentry downloader
# is explicitly exempt.
return f"{self.base_url}?download={download_id}"
| 610060.py | [
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')"
] |
from __future__ import annotations
from collections import defaultdict
from collections.abc import Sequence
from datetime import timedelta
from email.headerregistry import Address
from functools import reduce
from typing import Any
from django.db.models import Q
from django.utils import timezone
from rest_framework import status
from rest_framework.request import Request
from rest_framework.response import Response
from sentry import roles
from sentry.api.api_owners import ApiOwner
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import region_silo_endpoint
from sentry.api.bases.organization import OrganizationEndpoint, OrganizationPermission
from sentry.api.serializers import Serializer, serialize
from sentry.constants import ObjectStatus
from sentry.integrations.base import IntegrationFeatures
from sentry.integrations.services.integration import integration_service
from sentry.models.commitauthor import CommitAuthor
from sentry.models.organization import Organization
FILTERED_EMAILS = {
"%%@gmail.com",
"%%@yahoo.com",
"%%@icloud.com",
"%%@hotmail.com",
"%%@outlook.com",
"%%@noreply.github.com",
"%%@localhost",
"[email protected]",
}
FILTERED_CHARACTERS = {"+"}
class MissingOrgMemberSerializer(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
formatted_external_id = _format_external_id(obj.external_id)
return {
"email": obj.email,
"externalId": formatted_external_id,
"commitCount": obj.commit__count,
}
class MissingMembersPermission(OrganizationPermission):
scope_map = {"GET": ["org:write"]}
def _format_external_id(external_id: str | None) -> str | None:
formatted_external_id = external_id
if external_id is not None and ":" in external_id:
formatted_external_id = external_id.split(":")[1]
return formatted_external_id
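# Tiny illustration of the helper above: any provider prefix before ":" is dropped.
def _example_format_external_id():
    assert _format_external_id("github:12345") == "12345"
    assert _format_external_id("12345") == "12345"
    assert _format_external_id(None) is None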
def _get_missing_organization_members(
organization: Organization,
provider: str,
integration_ids: Sequence[int],
shared_domain: str | None,
) -> list[Any]:
org_id = organization.id
domain_query = ""
if shared_domain:
domain_query = (
f"AND (UPPER(sentry_commitauthor.email::text) LIKE UPPER('%%{shared_domain}'))"
)
else:
for filtered_email in FILTERED_EMAILS:
domain_query += (
f"AND (UPPER(sentry_commitauthor.email::text) NOT LIKE UPPER('{filtered_email}')) "
)
date_added = (timezone.now() - timedelta(days=30)).strftime("%Y-%m-%d, %H:%M:%S")
query = """
SELECT sentry_commitauthor.id, sentry_commitauthor.organization_id, sentry_commitauthor.name, sentry_commitauthor.email, sentry_commitauthor.external_id, COUNT(sentry_commit.id) AS commit__count FROM sentry_commitauthor
INNER JOIN (
select * from sentry_commit
WHERE sentry_commit.organization_id = %(org_id)s
AND date_added >= %(date_added)s
order by date_added desc limit 1000
) as sentry_commit ON sentry_commitauthor.id = sentry_commit.author_id
WHERE sentry_commit.repository_id IN
(
select id
from sentry_repository
where provider = %(provider)s
and organization_id = %(org_id)s
and integration_id in %(integration_ids)s
)
AND sentry_commit.author_id IN
(select id from sentry_commitauthor
WHERE sentry_commitauthor.organization_id = %(org_id)s
AND NOT (
(UPPER(sentry_commitauthor.email::text) IN (select coalesce(UPPER(email), UPPER(user_email)) from sentry_organizationmember where organization_id = %(org_id)s and (email is not null or user_email is not null)
)
OR sentry_commitauthor.external_id IS NULL))
"""
# adding the extra domain query here prevents django raw from putting extra quotations around it
query += domain_query
query += """
AND NOT (UPPER(sentry_commitauthor.email::text) LIKE UPPER('%%+%%'))
)
GROUP BY sentry_commitauthor.id ORDER BY commit__count DESC limit 50"""
param_dict = {
"org_id": org_id,
"date_added": date_added,
"provider": "integrations:" + provider,
"integration_ids": tuple(integration_ids),
}
return list(CommitAuthor.objects.raw(query, param_dict))
def _get_shared_email_domain(organization: Organization) -> str | None:
# if a member has user_email=None, then they have yet to accept an invite
org_owners = organization.get_members_with_org_roles(roles=[roles.get_top_dog().id]).exclude(
Q(user_email=None) | Q(user_email="")
)
def _get_email_domain(email: str) -> str | None:
try:
domain = Address(addr_spec=email).domain
except Exception:
return None
return domain
owner_email_domains = {_get_email_domain(owner.user_email) for owner in org_owners}
# all owners have the same email domain
if len(owner_email_domains) == 1:
return owner_email_domains.pop()
return None
@region_silo_endpoint
class OrganizationMissingMembersEndpoint(OrganizationEndpoint):
owner = ApiOwner.ECOSYSTEM
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (MissingMembersPermission,)
def get(self, request: Request, organization: Organization) -> Response:
# ensure the organization has an integration with the commit feature
integrations = integration_service.get_integrations(
organization_id=organization.id, status=ObjectStatus.ACTIVE
)
def provider_reducer(dict, integration):
if not integration.has_feature(feature=IntegrationFeatures.COMMITS):
return dict
if dict.get(integration.provider):
dict[integration.provider].append(integration.id)
else:
dict[integration.provider] = [integration.id]
return dict
integration_provider_to_ids: dict[str, Sequence[int]] = reduce(
provider_reducer, integrations, defaultdict(list)
)
shared_domain = _get_shared_email_domain(organization)
missing_org_members = []
for integration_provider, integration_ids in integration_provider_to_ids.items():
# TODO(cathy): allow other integration providers
if integration_provider != "github":
continue
queryset = _get_missing_organization_members(
organization, integration_provider, integration_ids, shared_domain
)
missing_members_for_integration = {
"integration": integration_provider,
"users": serialize(queryset, request.user, serializer=MissingOrgMemberSerializer()),
}
missing_org_members.append(missing_members_for_integration)
return Response(
missing_org_members,
status=status.HTTP_200_OK,
)
| 239585.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
from typing import Any
from django.db.models import Q
from drf_spectacular.utils import extend_schema
from rest_framework.exceptions import ParseError
from rest_framework.request import Request
from rest_framework.response import Response
from sentry.api.api_publish_status import ApiPublishStatus
from sentry.api.base import EnvironmentMixin, region_silo_endpoint
from sentry.api.bases.organization import OrganizationAndStaffPermission, OrganizationEndpoint
from sentry.api.paginator import OffsetPaginator
from sentry.api.serializers import serialize
from sentry.api.serializers.models.project import (
OrganizationProjectResponse,
ProjectSummarySerializer,
)
from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RESPONSE_UNAUTHORIZED
from sentry.apidocs.examples.organization_examples import OrganizationExamples
from sentry.apidocs.parameters import CursorQueryParam, GlobalParams
from sentry.apidocs.utils import inline_sentry_response_serializer
from sentry.constants import ObjectStatus
from sentry.models.project import Project
from sentry.models.team import Team
from sentry.search.utils import tokenize_query
from sentry.snuba import discover, metrics_enhanced_performance, metrics_performance
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '1h', '24h', '7d', '14d', and '30d'"
DATASETS = {
"": discover, # in case they pass an empty query string fall back on default
"discover": discover,
"metricsEnhanced": metrics_enhanced_performance,
"metrics": metrics_performance,
}
def get_dataset(dataset_label: str) -> Any:
if dataset_label not in DATASETS:
raise ParseError(detail=f"dataset must be one of: {', '.join(DATASETS.keys())}")
return DATASETS[dataset_label]
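# Small sketch of the dataset resolution above: known labels map to snuba modules, anything
# else raises a ParseError that surfaces to the client as a 400 response.
def _example_get_dataset():
    assert get_dataset("") is discover
    assert get_dataset("metrics") is metrics_performance
    try:
        get_dataset("unknown")
    except ParseError:
        pass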
@extend_schema(tags=["Organizations"])
@region_silo_endpoint
class OrganizationProjectsEndpoint(OrganizationEndpoint, EnvironmentMixin):
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
permission_classes = (OrganizationAndStaffPermission,)
@extend_schema(
operation_id="List an Organization's Projects",
parameters=[GlobalParams.ORG_ID_OR_SLUG, CursorQueryParam],
request=None,
responses={
200: inline_sentry_response_serializer(
"OrganizationProjectResponseDict", list[OrganizationProjectResponse]
),
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=OrganizationExamples.LIST_PROJECTS,
)
def get(self, request: Request, organization) -> Response:
"""
        Return a list of projects bound to an organization.
"""
stats_period = request.GET.get("statsPeriod")
collapse = request.GET.getlist("collapse", [])
if stats_period not in (None, "", "1h", "24h", "7d", "14d", "30d"):
return Response(
{"error": {"params": {"stats_period": {"message": ERR_INVALID_STATS_PERIOD}}}},
status=400,
)
elif not stats_period:
# disable stats
stats_period = None
datasetName = request.GET.get("dataset", "discover")
dataset = get_dataset(datasetName)
if request.auth and not request.user.is_authenticated:
# TODO: remove this, no longer supported probably
if hasattr(request.auth, "project"):
queryset = Project.objects.filter(id=request.auth.project.id)
elif request.auth.organization_id is not None:
org = request.auth.organization_id
team_list = list(Team.objects.filter(organization_id=org))
queryset = Project.objects.filter(teams__in=team_list)
else:
return Response(
{"detail": "Current access does not point to " "organization."}, status=400
)
else:
queryset = Project.objects.filter(organization=organization)
order_by = ["slug"]
if request.user.is_authenticated:
queryset = queryset.extra(
select={
"is_bookmarked": """exists (
select *
from sentry_projectbookmark spb
where spb.project_id = sentry_project.id and spb.user_id = %s
)"""
},
select_params=(request.user.id,),
)
order_by.insert(0, "-is_bookmarked")
query = request.GET.get("query")
if query:
tokens = tokenize_query(query)
for key, value in tokens.items():
if key == "query":
value = " ".join(value)
queryset = queryset.filter(Q(name__icontains=value) | Q(slug__icontains=value))
elif key == "id":
if all(v.isdigit() for v in value):
queryset = queryset.filter(id__in=value)
else:
return Response(
{
"error": {
"params": {
"stats_period": {
"message": "All 'id' values must be integers."
}
}
}
},
status=400,
)
elif key == "slug":
queryset = queryset.filter(slug__in=value)
elif key == "team":
team_list = list(Team.objects.filter(organization=organization, slug__in=value))
queryset = queryset.filter(teams__in=team_list)
elif key == "!team":
team_list = list(Team.objects.filter(organization=organization, slug__in=value))
queryset = queryset.exclude(teams__in=team_list)
elif key == "is_member":
queryset = queryset.filter(teams__organizationmember__user_id=request.user.id)
else:
queryset = queryset.none()
queryset = queryset.filter(status=ObjectStatus.ACTIVE).distinct()
# TODO(davidenwang): remove this after frontend requires only paginated projects
get_all_projects = request.GET.get("all_projects") == "1"
if get_all_projects:
queryset = queryset.order_by("slug").select_related("organization")
return Response(
serialize(
list(queryset),
request.user,
ProjectSummarySerializer(collapse=collapse, dataset=dataset),
)
)
else:
expand = set()
if request.GET.get("transactionStats"):
expand.add("transaction_stats")
if request.GET.get("sessionStats"):
expand.add("session_stats")
expand_context = {"options": request.GET.getlist("options") or []}
if expand_context:
expand.add("options")
def serialize_on_result(result):
environment_id = self._get_environment_id_from_request(request, organization.id)
serializer = ProjectSummarySerializer(
environment_id=environment_id,
stats_period=stats_period,
expand=expand,
expand_context=expand_context,
collapse=collapse,
dataset=dataset,
)
return serialize(result, request.user, serializer)
return self.paginate(
request=request,
queryset=queryset,
order_by=order_by,
on_results=serialize_on_result,
paginator_cls=OffsetPaginator,
)
@region_silo_endpoint
class OrganizationProjectsCountEndpoint(OrganizationEndpoint, EnvironmentMixin):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization) -> Response:
queryset = Project.objects.filter(organization=organization)
all_projects = queryset.count()
my_projects = queryset.filter(teams__organizationmember__user_id=request.user.id).count()
return Response({"allProjects": all_projects, "myProjects": my_projects})
| 557095.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from ldm.util import instantiate_from_config
from ldm.modules.ema import LitEma
class AutoencoderKL(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
ema_decay=None,
learn_logvar=False
):
super().__init__()
self.learn_logvar = learn_logvar
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
assert ddconfig["double_z"]
self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
self.embed_dim = embed_dim
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
self.use_ema = ema_decay is not None
if self.use_ema:
self.ema_decay = ema_decay
assert 0. < ema_decay < 1.
self.model_ema = LitEma(self, decay=ema_decay)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.parameters())
self.model_ema.copy_to(self)
if context is not None:
print(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self)
def encode(self, x):
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior
def decode(self, z):
z = self.post_quant_conv(z)
dec = self.decoder(z)
return dec
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
return x
def training_step(self, batch, batch_idx, optimizer_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
if optimizer_idx == 0:
# train encoder+decoder+logvar
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return aeloss
if optimizer_idx == 1:
# train the discriminator
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return discloss
def validation_step(self, batch, batch_idx):
log_dict = self._validation_step(batch, batch_idx)
with self.ema_scope():
log_dict_ema = self._validation_step(batch, batch_idx, postfix="_ema")
return log_dict
def _validation_step(self, batch, batch_idx, postfix=""):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
last_layer=self.get_last_layer(), split="val"+postfix)
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
last_layer=self.get_last_layer(), split="val"+postfix)
self.log(f"val{postfix}/rec_loss", log_dict_ae[f"val{postfix}/rec_loss"])
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(
self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())
if self.learn_logvar:
print(f"{self.__class__.__name__}: Learning logvar")
ae_params_list.append(self.loss.logvar)
opt_ae = torch.optim.Adam(ae_params_list,
lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr, betas=(0.5, 0.9))
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
@torch.no_grad()
def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if not only_inputs:
xrec, posterior = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["samples"] = self.decode(torch.randn_like(posterior.sample()))
log["reconstructions"] = xrec
if log_ema or self.use_ema:
with self.ema_scope():
xrec_ema, posterior_ema = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec_ema.shape[1] > 3
xrec_ema = self.to_rgb(xrec_ema)
log["samples_ema"] = self.decode(torch.randn_like(posterior_ema.sample()))
log["reconstructions_ema"] = xrec_ema
log["inputs"] = x
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
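# Rough round-trip sketch; it assumes an already-instantiated AutoencoderKL (ddconfig and
# lossconfig come from a model YAML), so the fake batch and shapes are illustrative only.
def _example_autoencoder_roundtrip(model):
    x = torch.randn(1, 3, 64, 64)  # fake RGB batch, roughly in [-1, 1]
    posterior = model.encode(x)  # DiagonalGaussianDistribution over the latents
    z = posterior.sample()
    return model.decode(z).shape  # back to image space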
class IdentityFirstStage(torch.nn.Module):
def __init__(self, *args, vq_interface=False, **kwargs):
self.vq_interface = vq_interface
super().__init__()
def encode(self, x, *args, **kwargs):
return x
def decode(self, x, *args, **kwargs):
return x
def quantize(self, x, *args, **kwargs):
if self.vq_interface:
return x, None, [None, None, None]
return x
def forward(self, x, *args, **kwargs):
return x
| 299989.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
"""make variations of input image"""
import argparse, os
import PIL
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
from torchvision.utils import make_grid
from torch import autocast
from contextlib import nullcontext
from pytorch_lightning import seed_everything
from imwatermark import WatermarkEncoder
from scripts.txt2img import put_watermark
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
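# Quick illustration of `chunk`: it yields fixed-size tuples with a shorter final tail.
def _example_chunk():
    assert list(chunk(range(5), 2)) == [(0, 1), (2, 3), (4,)]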
def load_model_from_config(config, ckpt, verbose=False):
print(f"Loading model from {ckpt}")
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
model = instantiate_from_config(config.model)
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
model.cuda()
model.eval()
return model
def load_img(path):
image = Image.open(path).convert("RGB")
w, h = image.size
print(f"loaded input image of size ({w}, {h}) from {path}")
w, h = map(lambda x: x - x % 64, (w, h)) # resize to integer multiple of 64
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
image = np.array(image).astype(np.float32) / 255.0
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image)
return 2. * image - 1.
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--prompt",
type=str,
nargs="?",
default="a painting of a virus monster playing guitar",
help="the prompt to render"
)
parser.add_argument(
"--init-img",
type=str,
nargs="?",
help="path to the input image"
)
parser.add_argument(
"--outdir",
type=str,
nargs="?",
help="dir to write results to",
default="outputs/img2img-samples"
)
parser.add_argument(
"--ddim_steps",
type=int,
default=50,
help="number of ddim sampling steps",
)
parser.add_argument(
"--fixed_code",
action='store_true',
help="if enabled, uses the same starting code across all samples ",
)
parser.add_argument(
"--ddim_eta",
type=float,
default=0.0,
help="ddim eta (eta=0.0 corresponds to deterministic sampling",
)
parser.add_argument(
"--n_iter",
type=int,
default=1,
help="sample this often",
)
parser.add_argument(
"--C",
type=int,
default=4,
help="latent channels",
)
parser.add_argument(
"--f",
type=int,
default=8,
help="downsampling factor, most often 8 or 16",
)
parser.add_argument(
"--n_samples",
type=int,
default=2,
help="how many samples to produce for each given prompt. A.k.a batch size",
)
parser.add_argument(
"--n_rows",
type=int,
default=0,
help="rows in the grid (default: n_samples)",
)
parser.add_argument(
"--scale",
type=float,
default=9.0,
help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
)
parser.add_argument(
"--strength",
type=float,
default=0.8,
help="strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image",
)
parser.add_argument(
"--from-file",
type=str,
help="if specified, load prompts from this file",
)
parser.add_argument(
"--config",
type=str,
default="configs/stable-diffusion/v2-inference.yaml",
help="path to config which constructs model",
)
parser.add_argument(
"--ckpt",
type=str,
help="path to checkpoint of model",
)
parser.add_argument(
"--seed",
type=int,
default=42,
help="the seed (for reproducible sampling)",
)
parser.add_argument(
"--precision",
type=str,
help="evaluate at this precision",
choices=["full", "autocast"],
default="autocast"
)
opt = parser.parse_args()
seed_everything(opt.seed)
config = OmegaConf.load(f"{opt.config}")
model = load_model_from_config(config, f"{opt.ckpt}")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = model.to(device)
sampler = DDIMSampler(model)
os.makedirs(opt.outdir, exist_ok=True)
outpath = opt.outdir
print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
wm = "SDV2"
wm_encoder = WatermarkEncoder()
wm_encoder.set_watermark('bytes', wm.encode('utf-8'))
batch_size = opt.n_samples
n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
if not opt.from_file:
prompt = opt.prompt
assert prompt is not None
data = [batch_size * [prompt]]
else:
print(f"reading prompts from {opt.from_file}")
with open(opt.from_file, "r") as f:
data = f.read().splitlines()
data = list(chunk(data, batch_size))
sample_path = os.path.join(outpath, "samples")
os.makedirs(sample_path, exist_ok=True)
base_count = len(os.listdir(sample_path))
grid_count = len(os.listdir(outpath)) - 1
assert os.path.isfile(opt.init_img)
init_image = load_img(opt.init_img).to(device)
init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image)) # move to latent space
sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)
assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]'
t_enc = int(opt.strength * opt.ddim_steps)
print(f"target t_enc is {t_enc} steps")
precision_scope = autocast if opt.precision == "autocast" else nullcontext
with torch.no_grad():
with precision_scope("cuda"):
with model.ema_scope():
all_samples = list()
for n in trange(opt.n_iter, desc="Sampling"):
for prompts in tqdm(data, desc="data"):
uc = None
if opt.scale != 1.0:
uc = model.get_learned_conditioning(batch_size * [""])
if isinstance(prompts, tuple):
prompts = list(prompts)
c = model.get_learned_conditioning(prompts)
# encode (scaled latent)
z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * batch_size).to(device))
# decode it
samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale,
unconditional_conditioning=uc, )
x_samples = model.decode_first_stage(samples)
x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)
for x_sample in x_samples:
x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
img = Image.fromarray(x_sample.astype(np.uint8))
img = put_watermark(img, wm_encoder)
img.save(os.path.join(sample_path, f"{base_count:05}.png"))
base_count += 1
all_samples.append(x_samples)
# additionally, save as grid
grid = torch.stack(all_samples, 0)
grid = rearrange(grid, 'n b c h w -> (n b) c h w')
grid = make_grid(grid, nrow=n_rows)
# to image
grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
grid = Image.fromarray(grid.astype(np.uint8))
grid = put_watermark(grid, wm_encoder)
grid.save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
grid_count += 1
print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")
if __name__ == "__main__":
main()
| 916680.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
#!/usr/bin/env python
# vim: set encoding=utf-8
# pylint: disable=wrong-import-position,wrong-import-order
"""
Main server program.
Configuration parameters:
path.internal.malformed
path.internal.static
path.internal.templates
path.log.main
path.log.queries
"""
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding('utf8')
import sys
import logging
import os
import requests
import jinja2
from flask import Flask, request, send_from_directory, redirect, Response
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "lib")))
from config import CONFIG
from limits import Limits
from cheat_wrapper import cheat_wrapper
from post import process_post_request
from options import parse_args
from stateful_queries import save_query, last_query
if not os.path.exists(os.path.dirname(CONFIG["path.log.main"])):
os.makedirs(os.path.dirname(CONFIG["path.log.main"]))
logging.basicConfig(
filename=CONFIG["path.log.main"],
level=logging.DEBUG,
format='%(asctime)s %(message)s')
# Fix Flask "exception and request logging" to `stderr`.
#
# When Flask's werkzeug detects that logging is already set, it
# doesn't add its own logger that prints exceptions.
stderr_handler = logging.StreamHandler()
logging.getLogger().addHandler(stderr_handler)
#
# Alter log format to disting log lines from everything else
stderr_handler.setFormatter(logging.Formatter('%(filename)s:%(lineno)s: %(message)s'))
#
# Sometimes werkzeug starts logging before an app is imported
# (https://github.com/pallets/werkzeug/issues/1969)
# resulting in duplicating lines. In that case we need root
# stderr handler to skip lines from werkzeug.
class SkipFlaskLogger(object):
def filter(self, record):
if record.name != 'werkzeug':
return True
if logging.getLogger('werkzeug').handlers:
stderr_handler.addFilter(SkipFlaskLogger())
app = Flask(__name__) # pylint: disable=invalid-name
app.jinja_loader = jinja2.ChoiceLoader([
app.jinja_loader,
jinja2.FileSystemLoader(CONFIG["path.internal.templates"])])
LIMITS = Limits()
PLAIN_TEXT_AGENTS = [
"curl",
"httpie",
"lwp-request",
"wget",
"python-requests",
"openbsd ftp",
"powershell",
"fetch",
"aiohttp",
]
def _is_html_needed(user_agent):
"""
    Based on `user_agent`, return whether it needs HTML or ANSI output
"""
return all([x not in user_agent for x in PLAIN_TEXT_AGENTS])
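# Illustration: the console clients listed above get ANSI output, everything else gets HTML.
def _example_is_html_needed():
    assert _is_html_needed("curl/7.68.0") is False
    assert _is_html_needed("mozilla/5.0 (x11; linux x86_64)") is True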
def is_result_a_script(query):
return query in [':cht.sh']
@app.route('/files/<path:path>')
def send_static(path):
"""
Return static file `path`.
Can be served by the HTTP frontend.
"""
return send_from_directory(CONFIG["path.internal.static"], path)
@app.route('/favicon.ico')
def send_favicon():
"""
Return static file `favicon.ico`.
Can be served by the HTTP frontend.
"""
return send_from_directory(CONFIG["path.internal.static"], 'favicon.ico')
@app.route('/malformed-response.html')
def send_malformed():
"""
Return static file `malformed-response.html`.
Can be served by the HTTP frontend.
"""
dirname, filename = os.path.split(CONFIG["path.internal.malformed"])
return send_from_directory(dirname, filename)
def log_query(ip_addr, found, topic, user_agent):
"""
Log processed query and some internal data
"""
log_entry = "%s %s %s %s\n" % (ip_addr, found, topic, user_agent)
with open(CONFIG["path.log.queries"], 'ab') as my_file:
my_file.write(log_entry.encode('utf-8'))
def get_request_ip(req):
"""
Extract IP address from `request`
"""
if req.headers.getlist("X-Forwarded-For"):
ip_addr = req.headers.getlist("X-Forwarded-For")[0]
if ip_addr.startswith('::ffff:'):
ip_addr = ip_addr[7:]
else:
ip_addr = req.remote_addr
return ip_addr
def get_answer_language(request):
"""
Return preferred answer language based on
domain name, query arguments and headers
"""
def _parse_accept_language(accept_language):
languages = accept_language.split(",")
locale_q_pairs = []
for language in languages:
try:
if language.split(";")[0] == language:
# no q => q = 1
locale_q_pairs.append((language.strip(), "1"))
else:
locale = language.split(";")[0].strip()
weight = language.split(";")[1].split("=")[1]
locale_q_pairs.append((locale, weight))
except IndexError:
pass
return locale_q_pairs
def _find_supported_language(accepted_languages):
for lang_tuple in accepted_languages:
lang = lang_tuple[0]
if '-' in lang:
lang = lang.split('-', 1)[0]
return lang
return None
lang = None
hostname = request.headers['Host']
if hostname.endswith('.cheat.sh'):
lang = hostname[:-9]
if 'lang' in request.args:
lang = request.args.get('lang')
header_accept_language = request.headers.get('Accept-Language', '')
if lang is None and header_accept_language:
lang = _find_supported_language(
_parse_accept_language(header_accept_language))
return lang
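# Sketch of the Accept-Language reduction performed above; the parsing helpers are nested
# inside get_answer_language, so the same steps are mirrored inline with a sample header.
def _example_language_resolution():
    accept_language = "de-DE,de;q=0.9,en;q=0.8"
    first_tag = accept_language.split(",")[0].split(";")[0].strip()
    assert first_tag.split("-", 1)[0] == "de"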
def _proxy(*args, **kwargs):
# print "method=", request.method,
# print "url=", request.url.replace('/:shell-x/', ':3000/')
# print "headers=", {key: value for (key, value) in request.headers if key != 'Host'}
# print "data=", request.get_data()
# print "cookies=", request.cookies
# print "allow_redirects=", False
url_before, url_after = request.url.split('/:shell-x/', 1)
url = url_before + ':3000/'
if 'q' in request.args:
url_after = '?' + "&".join("arg=%s" % x for x in request.args['q'].split())
url += url_after
print(url)
print(request.get_data())
resp = requests.request(
method=request.method,
url=url,
headers={key: value for (key, value) in request.headers if key != 'Host'},
data=request.get_data(),
cookies=request.cookies,
allow_redirects=False)
excluded_headers = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
headers = [(name, value) for (name, value) in resp.raw.headers.items()
if name.lower() not in excluded_headers]
response = Response(resp.content, resp.status_code, headers)
return response
@app.route("/", methods=['GET', 'POST'])
@app.route("/<path:topic>", methods=["GET", "POST"])
def answer(topic=None):
"""
    Main rendering function; it processes incoming queries.
Depending on user agent it returns output in HTML or ANSI format.
Incoming data:
request.args
request.headers
request.remote_addr
request.referrer
request.query_string
"""
user_agent = request.headers.get('User-Agent', '').lower()
html_needed = _is_html_needed(user_agent)
options = parse_args(request.args)
if topic in ['apple-touch-icon-precomposed.png', 'apple-touch-icon.png', 'apple-touch-icon-120x120-precomposed.png'] \
or (topic is not None and any(topic.endswith('/'+x) for x in ['favicon.ico'])):
return ''
request_id = request.cookies.get('id')
if topic is not None and topic.lstrip('/') == ':last':
if request_id:
topic = last_query(request_id)
else:
return "ERROR: you have to set id for your requests to use /:last\n"
else:
if request_id:
save_query(request_id, topic)
if request.method == 'POST':
process_post_request(request, html_needed)
if html_needed:
return redirect("/")
return "OK\n"
if 'topic' in request.args:
return redirect("/%s" % request.args.get('topic'))
if topic is None:
topic = ":firstpage"
if topic.startswith(':shell-x/'):
return _proxy()
#return requests.get('http://127.0.0.1:3000'+topic[8:]).text
lang = get_answer_language(request)
if lang:
options['lang'] = lang
ip_address = get_request_ip(request)
if '+' in topic:
not_allowed = LIMITS.check_ip(ip_address)
if not_allowed:
return "429 %s\n" % not_allowed, 429
html_is_needed = _is_html_needed(user_agent) and not is_result_a_script(topic)
if html_is_needed:
output_format='html'
else:
output_format='ansi'
result, found = cheat_wrapper(topic, request_options=options, output_format=output_format)
if 'Please come back in several hours' in result and html_is_needed:
malformed_response = open(os.path.join(CONFIG["path.internal.malformed"])).read()
return malformed_response
log_query(ip_address, found, topic, user_agent)
if html_is_needed:
return result
return Response(result, mimetype='text/plain')
| 919804.py | [
"CWE-601: URL Redirection to Untrusted Site ('Open Redirect')"
] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import contextlib
import requests
from lxml import etree
from hashlib import md5
from urllib import parse
from odoo import api, fields, models
from odoo.addons.account_peppol.tools.demo_utils import handle_demo
from odoo.tools.sql import column_exists, create_column
TIMEOUT = 10
class ResPartner(models.Model):
_inherit = 'res.partner'
account_peppol_is_endpoint_valid = fields.Boolean(
string="PEPPOL endpoint validity",
help="The partner's EAS code and PEPPOL endpoint are valid",
compute="_compute_account_peppol_is_endpoint_valid", store=True,
copy=False,
)
account_peppol_validity_last_check = fields.Date(
string="Checked on",
help="Last Peppol endpoint verification",
compute="_compute_account_peppol_is_endpoint_valid", store=True,
copy=False,
)
account_peppol_verification_label = fields.Selection(
selection=[
('not_verified', 'Not verified yet'),
('not_valid', 'Not valid'), # does not exist on Peppol at all
('not_valid_format', 'Cannot receive this format'), # registered on Peppol but cannot receive the selected document type
('valid', 'Valid'),
],
string='Peppol endpoint validity',
compute='_compute_account_peppol_verification_label',
copy=False,
) # field to compute the label to show for partner endpoint
is_peppol_edi_format = fields.Boolean(compute='_compute_is_peppol_edi_format')
def _auto_init(self):
"""Create columns `account_peppol_is_endpoint_valid` and `account_peppol_validity_last_check`
to avoid having them computed by the ORM on installation.
"""
if not column_exists(self.env.cr, 'res_partner', 'account_peppol_is_endpoint_valid'):
create_column(self.env.cr, 'res_partner', 'account_peppol_is_endpoint_valid', 'boolean')
if not column_exists(self.env.cr, 'res_partner', 'account_peppol_validity_last_check'):
create_column(self.env.cr, 'res_partner', 'account_peppol_validity_last_check', 'timestamp')
return super()._auto_init()
@api.model
def fields_get(self, allfields=None, attributes=None):
# TODO: remove in master
res = super().fields_get(allfields, attributes)
# the orm_cache does not contain the new selections added in stable: clear the cache once
existing_selection = res.get('account_peppol_verification_label', {}).get('selection')
if existing_selection is None:
return res
not_valid_format_label = next(x for x in self._fields['account_peppol_verification_label'].selection if x[0] == 'not_valid_format')
need_update = not_valid_format_label not in existing_selection
if need_update:
self.env['ir.model.fields'].invalidate_model(['selection_ids'])
self.env['ir.model.fields.selection']._update_selection(
'res.partner',
'account_peppol_verification_label',
self._fields['account_peppol_verification_label'].selection,
)
self.env.registry.clear_cache()
return super().fields_get(allfields, attributes)
return res
# -------------------------------------------------------------------------
# HELPER METHODS
# -------------------------------------------------------------------------
@api.model
def _get_participant_info(self, edi_identification):
hash_participant = md5(edi_identification.lower().encode()).hexdigest()
endpoint_participant = parse.quote_plus(f"iso6523-actorid-upis::{edi_identification}")
peppol_user = self.env.company.sudo().account_edi_proxy_client_ids.filtered(lambda user: user.proxy_type == 'peppol')
edi_mode = peppol_user and peppol_user.edi_mode or self.env['ir.config_parameter'].sudo().get_param('account_peppol.edi.mode')
sml_zone = 'acc.edelivery' if edi_mode == 'test' else 'edelivery'
smp_url = f"http://B-{hash_participant}.iso6523-actorid-upis.{sml_zone}.tech.ec.europa.eu/{endpoint_participant}"
try:
response = requests.get(smp_url, timeout=TIMEOUT)
except requests.exceptions.ConnectionError:
return None
if response.status_code != 200:
return None
return etree.fromstring(response.content)
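    # Worked illustration of the SMP URL built above (all values are placeholders):
    #   edi_identification = "0208:0123456789"
    #   hash_participant   = md5("0208:0123456789").hexdigest()
    #   smp_url            = "http://B-<md5hex>.iso6523-actorid-upis.edelivery.tech.ec.europa.eu/"
    #                        "iso6523-actorid-upis%3A%3A0208%3A0123456789"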
@api.model
def _check_peppol_participant_exists(self, edi_identification, check_company=False, ubl_cii_format=False):
participant_info = self._get_participant_info(edi_identification)
if participant_info is None:
return False
participant_identifier = participant_info.findtext('{*}ParticipantIdentifier')
service_metadata = participant_info.find('.//{*}ServiceMetadataReference')
service_href = ''
if service_metadata is not None:
service_href = service_metadata.attrib.get('href', '')
if edi_identification != participant_identifier or 'hermes-belgium' in service_href:
# all Belgian companies are pre-registered on hermes-belgium, so they will
# technically have an existing SMP url but they are not real Peppol participants
return False
if check_company:
# if we are only checking company's existence on the network, we don't care about what documents they can receive
if not service_href:
return True
access_point_contact = True
with contextlib.suppress(requests.exceptions.RequestException, etree.XMLSyntaxError):
response = requests.get(service_href, timeout=TIMEOUT)
if response.status_code == 200:
access_point_info = etree.fromstring(response.content)
access_point_contact = access_point_info.findtext('.//{*}TechnicalContactUrl') or access_point_info.findtext('.//{*}TechnicalInformationUrl')
return access_point_contact
return self._check_document_type_support(participant_info, ubl_cii_format)
def _check_document_type_support(self, participant_info, ubl_cii_format):
service_metadata = participant_info.find('.//{*}ServiceMetadataReferenceCollection')
if service_metadata is None:
return False
document_type = self.env['account.edi.xml.ubl_21']._get_customization_ids()[ubl_cii_format]
for service in service_metadata.iterfind('{*}ServiceMetadataReference'):
if document_type in parse.unquote_plus(service.attrib.get('href', '')):
return True
return False
# -------------------------------------------------------------------------
# COMPUTE METHODS
# -------------------------------------------------------------------------
@api.depends('ubl_cii_format')
def _compute_is_peppol_edi_format(self):
for partner in self:
partner.is_peppol_edi_format = partner.ubl_cii_format not in (False, 'facturx', 'oioubl_201', 'ciusro')
@api.depends('peppol_eas', 'peppol_endpoint', 'ubl_cii_format')
def _compute_account_peppol_is_endpoint_valid(self):
for partner in self:
partner.button_account_peppol_check_partner_endpoint()
@api.depends('account_peppol_is_endpoint_valid', 'account_peppol_validity_last_check')
def _compute_account_peppol_verification_label(self):
for partner in self:
if not partner.account_peppol_validity_last_check:
partner.account_peppol_verification_label = 'not_verified'
elif (
partner.is_peppol_edi_format
and (participant_info := self._get_participant_info(f'{partner.peppol_eas}:{partner.peppol_endpoint}'.lower())) is not None
and not partner._check_document_type_support(participant_info, partner.ubl_cii_format)
):
# the partner might exist on the network, but not be able to receive that specific format
partner.account_peppol_verification_label = 'not_valid_format'
elif partner.account_peppol_is_endpoint_valid:
partner.account_peppol_verification_label = 'valid'
else:
partner.account_peppol_verification_label = 'not_valid'
# -------------------------------------------------------------------------
# BUSINESS ACTIONS
# -------------------------------------------------------------------------
@handle_demo
def button_account_peppol_check_partner_endpoint(self):
""" A basic check for whether a participant is reachable at the given
Peppol participant ID - peppol_eas:peppol_endpoint (ex: '9999:test')
The SML (Service Metadata Locator) assigns a DNS name to each peppol participant.
This DNS name resolves into the SMP (Service Metadata Publisher) of the participant.
The DNS address is of the following form:
- "http://B-" + hexstring(md5(lowercase(ID-VALUE))) + "." + ID-SCHEME + "." + SML-ZONE-NAME + "/" + url_encoded(ID-SCHEME + "::" + ID-VALUE)
(ref:https://peppol.helger.com/public/locale-en_US/menuitem-docs-doc-exchange)
"""
self.ensure_one()
if not (self.peppol_eas and self.peppol_endpoint) or not self.is_peppol_edi_format:
self.account_peppol_is_endpoint_valid = False
else:
edi_identification = f'{self.peppol_eas}:{self.peppol_endpoint}'.lower()
self.account_peppol_validity_last_check = fields.Date.context_today(self)
self.account_peppol_is_endpoint_valid = bool(self._check_peppol_participant_exists(edi_identification, ubl_cii_format=self.ubl_cii_format))
return False
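# A minimal, hedged sketch of the SML address construction described in the
# docstring of button_account_peppol_check_partner_endpoint() above; it only runs
# when this module is executed directly, and the participant id is an arbitrary
# example value, not real data.
if __name__ == '__main__':
    from hashlib import md5 as _md5
    from urllib.parse import quote_plus as _quote_plus
    _demo_id = '0208:0477472701'
    _demo_hash = _md5(_demo_id.lower().encode()).hexdigest()
    _demo_endpoint = _quote_plus(f"iso6523-actorid-upis::{_demo_id}")
    print(f"http://B-{_demo_hash}.iso6523-actorid-upis.edelivery.tech.ec.europa.eu/{_demo_endpoint}")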
| 765068.py | [
"CWE-319: Cleartext Transmission of Sensitive Information"
] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import werkzeug
from werkzeug.urls import url_encode
from odoo import http, tools, _
from odoo.addons.auth_signup.models.res_users import SignupError
from odoo.addons.web.controllers.home import ensure_db, Home, SIGN_UP_REQUEST_PARAMS, LOGIN_SUCCESSFUL_PARAMS
from odoo.addons.base_setup.controllers.main import BaseSetup
from odoo.exceptions import UserError
from odoo.http import request
_logger = logging.getLogger(__name__)
LOGIN_SUCCESSFUL_PARAMS.add('account_created')
class AuthSignupHome(Home):
@http.route()
def web_login(self, *args, **kw):
ensure_db()
response = super().web_login(*args, **kw)
response.qcontext.update(self.get_auth_signup_config())
if request.session.uid:
if request.httprequest.method == 'GET' and request.params.get('redirect'):
# Redirect if already logged in and redirect param is present
return request.redirect(request.params.get('redirect'))
# Add message for non-internal user account without redirect if account was just created
if response.location == '/web/login_successful' and kw.get('confirm_password'):
return request.redirect_query('/web/login_successful', query={'account_created': True})
return response
@http.route('/web/signup', type='http', auth='public', website=True, sitemap=False)
def web_auth_signup(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('signup_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
self.do_signup(qcontext)
# Set user to public if they were not signed in by do_signup
# (mfa enabled)
if request.session.uid is None:
public_user = request.env.ref('base.public_user')
request.update_env(user=public_user)
# Send an account creation confirmation email
User = request.env['res.users']
user_sudo = User.sudo().search(
User._get_login_domain(qcontext.get('login')), order=User._get_login_order(), limit=1
)
template = request.env.ref('auth_signup.mail_template_user_signup_account_created', raise_if_not_found=False)
if user_sudo and template:
template.sudo().send_mail(user_sudo.id, force_send=True)
return self.web_login(*args, **kw)
except UserError as e:
qcontext['error'] = e.args[0]
except (SignupError, AssertionError) as e:
if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]):
qcontext["error"] = _("Another user is already registered using this email address.")
else:
_logger.warning("%s", e)
qcontext['error'] = _("Could not create a new account.") + "\n" + str(e)
elif 'signup_email' in qcontext:
user = request.env['res.users'].sudo().search([('email', '=', qcontext.get('signup_email')), ('state', '!=', 'new')], limit=1)
if user:
return request.redirect('/web/login?%s' % url_encode({'login': user.login, 'redirect': '/web'}))
response = request.render('auth_signup.signup', qcontext)
response.headers['X-Frame-Options'] = 'SAMEORIGIN'
response.headers['Content-Security-Policy'] = "frame-ancestors 'self'"
return response
@http.route('/web/reset_password', type='http', auth='public', website=True, sitemap=False)
def web_auth_reset_password(self, *args, **kw):
qcontext = self.get_auth_signup_qcontext()
if not qcontext.get('token') and not qcontext.get('reset_password_enabled'):
raise werkzeug.exceptions.NotFound()
if 'error' not in qcontext and request.httprequest.method == 'POST':
try:
if qcontext.get('token'):
self.do_signup(qcontext)
return self.web_login(*args, **kw)
else:
login = qcontext.get('login')
assert login, _("No login provided.")
_logger.info(
"Password reset attempt for <%s> by user <%s> from %s",
login, request.env.user.login, request.httprequest.remote_addr)
request.env['res.users'].sudo().reset_password(login)
qcontext['message'] = _("Password reset instructions sent to your email")
except UserError as e:
qcontext['error'] = e.args[0]
except SignupError:
qcontext['error'] = _("Could not reset your password")
_logger.exception('error when resetting password')
except Exception as e:
qcontext['error'] = str(e)
elif 'signup_email' in qcontext:
user = request.env['res.users'].sudo().search([('email', '=', qcontext.get('signup_email')), ('state', '!=', 'new')], limit=1)
if user:
return request.redirect('/web/login?%s' % url_encode({'login': user.login, 'redirect': '/web'}))
response = request.render('auth_signup.reset_password', qcontext)
response.headers['X-Frame-Options'] = 'SAMEORIGIN'
response.headers['Content-Security-Policy'] = "frame-ancestors 'self'"
return response
def get_auth_signup_config(self):
"""retrieve the module config (which features are enabled) for the login page"""
get_param = request.env['ir.config_parameter'].sudo().get_param
return {
'disable_database_manager': not tools.config['list_db'],
'signup_enabled': request.env['res.users']._get_signup_invitation_scope() == 'b2c',
'reset_password_enabled': get_param('auth_signup.reset_password') == 'True',
}
def get_auth_signup_qcontext(self):
""" Shared helper returning the rendering context for signup and reset password """
qcontext = {k: v for (k, v) in request.params.items() if k in SIGN_UP_REQUEST_PARAMS}
qcontext.update(self.get_auth_signup_config())
if not qcontext.get('token') and request.session.get('auth_signup_token'):
qcontext['token'] = request.session.get('auth_signup_token')
if qcontext.get('token'):
try:
# retrieve the user info (name, login or email) corresponding to a signup token
token_infos = request.env['res.partner'].sudo().signup_retrieve_info(qcontext.get('token'))
for k, v in token_infos.items():
qcontext.setdefault(k, v)
            except Exception:
qcontext['error'] = _("Invalid signup token")
qcontext['invalid_token'] = True
return qcontext
def _prepare_signup_values(self, qcontext):
values = { key: qcontext.get(key) for key in ('login', 'name', 'password') }
if not values:
raise UserError(_("The form was not properly filled in."))
if values.get('password') != qcontext.get('confirm_password'):
raise UserError(_("Passwords do not match; please retype them."))
supported_lang_codes = [code for code, _ in request.env['res.lang'].get_installed()]
lang = request.context.get('lang', '')
if lang in supported_lang_codes:
values['lang'] = lang
return values
def do_signup(self, qcontext):
""" Shared helper that creates a res.partner out of a token """
values = self._prepare_signup_values(qcontext)
self._signup_with_values(qcontext.get('token'), values)
request.env.cr.commit()
def _signup_with_values(self, token, values):
login, password = request.env['res.users'].sudo().signup(values, token)
request.env.cr.commit() # as authenticate will use its own cursor we need to commit the current transaction
pre_uid = request.session.authenticate(request.db, login, password)
if not pre_uid:
raise SignupError(_('Authentication Failed.'))
class AuthBaseSetup(BaseSetup):
@http.route()
def base_setup_data(self, **kwargs):
res = super().base_setup_data(**kwargs)
res.update({'resend_invitation': True})
return res
| 511699.py | [
"CWE-532: Insertion of Sensitive Information into Log File"
] |
# -*- coding: utf-8 -*-
import contextlib
import datetime
import json
import logging
import math
import os
import random
import selectors
import threading
import time
from psycopg2 import InterfaceError, sql
import odoo
from odoo import api, fields, models
from odoo.service.server import CommonServer
from odoo.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from odoo.tools import date_utils
_logger = logging.getLogger(__name__)
# longpolling timeout connection
TIMEOUT = 50
# custom function to call instead of default PostgreSQL's `pg_notify`
ODOO_NOTIFY_FUNCTION = os.getenv('ODOO_NOTIFY_FUNCTION', 'pg_notify')
def get_notify_payload_max_length(default=8000):
try:
length = int(os.environ.get('ODOO_NOTIFY_PAYLOAD_MAX_LENGTH', default))
except ValueError:
_logger.warning("ODOO_NOTIFY_PAYLOAD_MAX_LENGTH has to be an integer, "
"defaulting to %d bytes", default)
length = default
return length
# max length in bytes for the NOTIFY query payload
NOTIFY_PAYLOAD_MAX_LENGTH = get_notify_payload_max_length()
#----------------------------------------------------------
# Bus
#----------------------------------------------------------
def json_dump(v):
return json.dumps(v, separators=(',', ':'), default=date_utils.json_default)
def hashable(key):
if isinstance(key, list):
key = tuple(key)
return key
def channel_with_db(dbname, channel):
if isinstance(channel, models.Model):
return (dbname, channel._name, channel.id)
if isinstance(channel, tuple) and len(channel) == 2 and isinstance(channel[0], models.Model):
return (dbname, channel[0]._name, channel[0].id, channel[1])
if isinstance(channel, str):
return (dbname, channel)
return channel
def get_notify_payloads(channels):
"""
Generates the json payloads for the imbus NOTIFY.
    Recursively splits payloads that are too large.
:param list channels:
:return: list of payloads of json dumps
:rtype: list[str]
"""
if not channels:
return []
payload = json_dump(channels)
if len(channels) == 1 or len(payload.encode()) < NOTIFY_PAYLOAD_MAX_LENGTH:
return [payload]
else:
pivot = math.ceil(len(channels) / 2)
return (get_notify_payloads(channels[:pivot]) +
get_notify_payloads(channels[pivot:]))
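# Hedged illustration (only runs when this module is executed directly, which
# assumes an importable Odoo environment and the default 8000-byte limit): a
# channel list whose JSON dump exceeds NOTIFY_PAYLOAD_MAX_LENGTH is split
# recursively into several payloads that each fit under the limit.
if __name__ == '__main__':
    _demo_channels = [('db', 'bus.bus', i) for i in range(500)]
    _demo_payloads = get_notify_payloads(_demo_channels)
    print(len(_demo_payloads), [len(p.encode()) for p in _demo_payloads])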
class ImBus(models.Model):
_name = 'bus.bus'
_description = 'Communication Bus'
channel = fields.Char('Channel')
message = fields.Char('Message')
@api.autovacuum
def _gc_messages(self):
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT*2)
domain = [('create_date', '<', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
return self.sudo().search(domain).unlink()
@api.model
def _sendmany(self, notifications):
channels = set()
values = []
for target, notification_type, message in notifications:
channel = channel_with_db(self.env.cr.dbname, target)
channels.add(channel)
values.append({
'channel': json_dump(channel),
'message': json_dump({
'type': notification_type,
'payload': message,
})
})
self.sudo().create(values)
if channels:
            # We have to wait until the notifications are committed in database.
            # When calling `NOTIFY imbus`, notifications will be fetched in the
            # bus table. If the transaction is not committed yet, there will be
            # nothing to fetch, and the websocket will return no notification.
@self.env.cr.postcommit.add
def notify():
with odoo.sql_db.db_connect('postgres').cursor() as cr:
query = sql.SQL("SELECT {}('imbus', %s)").format(sql.Identifier(ODOO_NOTIFY_FUNCTION))
payloads = get_notify_payloads(list(channels))
if len(payloads) > 1:
_logger.info("The imbus notification payload was too large, "
"it's been split into %d payloads.", len(payloads))
for payload in payloads:
cr.execute(query, (payload,))
@api.model
def _sendone(self, channel, notification_type, message):
self._sendmany([[channel, notification_type, message]])
@api.model
def _poll(self, channels, last=0):
        # the first poll returns the notifications in the 'buffer'
if last == 0:
timeout_ago = datetime.datetime.utcnow()-datetime.timedelta(seconds=TIMEOUT)
domain = [('create_date', '>', timeout_ago.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
else: # else returns the unread notifications
domain = [('id', '>', last)]
channels = [json_dump(channel_with_db(self.env.cr.dbname, c)) for c in channels]
domain.append(('channel', 'in', channels))
notifications = self.sudo().search_read(domain)
        # list of notifications to return
result = []
for notif in notifications:
result.append({
'id': notif['id'],
'message': json.loads(notif['message']),
})
return result
def _bus_last_id(self):
last = self.env['bus.bus'].search([], order='id desc', limit=1)
return last.id if last else 0
#----------------------------------------------------------
# Dispatcher
#----------------------------------------------------------
class BusSubscription:
def __init__(self, channels, last):
self.last_notification_id = last
self.channels = channels
class ImDispatch(threading.Thread):
def __init__(self):
super().__init__(daemon=True, name=f'{__name__}.Bus')
self._channels_to_ws = {}
def subscribe(self, channels, last, db, websocket):
"""
        Subscribe to bus notifications. Every notification related to the
given channels will be sent through the websocket. If a subscription
is already present, overwrite it.
"""
channels = {hashable(channel_with_db(db, c)) for c in channels}
for channel in channels:
self._channels_to_ws.setdefault(channel, set()).add(websocket)
outdated_channels = websocket._channels - channels
self._clear_outdated_channels(websocket, outdated_channels)
websocket.subscribe(channels, last)
with contextlib.suppress(RuntimeError):
if not self.is_alive():
self.start()
def unsubscribe(self, websocket):
self._clear_outdated_channels(websocket, websocket._channels)
def _clear_outdated_channels(self, websocket, outdated_channels):
""" Remove channels from channel to websocket map. """
for channel in outdated_channels:
self._channels_to_ws[channel].remove(websocket)
if not self._channels_to_ws[channel]:
self._channels_to_ws.pop(channel)
def loop(self):
""" Dispatch postgres notifications to the relevant websockets """
_logger.info("Bus.loop listen imbus on db postgres")
with odoo.sql_db.db_connect('postgres').cursor() as cr, \
selectors.DefaultSelector() as sel:
cr.execute("listen imbus")
cr.commit()
conn = cr._cnx
sel.register(conn, selectors.EVENT_READ)
while not stop_event.is_set():
if sel.select(TIMEOUT):
conn.poll()
channels = []
while conn.notifies:
channels.extend(json.loads(conn.notifies.pop().payload))
# relay notifications to websockets that have
# subscribed to the corresponding channels.
websockets = set()
for channel in channels:
websockets.update(self._channels_to_ws.get(hashable(channel), []))
for websocket in websockets:
websocket.trigger_notification_dispatching()
def run(self):
while not stop_event.is_set():
try:
self.loop()
except Exception as exc:
if isinstance(exc, InterfaceError) and stop_event.is_set():
continue
_logger.exception("Bus.loop error, sleep and retry")
time.sleep(TIMEOUT)
# Partially undo a2ed3d3d5bdb6025a1ba14ad557a115a86413e65
# IMDispatch has a lazy start, so we could initialize it anyway
# And this avoids the Bus unavailable error messages
dispatch = ImDispatch()
stop_event = threading.Event()
CommonServer.on_stop(stop_event.set)
| 969579.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
import codecs
import io
import json
import re
from pathlib import Path
import aiohttp
import PyPDF2
import yaml
from bs4 import BeautifulSoup
from fastapi import FastAPI, Query, Request, Response
from fastapi.middleware.cors import CORSMiddleware
from fastapi.openapi.utils import get_openapi
from fastapi.responses import JSONResponse
from loguru import logger
from starlette.responses import FileResponse
# In total, the text + image links + prompts should be <= 2048
CHAR_LIMIT = 1585 # TODO: increase these values after long-context support has been added
IMAGES_CHAR_LIMIT = 300
MAX_DOWNLOAD_SIZE = 4 * 1024 * 1024
MAX_CHUNK_SIZE = 1024 * 1024
IMAGES_SUFFIX = """, and I will also include images formatted like this:
![](image url)
"""
folder_path = Path(__file__).parent
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
def extract_image_links(text: str):
image_pattern = r"https?://\S+\.(?:jpg|jpeg|png|gif|bmp|webp|svg)"
images = re.findall(image_pattern, text, flags=re.IGNORECASE | re.MULTILINE)
return images
def detect_content_type(content: bytes) -> str:
if content.startswith(b"%PDF-"):
return "application/pdf"
    elif content.lstrip().upper().startswith(b"<!DOCTYPE HTML") or content.startswith(b"<html"):
return "text/html"
elif content.startswith(b"{") or content.startswith(b"["):
try:
json.loads(content)
return "application/json"
except json.JSONDecodeError:
pass
elif content.startswith(b"---") or content.startswith(b"%YAML"):
try:
yaml.safe_load(content)
return "application/x-yaml"
except yaml.YAMLError:
pass
return "text/plain"
def limit_image_count(images, max_chars=300):
limited_images = []
current_length = 0
for url in images:
# Add the length of "http:" if the URL starts with "//"
url_length = len("http:") + len(url) if url.startswith("//") else len(url)
if current_length + url_length > max_chars:
break
if url.startswith("//"):
limited_images.append(f"http:{url}")
else:
limited_images.append(url)
current_length += url_length
return limited_images
def truncate_paragraphs(paragraphs, max_length):
truncated_paragraphs = []
current_length = 0
for paragraph in paragraphs:
if len(paragraph) == 0:
continue
paragraph = paragraph.strip()
if current_length + len(paragraph) <= max_length:
truncated_paragraphs.append(paragraph)
current_length += len(paragraph)
else:
remaining_length = max_length - current_length
truncated_paragraph = paragraph[:remaining_length]
truncated_paragraphs.append(truncated_paragraph)
break
return truncated_paragraphs
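# Small, hedged usage sketch of truncate_paragraphs() (only runs when this module
# is executed directly): paragraphs are kept in order and the last one is cut so
# the combined length stays within max_length. The input strings are made-up values.
if __name__ == "__main__":
    print(truncate_paragraphs(["abcdef", "ghijkl"], max_length=9))  # ['abcdef', 'ghi']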
@app.get("/get-url-content/", operation_id="getUrlContent", summary="It will return a web page's or pdf's content")
async def get_url_content(url: str = Query(..., description="url to fetch content from")) -> Response:
try:
buffer = io.BytesIO()
encoding: str | None
content_type: str | None
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
response.raise_for_status() # Raise an exception for HTTP errors
try:
encoding = response.get_encoding()
except RuntimeError:
encoding = None
content_type = response.content_type
if response.content_length is not None and response.content_length > MAX_DOWNLOAD_SIZE:
error_message = (
f"Sorry, the file at {url} is too large.\nYou should report this message to the user!"
)
return JSONResponse(content={"error": error_message}, status_code=500)
async for chunk in response.content.iter_chunked(MAX_CHUNK_SIZE):
buffer.write(chunk)
if buffer.tell() > MAX_DOWNLOAD_SIZE:
error_message = (
f"Sorry, the file at {url} is too large.\nYou should report this message to the user!"
)
return JSONResponse(content={"error": error_message}, status_code=500)
content_bytes: bytes = buffer.getvalue()
if content_type is None or content_type == "application/octet-stream":
content_type = detect_content_type(content_bytes)
buffer.seek(0)
def decode_text() -> str:
decoder = codecs.getincrementaldecoder(encoding or "utf-8")(errors="replace")
return decoder.decode(content_bytes, True)
text = ""
images = []
if content_type == "application/pdf":
pdf_reader = PyPDF2.PdfReader(buffer)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
elif content_type == "text/html":
soup = BeautifulSoup(decode_text(), "html.parser")
paragraphs = [p.get_text(strip=True) for p in soup.find_all("p")]
# if there are no paragraphs, try to get text from divs
if not paragraphs:
paragraphs = [p.get_text(strip=True) for p in soup.find_all("div")]
# if there are no paragraphs or divs, try to get text from spans
if not paragraphs:
paragraphs = [p.get_text(strip=True) for p in soup.find_all("span")]
paragraphs = truncate_paragraphs(paragraphs, CHAR_LIMIT)
text = "\n".join(paragraphs)
for p in soup.find_all("p"):
parent = p.parent
images.extend([img["src"] for img in parent.find_all("img") if img.get("src")])
elif content_type == "application/json":
json_data = json.loads(decode_text())
text = yaml.dump(json_data, sort_keys=False, default_flow_style=False)
for _, value in json_data.items():
if isinstance(value, str):
images.extend(extract_image_links(value))
elif isinstance(value, list):
for item in value:
if isinstance(item, str):
images.extend(extract_image_links(item))
elif content_type == "text/plain":
text = decode_text()
images.extend(extract_image_links(text))
else:
error_message = f"Sorry, unsupported content type '{content_type}' at {url}.\nYou should report this message to the user!"
return JSONResponse(content={"error": error_message}, status_code=500)
images = [f"http:{url}" if url.startswith("//") else url for url in images]
images = limit_image_count(images, max_chars=IMAGES_CHAR_LIMIT)
if len(text) > CHAR_LIMIT:
text = text[:CHAR_LIMIT]
        MULTILINE_SYM = "|" if content_type != "application/json" else ""
text_yaml = f"text_content: {MULTILINE_SYM}\n"
for line in text.split("\n"):
text_yaml += f" {line}\n"
images_yaml = "images:\n" if len(images) > 0 else ""
for image in images:
images_yaml += f"- {image}\n"
yaml_text = f"{text_yaml}\n{images_yaml}"
text = f"""{yaml_text}
Thought: I now know the answer{IMAGES_SUFFIX if len(images) > 0 else "."}
"""
return Response(content=text, media_type="text/plain")
except Exception as e:
logger.opt(exception=True).debug("web_retriever GET failed:")
error_message = f"Sorry, the url is not available. {e}\nYou should report this message to the user!"
return JSONResponse(content={"error": error_message}, status_code=500)
@app.get("/icon.png", include_in_schema=False)
async def api_icon():
return FileResponse(folder_path / "icon.png")
@app.get("/ai-plugin.json", include_in_schema=False)
async def api_ai_plugin(request: Request):
json_path = folder_path / "ai-plugin.json"
with json_path.open("r") as f:
ai_plugin_json = json.load(f)
base_url, _, _ = request.url._url.rpartition("/")
ai_plugin_json["logo_url"] = base_url + "/icon.png"
ai_plugin_json["api"]["url"] = base_url + "/openapi.json"
return Response(content=json.dumps(ai_plugin_json), media_type="application/json")
def custom_openapi():
if app.openapi_schema:
return app.openapi_schema
openapi_schema = get_openapi(
title="Web Retriever",
version="0.1",
routes=app.routes,
servers=[{"url": "/plugins/web_retriever"}],
)
openapi_schema["tags"] = [
{
"name": "web-retriever",
"description": "Use this plugin to retrieve web page and pdf content",
},
]
openapi_schema.pop("components", None)
app.openapi_schema = openapi_schema
return app.openapi_schema
app.openapi = custom_openapi
if __name__ == "__main__":
# simple built-in test
import asyncio
url = "https://huggingface.co/OpenAssistant/oasst-sft-1-pythia-12b"
x = asyncio.run(get_url_content(url))
print(x.status_code, x.body)
| 287641.py | [
"CWE-942: Permissive Cross-domain Policy with Untrusted Domains"
] |
"""
Basic FastAPI server to serve models using HuggingFace Transformers library.
This is an alternative to running the HuggingFace `text-generation-inference` (tgi) server.
"""
import sys
import threading
from queue import Queue
import fastapi
import hf_stopping
import hf_streamer
import interface
import torch
import transformers
import uvicorn
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from oasst_shared import model_configs
from settings import settings
from sse_starlette.sse import EventSourceResponse
app = fastapi.FastAPI()
DECODE_TOKEN = "<decode-token>"
# Allow CORS
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.middleware("http")
async def log_exceptions(request: fastapi.Request, call_next):
try:
response = await call_next(request)
except Exception:
logger.exception("Exception in request")
raise
return response
model_loaded: bool = False
fully_loaded: bool = False
model_input_queue: Queue = Queue()
def model_thread():
"""Continually obtain new work requests from the model input queue and work on them."""
model: transformers.PreTrainedModel
tokenizer: transformers.PreTrainedTokenizer
model, tokenizer, decode_token = load_models()
request: interface.GenerateStreamRequest
output_queue: Queue
eos_token_id = tokenizer.eos_token_id if hasattr(tokenizer, "eos_token_id") else None
while True:
request, output_queue = model_input_queue.get()
try:
prompt = request.inputs
params = request.parameters.dict()
seed = params.pop("seed")
stop_sequences = params.pop("stop")
params.pop("details")
params.pop("plugins")
if seed is not None:
torch.manual_seed(seed)
last_token_id = None # need to delay by 1 to simulate tgi
def print_text(token_id: int):
nonlocal last_token_id
if last_token_id is not None:
text = decode_token(last_token_id)
stream_response = interface.GenerateStreamResponse(
token=interface.Token(text=text, id=last_token_id),
)
output_queue.put_nowait(stream_response)
last_token_id = token_id
with torch.no_grad():
ids = tokenizer.encode(prompt, return_tensors="pt", add_special_tokens=False)
streamer = hf_streamer.HFStreamer(input_ids=ids, printer=print_text)
ids = ids.to(model.device)
stopping_criteria = (
transformers.StoppingCriteriaList(
[hf_stopping.SequenceStoppingCriteria(tokenizer, stop_sequences, prompt)]
)
if stop_sequences
else None
)
output = model.generate(
ids,
**params,
streamer=streamer,
eos_token_id=eos_token_id,
stopping_criteria=stopping_criteria,
)
output = output.cpu()
output_ids = output[0][len(ids[0]) :]
decoded = tokenizer.decode(output_ids, skip_special_tokens=True)
stream_response = interface.GenerateStreamResponse(
token=interface.Token(
text=decode_token(last_token_id), # hack because the "normal" inference server does this at once
id=last_token_id,
),
generated_text=decoded.strip(),
details=interface.StreamDetails(
finish_reason="eos_token",
generated_tokens=len(output_ids),
seed=seed,
),
)
output_queue.put_nowait(stream_response)
except Exception as e:
logger.exception("Exception in model thread")
output_queue.put_nowait(interface.GenerateStreamResponse(error=str(e)))
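# The single worker thread above serialises generation: the HTTP handlers further
# below only enqueue (request, output_queue) pairs on model_input_queue and stream
# back whatever the worker pushes onto the per-request output queue, so the model
# itself is never shared between request threads.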
def load_models():
global model_loaded
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
model_config = model_configs.MODEL_CONFIGS.get(settings.model_config_name)
if model_config is None:
logger.error(f"Unknown model config name: {settings.model_config_name}")
sys.exit(2)
hf_config = transformers.AutoConfig.from_pretrained(model_config.model_id)
logger.warning(f"Loading model {model_config.model_id}...")
tokenizer = transformers.AutoTokenizer.from_pretrained(model_config.model_id)
logger.warning(f"tokenizer {tokenizer.name_or_path} has vocab size {len(tokenizer)}")
# see `decode_token` method, taken from HF text-generation-inference
tokenizer.add_special_tokens({"additional_special_tokens": ["<decode-token>"]})
special_decode_token_id = tokenizer.convert_tokens_to_ids("<decode-token>")
special_decode_token_length = len("<decode-token>")
def decode_token(token_id):
result = tokenizer.decode([special_decode_token_id, token_id], skip_special_tokens=False)
# slice to remove special decode token
return result[special_decode_token_length:]
config_dtype = hf_config.torch_dtype if hasattr(hf_config, "torch_dtype") else torch.float32
dtype = torch.bfloat16 if torch.has_cuda and torch.cuda.is_bf16_supported() else config_dtype
model = transformers.AutoModelForCausalLM.from_pretrained(
model_config.model_id,
torch_dtype=dtype,
load_in_8bit=settings.quantize,
device_map="auto" if torch.cuda.is_available() else None,
).eval()
logger.warning("Model loaded, using it once...")
# warmup
with torch.no_grad():
text = "Hello, world"
tokens = tokenizer.encode(text, return_tensors="pt")
tokens = tokens.to(model.device)
model.generate(tokens, max_length=10, num_beams=1, do_sample=False)
model_loaded = True
return model, tokenizer, decode_token
@app.on_event("startup")
async def start_model_thread():
logger.warning("Starting model thread...")
threading.Thread(target=model_thread, daemon=True).start()
logger.warning("Model thread started")
@app.on_event("startup")
async def welcome_message():
global fully_loaded
logger.warning("Server started")
logger.warning("To stop the server, press Ctrl+C")
fully_loaded = True
@app.post("/generate_stream")
async def generate(
request: interface.GenerateStreamRequest,
):
def event_stream():
try:
output_queue: Queue = Queue()
model_input_queue.put_nowait((request, output_queue))
while True:
output = output_queue.get() # type: interface.GenerateStreamResponse
yield {"data": output.json()}
if output.is_end:
break
if output.is_error:
raise Exception(output.error)
except Exception as e:
logger.exception("Exception in event stream")
output_queue.put_nowait(interface.GenerateStreamResponse(error=str(e)))
raise
return EventSourceResponse(event_stream())
@app.get("/health")
async def health():
if not (fully_loaded and model_loaded):
raise fastapi.HTTPException(status_code=503, detail="Server not fully loaded")
return {"status": "ok"}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| 672233.py | [
"CWE-942: Permissive Cross-domain Policy with Untrusted Domains"
] |
from argparse import Namespace
import pytest
import torch
from model_training.custom_datasets import get_one_dataset
from model_training.custom_datasets.formatting import (
QA_SPECIAL_TOKENS,
DatasetEntryRm,
Role,
Utterance,
create_dataset_entry_qa,
)
from model_training.custom_datasets.ranking_collator import RankingDataCollator
from model_training.utils.utils import get_tokenizer, match_tokenizer_name
from torch.utils.data import DataLoader
from transformers.models.auto.tokenization_auto import AutoTokenizer
@pytest.fixture
def pythia_tokenizer():
tokenizer = AutoTokenizer.from_pretrained("tests/resources/data_collator", local_files_only=True)
# for this test we use the pythia special tokens but note that this test is model agnostic
tokenizer_config = match_tokenizer_name("pythia")
tokenizer.add_special_tokens(
{
"pad_token": tokenizer_config.special_tokens.pad_token,
"eos_token": tokenizer_config.special_tokens.eos_token,
"sep_token": tokenizer_config.special_tokens.sep_token,
}
)
additional_special_tokens = list(QA_SPECIAL_TOKENS.values())
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
return tokenizer
def test_ranking_collator_system_tag(pythia_tokenizer):
first_example = DatasetEntryRm(
messages=[Utterance(text="First instruction.", role=Role.prompter, lang="en")],
replies=[
Utterance(text="Answer to first instruction.", role=Role.assistant, lang="en", quality=0.7),
Utterance(text="Answer to first instruction.", role=Role.assistant, lang="de", quality=0.8),
],
)
second_example = DatasetEntryRm(
messages=[Utterance(text="Second instruction.", role=Role.prompter)],
replies=[
Utterance(text="Answer to second instruction.", role=Role.assistant, humor=0.1, creativity=0.2),
Utterance(text="Answer to second instruction.", role=Role.assistant, humor=0.4, creativity=0.3),
],
)
examples = [first_example, second_example]
rdc = RankingDataCollator(tokenizer=pythia_tokenizer, padding=True)
batch, cu_lens = rdc(examples=examples)
assert len(batch) == 2
assert cu_lens == [0, len(first_example.replies), len(first_example.replies) + len(second_example.replies)]
assert batch.data["attention_mask"].shape[0] == 4 # we have 4 replies in total
assert batch.data["input_ids"].shape == batch.data["attention_mask"].shape
eos = pythia_tokenizer.eos_token
# check each instruction
first_example_first_answer_decoded = pythia_tokenizer.decode(batch.data["input_ids"][0])
f"{QA_SPECIAL_TOKENS['Question']}{first_example.messages[0].text}{eos}" in first_example_first_answer_decoded
f"{QA_SPECIAL_TOKENS['Answer']}{first_example.replies[0].text}{eos}" in first_example_first_answer_decoded
"lang: en" in first_example_first_answer_decoded
"quality: 0.7" in first_example_first_answer_decoded
first_example_second_answer_decoded = pythia_tokenizer.decode(batch.data["input_ids"][1])
f"{QA_SPECIAL_TOKENS['Question']}{first_example.messages[0].text}{eos}" in first_example_second_answer_decoded
f"{QA_SPECIAL_TOKENS['Answer']}{first_example.replies[1].text}{eos}" in first_example_second_answer_decoded
"lang: de" in first_example_second_answer_decoded
"quality: 0.8" in first_example_second_answer_decoded
second_example_first_answer_decoded = pythia_tokenizer.decode(batch.data["input_ids"][2])
f"{QA_SPECIAL_TOKENS['Question']}{second_example.messages[0].text}{eos}" in second_example_first_answer_decoded
f"{QA_SPECIAL_TOKENS['Answer']}{second_example.replies[0].text}{eos}" in second_example_first_answer_decoded
"humor: 0.1" in second_example_first_answer_decoded
"creativity: 0.2" in second_example_first_answer_decoded
second_example_second_answer_decoded = pythia_tokenizer.decode(batch.data["input_ids"][2])
f"{QA_SPECIAL_TOKENS['Question']}{second_example.messages[0].text}{eos}" in second_example_second_answer_decoded
f"{QA_SPECIAL_TOKENS['Answer']}{second_example.replies[1].text}{eos}" in second_example_second_answer_decoded
"humor: 0.4" in second_example_second_answer_decoded
"creativity: 0.3" in second_example_second_answer_decoded
def test_ranking_collator_no_messages(pythia_tokenizer):
first_messages = None
first_replies = [
"Response A to None",
"Response B to None",
"Response C to None",
]
examples = [(first_messages, first_replies)]
rdc = RankingDataCollator(tokenizer=pythia_tokenizer, padding=True)
eos = pythia_tokenizer.eos_token
examples_ds = [
DatasetEntryRm(messages=None, replies=[Utterance(text=r, role=Role.assistant) for r in first_replies])
]
# make sure that formatting via dataset entry and lists is the same
for ex in [examples, examples_ds]:
batch, cu_lens = rdc(examples=ex)
assert len(batch) == 2
assert cu_lens == [0, len(first_replies)]
assert batch.data["attention_mask"].shape[0] == 3 # we have 5 replies in total
assert batch.data["input_ids"].shape == batch.data["attention_mask"].shape
# check each instruction
assert pythia_tokenizer.decode(batch.data["input_ids"][0]) == f"{first_replies[0]}{eos}"
assert pythia_tokenizer.decode(batch.data["input_ids"][1]) == f"{first_replies[1]}{eos}"
assert pythia_tokenizer.decode(batch.data["input_ids"][2]) == f"{first_replies[2]}{eos}"
assert (batch.attention_mask == torch.where(batch.input_ids == 1, 0, 1)).all()
def test_ranking_collator_local(pythia_tokenizer):
first_messages = ["First Instruction."]
first_replies = [
"Response A to First Instruction",
"Response B to First Instruction",
"First Response C to First Instruction",
]
second_messages = ["Second Instruction."]
second_replies = ["Response A to Second Instruction", "Response B to Second Instruction"]
examples = [(first_messages, first_replies), (second_messages, second_replies)]
rdc = RankingDataCollator(tokenizer=pythia_tokenizer, padding=True)
eos = pythia_tokenizer.eos_token
pad = pythia_tokenizer.pad_token
examples_ds = [
create_dataset_entry_qa(mode="rm", questions=first_messages, answers=first_replies),
create_dataset_entry_qa(mode="rm", questions=second_messages, answers=second_replies),
]
# make sure that formatting via dataset entry and lists is the same
for ex in [examples, examples_ds]:
batch, cu_lens = rdc(examples=ex)
assert len(batch) == 2
assert cu_lens == [0, len(first_replies), len(first_replies) + len(second_replies)]
assert batch.data["attention_mask"].shape[0] == 5 # we have 5 replies in total
assert batch.data["input_ids"].shape == batch.data["attention_mask"].shape
# check each instruction
assert (
pythia_tokenizer.decode(batch.data["input_ids"][0])
== f"{QA_SPECIAL_TOKENS['Question']}{first_messages[0]}{eos}{QA_SPECIAL_TOKENS['Answer']}{first_replies[0]}{eos}"
+ 5 * pad
)
assert (
pythia_tokenizer.decode(batch.data["input_ids"][1])
== f"{QA_SPECIAL_TOKENS['Question']}{first_messages[0]}{eos}{QA_SPECIAL_TOKENS['Answer']}{first_replies[1]}{eos}"
+ 5 * pad
)
assert (
pythia_tokenizer.decode(batch.data["input_ids"][2])
== f"{QA_SPECIAL_TOKENS['Question']}{first_messages[0]}{eos}{QA_SPECIAL_TOKENS['Answer']}{first_replies[2]}{eos}"
)
assert (
pythia_tokenizer.decode(batch.data["input_ids"][3])
== f"{QA_SPECIAL_TOKENS['Question']}{second_messages[0]}{eos}{QA_SPECIAL_TOKENS['Answer']}{second_replies[0]}{eos}"
+ 4 * pad
)
assert (
pythia_tokenizer.decode(batch.data["input_ids"][4])
== f"{QA_SPECIAL_TOKENS['Question']}{second_messages[0]}{eos}{QA_SPECIAL_TOKENS['Answer']}{second_replies[1]}{eos}"
+ 4 * pad
)
assert (batch.attention_mask == torch.where(batch.input_ids == 1, 0, 1)).all()
@pytest.mark.skip(reason="manual")
def test_rm_datasets():
# dummy configuration
config = Namespace(cache_dir=".cache", model_name="EleutherAI/pythia-70m-deduped")
dataset_names = ["anthropic_rlhf", "hf_summary_pairs", "webgpt", "hellaswag", "shp", "hf_summary"]
for name in dataset_names:
train, val = get_one_dataset(conf=config, dataset_name=name, mode="rm")
print(f"dataset: '{name}' (train ({type(train)}): {len(train)}, val({type(val)}): {len(val)})")
avg_number_continuations = sum(len(x[1]) for x in train) / len(train)
num_more_than_two = sum(1 if len(x[1]) > 2 else 0 for x in train)
print(f"Average number of continuations: {avg_number_continuations} (with >2: {num_more_than_two})")
for i in range(10):
item = train[i + 100]
print(f"[{i}] Prefix: {item[0]}")
continuations = item[1]
print(f"[{i}] Continuations ({len(continuations)}):")
for j, c in enumerate(continuations):
print(f"[{i}.{j}]: {c}")
@pytest.mark.skip(reason="manual")
def test_ranking_collator():
# dummy configuration
config = Namespace(cache_dir=".cache", model_name="EleutherAI/pythia-70m-deduped")
# get a tokenizer
tokenizer = get_tokenizer(config)
print(type(tokenizer))
# load oasst dataset
kwargs = {"lang": "en,es,de,fr", "input_file_path": "2023-03-13_oasst_ready_labels.jsonl.gz", "mode": "rm"}
train, val = get_one_dataset(conf=config, dataset_name="oasst_export", **kwargs)
print(len(train))
a = train[0]
print(type(a))
print(len(a))
print("prefix", a[0])
print("continuations", a[1])
# create RankingCollator
ranking_collator = RankingDataCollator(tokenizer=tokenizer)
dl = DataLoader(
train,
batch_size=4,
collate_fn=ranking_collator,
num_workers=1,
pin_memory=False,
)
data_iter = iter(dl)
b = next(data_iter)
x, y = b
input_ids = x.input_ids
attention_mask = x.attention_mask
print("input_ids", input_ids.shape)
print("attention_mask", attention_mask.shape)
print("input_ids[0, :200]", input_ids[0, :200])
print("decoded input_ids[0, :200]:", tokenizer.decode(input_ids[0, :200]))
print("decoded non masked input_ids[0]:", tokenizer.decode(input_ids[0][x.attention_mask[0] == 1]))
print(y)
if __name__ == "__main__":
test_rm_datasets()
# test_ranking_collator()
| 230568.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
import dataclasses
import gc
import glob
import os
from accelerate import init_empty_weights
from accelerate.utils import set_module_tensor_to_device
from huggingface_hub import snapshot_download
import torch
from torch import Tensor
from torch.nn import functional as F
import torch.nn as nn
from tqdm import tqdm
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
AutoModel,
AutoModelForSeq2SeqLM,
)
@dataclasses.dataclass
class CompressionConfig:
"""Group-wise quantization."""
num_bits: int
group_size: int
group_dim: int
symmetric: bool
enabled: bool = True
default_compression_config = CompressionConfig(
num_bits=8, group_size=256, group_dim=1, symmetric=True, enabled=True
)
class CLinear(nn.Module):
"""Compressed Linear Layer."""
def __init__(self, weight=None, bias=None, device=None):
super().__init__()
if weight is None:
self.weight = None
elif isinstance(weight, Tensor):
self.weight = compress(weight.data.to(device), default_compression_config)
else:
self.weight = weight
self.bias = bias
def forward(self, input: Tensor) -> Tensor:
weight = decompress(self.weight, default_compression_config)
if self.bias is None:
return F.linear(input.to(weight.dtype), weight)
return F.linear(input.to(weight.dtype), weight, self.bias.to(weight.dtype))
def compress_module(module, target_device):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
setattr(
module,
attr_str,
CLinear(target_attr.weight, target_attr.bias, target_device),
)
for name, child in module.named_children():
compress_module(child, target_device)
def get_compressed_list(module, prefix=""):
compressed_list = []
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
full_name = (
f"{prefix}.{attr_str}.weight" if prefix else f"{attr_str}.weight"
)
compressed_list.append(full_name)
for name, child in module.named_children():
child_prefix = f"{prefix}.{name}" if prefix else name
for each in get_compressed_list(child, child_prefix):
compressed_list.append(each)
return compressed_list
def apply_compressed_weight(module, compressed_state_dict, target_device, prefix=""):
for attr_str in dir(module):
target_attr = getattr(module, attr_str)
if type(target_attr) == torch.nn.Linear:
full_name = (
f"{prefix}.{attr_str}.weight" if prefix else f"{attr_str}.weight"
)
setattr(
module,
attr_str,
CLinear(
compressed_state_dict[full_name], target_attr.bias, target_device
),
)
for name, child in module.named_children():
child_prefix = f"{prefix}.{name}" if prefix else name
apply_compressed_weight(
child, compressed_state_dict, target_device, child_prefix
)
def load_compress_model(model_path, device, torch_dtype, use_fast, revision="main"):
# partially load model
# `use_fast=True`` is not supported for some models.
try:
tokenizer = AutoTokenizer.from_pretrained(
model_path, use_fast=use_fast, revision=revision, trust_remote_code=True
)
except TypeError:
tokenizer = AutoTokenizer.from_pretrained(
            model_path, use_fast=not use_fast, revision=revision, trust_remote_code=True
)
with init_empty_weights():
# `trust_remote_code` should be set as `True` for both AutoConfig and AutoModel
config = AutoConfig.from_pretrained(
model_path,
low_cpu_mem_usage=True,
torch_dtype=torch_dtype,
trust_remote_code=True,
revision=revision,
)
# some models are loaded by AutoModel but not AutoModelForCausalLM,
# such as chatglm, chatglm2
try:
# google/flan-* models are based on an AutoModelForSeq2SeqLM.
if "T5Config" in str(type(config)):
model = AutoModelForSeq2SeqLM.from_config(
config, trust_remote_code=True
)
else:
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
except NameError:
model = AutoModel.from_config(config, trust_remote_code=True)
linear_weights = get_compressed_list(model)
if os.path.exists(model_path):
# `model_path` is a local folder
base_pattern = os.path.join(model_path, "pytorch_model*.bin")
else:
# `model_path` is a cached Hugging Face repo
        # We don't necessarily need to download the model's repo again if there is a cache.
# So check the default huggingface cache first.
model_path_temp = os.path.join(
os.path.expanduser("~"),
".cache/huggingface/hub",
"models--" + model_path.replace("/", "--"),
"snapshots/",
)
downloaded = False
if os.path.exists(model_path_temp):
temp_last_dir = os.listdir(model_path_temp)[-1]
model_path_temp = os.path.join(model_path_temp, temp_last_dir)
base_pattern = os.path.join(model_path_temp, "pytorch_model*.bin")
files = glob.glob(base_pattern)
if len(files) > 0:
downloaded = True
if downloaded:
model_path = model_path_temp
else:
model_path = snapshot_download(model_path, revision=revision)
base_pattern = os.path.join(model_path, "pytorch_model*.bin")
files = glob.glob(base_pattern)
use_safetensors = False
if len(files) == 0:
base_pattern = os.path.join(model_path, "*.safetensors")
files = glob.glob(base_pattern)
use_safetensors = True
if len(files) == 0:
raise ValueError(
f"Cannot find any model weight files. "
f"Please check your (cached) weight path: {model_path}"
)
compressed_state_dict = {}
if use_safetensors:
from safetensors.torch import load_file
for filename in tqdm(files):
if use_safetensors:
tmp_state_dict = load_file(filename)
else:
tmp_state_dict = torch.load(
filename, map_location=lambda storage, loc: storage
)
for name in tmp_state_dict:
if name in linear_weights:
tensor = tmp_state_dict[name].to(device, dtype=torch_dtype)
compressed_state_dict[name] = compress(
tensor, default_compression_config
)
else:
compressed_state_dict[name] = tmp_state_dict[name].to(
device, dtype=torch_dtype
)
tmp_state_dict[name] = None
tensor = None
gc.collect()
torch.cuda.empty_cache()
if device == "xpu":
torch.xpu.empty_cache()
if device == "npu":
torch.npu.empty_cache()
for name in model.state_dict():
if name not in linear_weights:
set_module_tensor_to_device(
model, name, device, value=compressed_state_dict[name]
)
apply_compressed_weight(model, compressed_state_dict, device)
if torch_dtype == torch.float16:
model.half()
model.to(device)
model.eval()
return model, tokenizer
def compress(tensor, config):
"""Simulate group-wise quantization."""
if not config.enabled:
return tensor
group_size, num_bits, group_dim, symmetric = (
config.group_size,
config.num_bits,
config.group_dim,
config.symmetric,
)
assert num_bits <= 8
original_shape = tensor.shape
num_groups = (original_shape[group_dim] + group_size - 1) // group_size
new_shape = (
original_shape[:group_dim]
+ (num_groups, group_size)
+ original_shape[group_dim + 1 :]
)
# Pad
pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
if pad_len != 0:
pad_shape = (
original_shape[:group_dim] + (pad_len,) + original_shape[group_dim + 1 :]
)
tensor = torch.cat(
[tensor, torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
dim=group_dim,
)
data = tensor.view(new_shape)
# Quantize
if symmetric:
B = 2 ** (num_bits - 1) - 1
scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
data = data * scale
data = data.clamp_(-B, B).round_().to(torch.int8)
return data, scale, original_shape
else:
B = 2**num_bits - 1
mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
scale = B / (mx - mn)
data = data - mn
data.mul_(scale)
data = data.clamp_(0, B).round_().to(torch.uint8)
return data, mn, scale, original_shape
def decompress(packed_data, config):
"""Simulate group-wise dequantization."""
if not config.enabled:
return packed_data
group_size, num_bits, group_dim, symmetric = (
config.group_size,
config.num_bits,
config.group_dim,
config.symmetric,
)
# Dequantize
if symmetric:
data, scale, original_shape = packed_data
data = data / scale
else:
data, mn, scale, original_shape = packed_data
data = data / scale
data.add_(mn)
# Unpad
pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
if pad_len:
padded_original_shape = (
original_shape[:group_dim]
+ (original_shape[group_dim] + pad_len,)
+ original_shape[group_dim + 1 :]
)
data = data.reshape(padded_original_shape)
indices = [slice(0, x) for x in original_shape]
return data[indices].contiguous()
else:
return data.view(original_shape)
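# Hedged round-trip sketch of the group-wise quantization above (only runs when this
# module is executed directly; a CPU-only torch install is assumed to be sufficient):
# with the default symmetric 8-bit config the reconstruction error stays small.
if __name__ == "__main__":
    _w = torch.randn(4, 512)
    _packed = compress(_w, default_compression_config)
    _w_hat = decompress(_packed, default_compression_config)
    print("max abs error:", (_w - _w_hat).abs().max().item())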
| 914791.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
__version__ = "3.0.0.dev0"
import os
import sys
import warnings
from typing import TYPE_CHECKING
if os.environ.get("_AIRFLOW_PATCH_GEVENT"):
# If you are using gevents and start airflow webserver, you might want to run gevent monkeypatching
# as one of the first thing when Airflow is started. This allows gevent to patch networking and other
# system libraries to make them gevent-compatible before anything else patches them (for example boto)
from gevent.monkey import patch_all
patch_all()
if sys.platform == "win32":
warnings.warn(
"Airflow currently can be run on POSIX-compliant Operating Systems. For development, "
"it is regularly tested on fairly modern Linux Distros and recent versions of macOS. "
"On Windows you can run it via WSL2 (Windows Subsystem for Linux 2) or via Linux Containers. "
"The work to add Windows support is tracked via https://github.com/apache/airflow/issues/10388, "
"but it is not a high priority.",
category=RuntimeWarning,
stacklevel=1,
)
# The configuration module initializes and validates the conf object as a side effect the first
# time it is imported. If it is not imported before importing the settings module, the conf
# object will then be initted/validated as a side effect of it being imported in settings,
# however this can cause issues since those modules are very tightly coupled and can
# very easily cause import cycles in the conf init/validate code (since downstream code from
# those functions likely import settings).
# configuration is therefore initted early here, simply by importing it.
from airflow import configuration, settings
__all__ = [
"__version__",
"DAG",
"Dataset",
"XComArg",
]
# Make `airflow` a namespace package, supporting installing
# airflow.providers.* in different locations (i.e. one in site, and one in user
# lib.)
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
# Perform side-effects unless someone has explicitly opted out before import
# WARNING: DO NOT USE THIS UNLESS YOU REALLY KNOW WHAT YOU'RE DOING.
# This environment variable prevents proper initialization, and things like
# configs, logging, the ORM, etc. will be broken. It is only useful if you only
# access certain trivial constants and free functions (e.g. `__version__`).
if not os.environ.get("_AIRFLOW__AS_LIBRARY", None):
settings.initialize()
# Things to lazy import in form {local_name: ('target_module', 'target_name', 'deprecated')}
__lazy_imports: dict[str, tuple[str, str, bool]] = {
"DAG": (".models.dag", "DAG", False),
"Dataset": (".datasets", "Dataset", False),
"XComArg": (".models.xcom_arg", "XComArg", False),
"version": (".version", "", False),
# Deprecated lazy imports
"AirflowException": (".exceptions", "AirflowException", True),
}
if TYPE_CHECKING:
# These objects are imported by PEP-562, however, static analyzers and IDE's
# have no idea about typing of these objects.
# Add it under TYPE_CHECKING block should help with it.
from airflow.models.dag import DAG
from airflow.models.dataset import Dataset
from airflow.models.xcom_arg import XComArg
def __getattr__(name: str):
# PEP-562: Lazy loaded attributes on python modules
module_path, attr_name, deprecated = __lazy_imports.get(name, ("", "", False))
if not module_path:
if name.startswith("PY3") and (py_minor := name[3:]) in ("6", "7", "8", "9", "10", "11", "12"):
warnings.warn(
f"Python version constraint {name!r} is deprecated and will be removed in the future. "
f"Please get version info from the 'sys.version_info'.",
DeprecationWarning,
stacklevel=2,
)
return sys.version_info >= (3, int(py_minor))
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
elif deprecated:
warnings.warn(
f"Import {name!r} directly from the airflow module is deprecated and "
f"will be removed in the future. Please import it from 'airflow{module_path}.{attr_name}'.",
DeprecationWarning,
stacklevel=2,
)
import importlib
mod = importlib.import_module(module_path, __name__)
if attr_name:
val = getattr(mod, attr_name)
else:
val = mod
# Store for next time
globals()[name] = val
return val
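# With the PEP-562 hook above, `airflow.DAG` and friends are imported from their
# target modules only on first attribute access and then cached in globals(), so
# subsequent lookups never reach __getattr__; the deprecated PY3* constants are
# answered directly from sys.version_info instead.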
if not settings.LAZY_LOAD_PROVIDERS:
from airflow.providers_manager import ProvidersManager
manager = ProvidersManager()
manager.initialize_providers_list()
manager.initialize_providers_hooks()
manager.initialize_providers_extra_links()
if not settings.LAZY_LOAD_PLUGINS:
from airflow import plugins_manager
plugins_manager.ensure_plugins_loaded()
| 023226.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.exceptions import RemovedInAirflow3Warning
from airflow.utils.airflow_flask_app import get_airflow_app
#
# Copyright (c) 2013, Michael Komitee
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Kerberos authentication module"""
import logging
import os
from functools import wraps
from typing import TYPE_CHECKING, Callable, NamedTuple, TypeVar, cast
import kerberos
from flask import Response, g, make_response, request
from airflow.configuration import conf
from airflow.utils.net import getfqdn
if TYPE_CHECKING:
from airflow.auth.managers.models.base_user import BaseUser
log = logging.getLogger(__name__)
class KerberosService:
"""Class to keep information about the Kerberos Service initialized."""
def __init__(self):
self.service_name = None
class _KerberosAuth(NamedTuple):
return_code: int | None
user: str = ""
token: str | None = None
# Stores currently initialized Kerberos Service
_KERBEROS_SERVICE = KerberosService()
def init_app(app):
"""Initialize application with kerberos."""
hostname = app.config.get("SERVER_NAME")
if not hostname:
hostname = getfqdn()
log.info("Kerberos: hostname %s", hostname)
service = "airflow"
_KERBEROS_SERVICE.service_name = f"{service}@{hostname}"
if "KRB5_KTNAME" not in os.environ:
os.environ["KRB5_KTNAME"] = conf.get("kerberos", "keytab")
try:
log.info("Kerberos init: %s %s", service, hostname)
principal = kerberos.getServerPrincipalDetails(service, hostname)
except kerberos.KrbError as err:
log.warning("Kerberos: %s", err)
else:
log.info("Kerberos API: server is %s", principal)
def _unauthorized():
"""Indicate that authorization is required."""
return Response("Unauthorized", 401, {"WWW-Authenticate": "Negotiate"})
def _forbidden():
return Response("Forbidden", 403)
def _gssapi_authenticate(token) -> _KerberosAuth | None:
state = None
try:
return_code, state = kerberos.authGSSServerInit(_KERBEROS_SERVICE.service_name)
if return_code != kerberos.AUTH_GSS_COMPLETE:
return _KerberosAuth(return_code=None)
if (return_code := kerberos.authGSSServerStep(state, token)) == kerberos.AUTH_GSS_COMPLETE:
return _KerberosAuth(
return_code=return_code,
user=kerberos.authGSSServerUserName(state),
token=kerberos.authGSSServerResponse(state),
)
elif return_code == kerberos.AUTH_GSS_CONTINUE:
return _KerberosAuth(return_code=return_code)
return _KerberosAuth(return_code=return_code)
except kerberos.GSSError:
return _KerberosAuth(return_code=None)
finally:
if state:
kerberos.authGSSServerClean(state)
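# GSSAPI negotiate flow, as implemented above: authGSSServerInit() creates the
# per-request security context, authGSSServerStep() consumes the client token,
# and on AUTH_GSS_COMPLETE the authenticated principal and the server response
# token are read back before authGSSServerClean() releases the context.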
T = TypeVar("T", bound=Callable)
def requires_authentication(function: T, find_user: Callable[[str], BaseUser] | None = None):
"""Decorate functions that require authentication with Kerberos."""
if not find_user:
warnings.warn(
"This module is deprecated. Please use "
"`airflow.providers.fab.auth_manager.api.auth.backend.kerberos_auth` instead.",
RemovedInAirflow3Warning,
stacklevel=2,
)
find_user = get_airflow_app().appbuilder.sm.find_user
@wraps(function)
def decorated(*args, **kwargs):
header = request.headers.get("Authorization")
if header:
token = "".join(header.split()[1:])
auth = _gssapi_authenticate(token)
if auth.return_code == kerberos.AUTH_GSS_COMPLETE:
g.user = find_user(auth.user)
response = function(*args, **kwargs)
response = make_response(response)
if auth.token is not None:
response.headers["WWW-Authenticate"] = f"negotiate {auth.token}"
return response
elif auth.return_code != kerberos.AUTH_GSS_CONTINUE:
return _forbidden()
return _unauthorized()
return cast(T, decorated)
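# Minimal usage sketch (hypothetical view function, not from this module):
#
#     @requires_authentication
#     def whoami():
#         return g.user.username
#
# The client sends "Authorization: Negotiate <token>"; on success the wrapped
# response gains a "WWW-Authenticate: negotiate <server token>" header so the
# client can verify the server in turn.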
def __getattr__(name):
# PEP-562: Lazy loaded attributes on python modules
if name != "CLIENT_AUTH":
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
from requests_kerberos import HTTPKerberosAuth
val = HTTPKerberosAuth(service="airflow")
# Store for next time
globals()[name] = val
return val
| 218664.py | [
"CWE-79: Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')"
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Internal API command."""
from __future__ import annotations
import logging
import os
import signal
import subprocess
import sys
import textwrap
from contextlib import suppress
from pathlib import Path
from tempfile import gettempdir
from time import sleep
import psutil
from flask import Flask
from flask_appbuilder import SQLA
from flask_caching import Cache
from flask_wtf.csrf import CSRFProtect
from lockfile.pidlockfile import read_pid_from_pidfile
from sqlalchemy.engine.url import make_url
from airflow import settings
from airflow.api_internal.internal_api_call import InternalApiConfig
from airflow.cli.commands.daemon_utils import run_command_with_daemon_option
from airflow.cli.commands.webserver_command import GunicornMonitor
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.logging_config import configure_logging
from airflow.models import import_all_models
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations
from airflow.utils.providers_configuration_loader import providers_configuration_loaded
from airflow.www.extensions.init_dagbag import init_dagbag
from airflow.www.extensions.init_jinja_globals import init_jinja_globals
from airflow.www.extensions.init_manifest_files import configure_manifest_files
from airflow.www.extensions.init_security import init_xframe_protection
from airflow.www.extensions.init_views import init_api_internal, init_error_handlers
log = logging.getLogger(__name__)
app: Flask | None = None
@cli_utils.action_cli
@providers_configuration_loaded
def internal_api(args):
"""Start Airflow Internal API."""
print(settings.HEADER)
access_logfile = args.access_logfile or "-"
error_logfile = args.error_logfile or "-"
access_logformat = args.access_logformat
num_workers = args.workers
worker_timeout = args.worker_timeout
if args.debug:
log.info("Starting the Internal API server on port %s and host %s.", args.port, args.hostname)
app = create_app(testing=conf.getboolean("core", "unit_test_mode"))
app.run(
debug=True, # nosec
use_reloader=not app.config["TESTING"],
port=args.port,
host=args.hostname,
)
else:
log.info(
textwrap.dedent(
f"""\
Running the Gunicorn Server with:
Workers: {num_workers} {args.workerclass}
Host: {args.hostname}:{args.port}
Timeout: {worker_timeout}
Logfiles: {access_logfile} {error_logfile}
Access Logformat: {access_logformat}
================================================================="""
)
)
pid_file, _, _, _ = setup_locations("internal-api", pid=args.pid)
run_args = [
sys.executable,
"-m",
"gunicorn",
"--workers",
str(num_workers),
"--worker-class",
str(args.workerclass),
"--timeout",
str(worker_timeout),
"--bind",
args.hostname + ":" + str(args.port),
"--name",
"airflow-internal-api",
"--pid",
pid_file,
"--access-logfile",
str(access_logfile),
"--error-logfile",
str(error_logfile),
"--config",
"python:airflow.api_internal.gunicorn_config",
]
if args.access_logformat and args.access_logformat.strip():
run_args += ["--access-logformat", str(args.access_logformat)]
if args.daemon:
run_args += ["--daemon"]
run_args += ["airflow.cli.commands.internal_api_command:cached_app()"]
# To prevent different workers from creating the web app and
# all writing to the database at the same time, we use the --preload option.
# With the preload option, the app is loaded before the workers are forked, and each worker
# will then have its own copy of the app.
run_args += ["--preload"]
def kill_proc(signum: int, gunicorn_master_proc: psutil.Process | subprocess.Popen):
log.info("Received signal: %s. Closing gunicorn.", signum)
gunicorn_master_proc.terminate()
with suppress(TimeoutError):
gunicorn_master_proc.wait(timeout=30)
if isinstance(gunicorn_master_proc, subprocess.Popen):
still_running = gunicorn_master_proc.poll() is None  # poll() returns None while the process is still alive
else:
still_running = gunicorn_master_proc.is_running()
if still_running:
gunicorn_master_proc.kill()
sys.exit(0)
def monitor_gunicorn(gunicorn_master_proc: psutil.Process | subprocess.Popen):
# Register signal handlers
signal.signal(signal.SIGINT, lambda signum, _: kill_proc(signum, gunicorn_master_proc))
signal.signal(signal.SIGTERM, lambda signum, _: kill_proc(signum, gunicorn_master_proc))
# These run forever until SIG{INT, TERM, KILL, ...} signal is sent
GunicornMonitor(
gunicorn_master_pid=gunicorn_master_proc.pid,
num_workers_expected=num_workers,
master_timeout=120,
worker_refresh_interval=30,
worker_refresh_batch_size=1,
reload_on_plugin_change=False,
).start()
def start_and_monitor_gunicorn(args):
if args.daemon:
subprocess.Popen(run_args, close_fds=True)
# Read the pid of the gunicorn master, as it will be different from
# the one of the process spawned above.
gunicorn_master_proc_pid = None
while not gunicorn_master_proc_pid:
sleep(0.1)
gunicorn_master_proc_pid = read_pid_from_pidfile(pid_file)
# Run Gunicorn monitor
gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
monitor_gunicorn(gunicorn_master_proc)
else:
with subprocess.Popen(run_args, close_fds=True) as gunicorn_master_proc:
monitor_gunicorn(gunicorn_master_proc)
if args.daemon:
# This makes possible errors get reported before daemonization
os.environ["SKIP_DAGS_PARSING"] = "True"
create_app(None)
os.environ.pop("SKIP_DAGS_PARSING")
pid_file_path = Path(pid_file)
monitor_pid_file = str(pid_file_path.with_name(f"{pid_file_path.stem}-monitor{pid_file_path.suffix}"))
run_command_with_daemon_option(
args=args,
process_name="internal-api",
callback=lambda: start_and_monitor_gunicorn(args),
should_setup_logging=True,
pid_file=monitor_pid_file,
)
def create_app(config=None, testing=False):
"""Create a new instance of Airflow Internal API app."""
flask_app = Flask(__name__)
flask_app.config["APP_NAME"] = "Airflow Internal API"
flask_app.config["TESTING"] = testing
flask_app.config["SQLALCHEMY_DATABASE_URI"] = conf.get("database", "SQL_ALCHEMY_CONN")
url = make_url(flask_app.config["SQLALCHEMY_DATABASE_URI"])
if url.drivername == "sqlite" and url.database and not url.database.startswith("/"):
raise AirflowConfigException(
f'Cannot use relative path: `{conf.get("database", "SQL_ALCHEMY_CONN")}` to connect to sqlite. '
"Please use absolute path such as `sqlite:////tmp/airflow.db`."
)
flask_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
flask_app.config["SESSION_COOKIE_HTTPONLY"] = True
flask_app.config["SESSION_COOKIE_SAMESITE"] = "Lax"
if config:
flask_app.config.from_mapping(config)
if "SQLALCHEMY_ENGINE_OPTIONS" not in flask_app.config:
flask_app.config["SQLALCHEMY_ENGINE_OPTIONS"] = settings.prepare_engine_args()
if conf.getboolean("core", "database_access_isolation", fallback=False):
InternalApiConfig.set_use_database_access("Gunicorn worker initialization")
else:
raise AirflowConfigException(
"The internal-api component should only be run when database_access_isolation is enabled."
)
csrf = CSRFProtect()
csrf.init_app(flask_app)
db = SQLA()
db.session = settings.Session
db.init_app(flask_app)
init_dagbag(flask_app)
cache_config = {"CACHE_TYPE": "flask_caching.backends.filesystem", "CACHE_DIR": gettempdir()}
Cache(app=flask_app, config=cache_config)
configure_logging()
configure_manifest_files(flask_app)
import_all_models()
with flask_app.app_context():
init_error_handlers(flask_app)
init_api_internal(flask_app, standalone_api=True)
init_jinja_globals(flask_app)
init_xframe_protection(flask_app)
return flask_app
def cached_app(config=None, testing=False):
"""Return cached instance of Airflow Internal API app."""
global app
if not app:
app = create_app(config=config, testing=testing)
return app
| 409624.py | [
"CWE-489: Active Debug Code"
] |
# Copyright (c) SenseTime Research. All rights reserved.
import os
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from utils.ImagesDataset import ImagesDataset
import cv2
import time
import copy
import imutils
# for the OpenPose body keypoint detector (src: https://github.com/Hzzone/pytorch-openpose)
from openpose.src import util
from openpose.src.body import Body
# for PaddlePaddle human segmentation (src: https://github.com/PaddlePaddle/PaddleSeg/blob/release/2.5/contrib/PP-HumanSeg/)
from PP_HumanSeg.deploy.infer import Predictor as PP_HumenSeg_Predictor
import math
def angle_between_points(p0,p1,p2):
if p0[1]==-1 or p1[1]==-1 or p2[1]==-1:
return -1
a = (p1[0]-p0[0])**2 + (p1[1]-p0[1])**2
b = (p1[0]-p2[0])**2 + (p1[1]-p2[1])**2
c = (p2[0]-p0[0])**2 + (p2[1]-p0[1])**2
if a * b == 0:
return -1
return math.acos((a+b-c) / math.sqrt(4*a*b)) * 180 / math.pi
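# angle_between_points returns the angle at p1 (in degrees) via the law of
# cosines, or -1 when any keypoint is missing. Worked example: p0=(0,0),
# p1=(1,0), p2=(1,1) gives a=1, b=1, c=2 and acos(0)*180/pi = 90 degrees.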
def crop_img_with_padding(img, keypoints, rect):
person_xmin,person_xmax, ymin, ymax= rect
img_h,img_w,_ = img.shape ## find body center using keypoints
middle_shoulder_x = keypoints[1][0]
middle_hip_x = (keypoints[8][0] + keypoints[11][0]) // 2
mid_x = (middle_hip_x + middle_shoulder_x) // 2
mid_y = (ymin + ymax) // 2
## find which side (l or r) is farther from the center x, and use the farther side
if abs(mid_x-person_xmin) > abs(person_xmax-mid_x): #left further
xmin = person_xmin
xmax = mid_x + (mid_x-person_xmin)
else:
############### xmin may be negative here
### in this case, the script won't output any image; leave the case like this
### since we don't want to pad the human body
xmin = mid_x - (person_xmax-mid_x)
xmax = person_xmax
w = xmax - xmin
h = ymax - ymin
## pad rectangle to w:h = 1:2 ## calculate desired border length
if h / w >= 2: #pad horizontally
target_w = h // 2
xmin_prime = int(mid_x - target_w / 2)
xmax_prime = int(mid_x + target_w / 2)
if xmin_prime < 0:
pad_left = abs(xmin_prime)# - xmin
xmin = 0
else:
pad_left = 0
xmin = xmin_prime
if xmax_prime > img_w:
pad_right = xmax_prime - img_w
xmax = img_w
else:
pad_right = 0
xmax = xmax_prime
cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
im_pad = cv2.copyMakeBorder(cropped_img, 0, 0, int(pad_left), int(pad_right), cv2.BORDER_REPLICATE)
else: #pad vertically
target_h = w * 2
ymin_prime = mid_y - (target_h / 2)
ymax_prime = mid_y + (target_h / 2)
if ymin_prime < 0:
pad_up = abs(ymin_prime)# - ymin
ymin = 0
else:
pad_up = 0
ymin = ymin_prime
if ymax_prime > img_h:
pad_down = ymax_prime - img_h
ymax = img_h
else:
pad_down = 0
ymax = ymax_prime
print(ymin,ymax, xmin,xmax, img.shape)
cropped_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
im_pad = cv2.copyMakeBorder(cropped_img, int(pad_up), int(pad_down), 0,
0, cv2.BORDER_REPLICATE)
result = cv2.resize(im_pad,(512,1024),interpolation = cv2.INTER_AREA)
return result
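# Worked example of the 1:2 rule above (illustrative numbers): with w=300 and
# h=700 we have h/w >= 2, so target_w = h // 2 = 350 and the crop is padded
# horizontally around mid_x (replicating border pixels) before the final resize
# to 512x1024.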
def run(args):
os.makedirs(args.output_folder, exist_ok=True)
dataset = ImagesDataset(args.image_folder, transforms.Compose([transforms.ToTensor()]))
dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
body_estimation = Body('openpose/model/body_pose_model.pth')
total = len(dataloader)
print('Num of dataloader : ', total)
os.makedirs(f'{args.output_folder}', exist_ok=True)
# os.makedirs(f'{args.output_folder}/middle_result', exist_ok=True)
## initialize HumanSeg predictor
human_seg_args = {}
human_seg_args['cfg'] = 'PP_HumanSeg/export_model/deeplabv3p_resnet50_os8_humanseg_512x512_100k_with_softmax/deploy.yaml'
human_seg_args['input_shape'] = [1024,512]
human_seg_args['save_dir'] = args.output_folder
human_seg_args['soft_predict'] = False
human_seg_args['use_gpu'] = True
human_seg_args['test_speed'] = False
human_seg_args['use_optic_flow'] = False
human_seg_args['add_argmax'] = True
human_seg_args= argparse.Namespace(**human_seg_args)
human_seg = PP_HumenSeg_Predictor(human_seg_args)
from tqdm import tqdm
for fname, image in tqdm(dataloader):
# try:
## tensor to numpy image
fname = fname[0]
print(f'Processing \'{fname}\'.')
image = (image.permute(0, 2, 3, 1) * 255).clamp(0, 255)
image = image.squeeze(0).numpy() # --> tensor to numpy, (H,W,C)
# avoid super high res img
if image.shape[0] >= 2000: # height ### for shein image
ratio = image.shape[0]/1200 #height
dim = (int(image.shape[1]/ratio),1200)#(width, height)
image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
## create segmentation
# mybg = cv2.imread('mybg.png')
comb, segmentation, bg, ori_img = human_seg.run(image,None) #mybg)
# cv2.imwrite('comb.png',comb) # [0,255]
# cv2.imwrite('alpha.png',segmentation*255) # segmentation [0,1] --> [0,255]
# cv2.imwrite('bg.png',bg) #[0,255]
# cv2.imwrite('ori_img.png',ori_img) # [0,255]
masks_np = (segmentation* 255)# .byte().cpu().numpy() #1024,512,1
mask0_np = masks_np[:,:,0].astype(np.uint8)#[0, :, :]
contours = cv2.findContours(mask0_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(contours)
c = max(cnts, key=cv2.contourArea)
extTop = tuple(c[c[:, :, 1].argmin()][0])
extBot = tuple(c[c[:, :, 1].argmax()][0])
extBot = list(extBot)
extTop = list(extTop)
pad_range = int((extBot[1]-extTop[1])*0.05)
if (int(extTop[1])<=5 and int(extTop[1])>0) and (comb.shape[0]>int(extBot[1]) and int(extBot[1])>=comb.shape[0]-5): #seg mask already reaches to the edge
# pad with pure white, pad_range+5 px at top and bottom
comb= cv2.copyMakeBorder(comb,pad_range+5,pad_range+5,0,0,cv2.BORDER_CONSTANT,value=[255,255,255])
elif int(extTop[1])<=0 or int(extBot[1])>=comb.shape[0]:
print('PAD: body out of boundary', fname) # should not happen
return {}
else:
comb = cv2.copyMakeBorder(comb, pad_range+5, pad_range+5, 0, 0, cv2.BORDER_REPLICATE) #105 instead of 100: give some extra space
extBot[1] = extBot[1] + pad_range+5
extTop[1] = extTop[1] + pad_range+5
extLeft = tuple(c[c[:, :, 0].argmin()][0])
extRight = tuple(c[c[:, :, 0].argmax()][0])
extLeft = list(extLeft)
extRight = list(extRight)
person_ymin = int(extTop[1])-pad_range # 100
person_ymax = int(extBot[1])+pad_range # 100 #height
if person_ymin<0 or person_ymax>comb.shape[0]: # out of range
return {}
person_xmin = int(extLeft[0])
person_xmax = int(extRight[0])
rect = [person_xmin,person_xmax,person_ymin, person_ymax]
# recimg = copy.deepcopy(comb)
# cv2.rectangle(recimg,(person_xmin,person_ymin),(person_xmax,person_ymax),(0,255,0),2)
# cv2.imwrite(f'{args.output_folder}/middle_result/{fname}_rec.png',recimg)
## detect keypoints
keypoints, subset = body_estimation(comb)
# print(keypoints, subset, len(subset))
if len(subset) != 1 or (len(subset)==1 and subset[0][-1]<15):
print(f'Processing \'{fname}\'. Please provide an image that contains only one person. You can also check the segmentation mask.')
continue
# canvas = copy.deepcopy(comb)
# canvas = util.draw_bodypose(canvas, keypoints, subset, show_number=True)
# cv2.imwrite(f'{args.output_folder}/middle_result/{fname}_keypoints.png',canvas)
comb = crop_img_with_padding(comb, keypoints, rect)
cv2.imwrite(f'{args.output_folder}/{fname}.png', comb)
print(f' -- Finished processing \'{fname}\'. --')
# except:
# print(f'Processing \'{fname}\'. Not satisfied the alignment strategy.')
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
t1 = time.time()
arg_formatter = argparse.ArgumentDefaultsHelpFormatter
description = 'StyleGAN-Human data process'
parser = argparse.ArgumentParser(formatter_class=arg_formatter,
description=description)
parser.add_argument('--image-folder', type=str, dest='image_folder')
parser.add_argument('--output-folder', dest='output_folder', default='results', type=str)
# parser.add_argument('--cfg', dest='cfg for segmentation', default='PP_HumanSeg/export_model/ppseg_lite_portrait_398x224_with_softmax/deploy.yaml', type=str)
print('parsing arguments')
cmd_args = parser.parse_args()
run(cmd_args)
print('total time elapsed: ', str(time.time() - t1)) | 192152.py | [
"CWE-676: Use of Potentially Dangerous Function"
] |
# Copyright (c) SenseTime Research. All rights reserved.
from legacy import save_obj, load_pkl
import torch
from torch.nn import functional as F
import pandas as pd
from .edit_config import attr_dict
import os
def conv_warper(layer, input, style, noise):
# modulate the conv weights with the per-sample style before applying the convolution
conv = layer.conv
batch, in_channel, height, width = input.shape
style = style.view(batch, 1, in_channel, 1, 1)
weight = conv.scale * conv.weight * style
if conv.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
weight = weight.view(
batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
)
if conv.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(
batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
)
weight = weight.transpose(1, 2).reshape(
batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
)
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
out = conv.blur(out)
elif conv.downsample:
input = conv.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, conv.out_channel, height, width)
out = layer.noise(out, noise=noise)
out = layer.activate(out)
return out
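# conv_warper implements StyleGAN2-style modulated convolution: the per-sample
# style scales the input channels of the conv weight, the optional demodulation
# rescales each output channel by rsqrt(sum(w^2) + 1e-8), and the batch of
# per-sample kernels is applied in a single grouped convolution (groups=batch).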
def decoder(G, style_space, latent, noise):
# a decoder wrapper for G
out = G.input(latent)
out = conv_warper(G.conv1, out, style_space[0], noise[0])
skip = G.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
out = conv_warper(conv1, out, style_space[i], noise=noise1)
out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip)
i += 2
image = skip
return image
def encoder_ifg(G, noise, attr_name, truncation=1, truncation_latent=None,
latent_dir='latent_direction/ss/',
step=0, total=0, real=False):
if not real:
styles = [noise]
styles = [G.style(s) for s in styles]
style_space = []
if truncation<1:
if not real:
style_t = []
for style in styles:
style_t.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_t
else: # styles are latent (tensor: 1,18,512), for real PTI output
truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
styles = torch.add(truncation_latent,torch.mul(torch.sub(noise,truncation_latent),truncation))
noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
if not real:
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: latent=styles
style_space.append(G.conv1.conv.modulation(latent[:, 0]))
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
style_space.append(conv1.conv.modulation(latent[:, i]))
style_space.append(conv2.conv.modulation(latent[:, i+1]))
i += 2
# get layer, strength by dict
strength = attr_dict['interface_gan'][attr_name][0]
if step != 0 and total != 0:
strength = step / total * strength
for i in range(15):
style_vect = load_pkl(os.path.join(latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, i)))
style_vect = torch.from_numpy(style_vect).to(latent.device).float()
style_space[i] += style_vect * strength
return style_space, latent, noise
def encoder_ss(G, noise, attr_name, truncation=1, truncation_latent=None,
statics_dir="latent_direction/ss_statics",
latent_dir="latent_direction/ss/",
step=0, total=0,real=False):
if not real:
styles = [noise]
styles = [G.style(s) for s in styles]
style_space = []
if truncation<1:
if not real:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
else: # styles are latent (tensor: 1,18,512), for real PTI output
truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
styles = torch.add(truncation_latent,torch.mul(torch.sub(noise,truncation_latent),truncation))
noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
if not real:
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: latent = styles
style_space.append(G.conv1.conv.modulation(latent[:, 0]))
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(
G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
):
style_space.append(conv1.conv.modulation(latent[:, i]))
style_space.append(conv2.conv.modulation(latent[:, i+1]))
i += 2
# get threshold, layer, strength by dict
layer, strength, threshold = attr_dict['stylespace'][attr_name]
statis_dir = os.path.join(statics_dir, "{}_statis/{}".format(attr_name, layer))
statis_csv_path = os.path.join(statis_dir, "statis.csv")
statis_df = pd.read_csv(statis_csv_path)
statis_df = statis_df.sort_values(by='channel', ascending=True)
ch_mask = statis_df['strength'].values
ch_mask = torch.from_numpy(ch_mask).to(latent.device).float()
ch_mask = (ch_mask.abs()>threshold).float()
style_vect = load_pkl(os.path.join(latent_dir, '{}/style_vect_mean_{}.pkl'.format(attr_name, layer)))
style_vect = torch.from_numpy(style_vect).to(latent.device).float()
style_vect = style_vect * ch_mask
if step != 0 and total != 0:
strength = step / total * strength
style_space[layer] += style_vect * strength
return style_space, latent, noise
def encoder_sefa(G, noise, attr_name, truncation=1, truncation_latent=None,
latent_dir='latent_direction/sefa/',
step=0, total=0, real=False):
if not real:
styles = [noise]
styles = [G.style(s) for s in styles]
if truncation<1:
if not real:
style_t = []
for style in styles:
style_t.append(
truncation_latent + truncation * (style - truncation_latent)
)
styles = style_t
else:
truncation_latent = truncation_latent.repeat(18,1).unsqueeze(0) # (1,512) --> (1,18,512)
styles = torch.add(truncation_latent,torch.mul(torch.sub(noise,truncation_latent),truncation))
noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
if not real:
inject_index = G.n_latent
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: latent = styles
layer, strength = attr_dict['sefa'][attr_name]
sefa_vect = torch.load(os.path.join(latent_dir, '{}.pt'.format(attr_name))).to(latent.device).float()
if step != 0 and total != 0:
strength = step / total * strength
for l in layer:
latent[:, l, :] += (sefa_vect * strength * 2)
return latent, noise
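# Summary of the three editing paths above (hedged reading of this module):
# encoder_ifg shifts the first 15 style-space vectors along per-attribute
# InterfaceGAN directions, encoder_ss edits a single style-space layer with a
# channel mask thresholded on per-channel strength statistics, and encoder_sefa
# adds a SeFa direction to selected W+ layers; in all three, strength is scaled
# linearly by step/total when a step schedule is given.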
| 263731.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
from enum import Enum
import math
import numpy as np
import torch
from torch import nn
from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
from pti.pti_models.e4e.encoders.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add
from pti.pti_models.e4e.stylegan2.model import EqualLinear
class ProgressiveStage(Enum):
WTraining = 0
Delta1Training = 1
Delta2Training = 2
Delta3Training = 3
Delta4Training = 4
Delta5Training = 5
Delta6Training = 6
Delta7Training = 7
Delta8Training = 8
Delta9Training = 9
Delta10Training = 10
Delta11Training = 11
Delta12Training = 12
Delta13Training = 13
Delta14Training = 14
Delta15Training = 15
Delta16Training = 16
Delta17Training = 17
Inference = 18
class GradualStyleBlock(Module):
def __init__(self, in_c, out_c, spatial):
super(GradualStyleBlock, self).__init__()
self.out_c = out_c
self.spatial = spatial
num_pools = int(np.log2(spatial))
modules = []
modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU()]
for i in range(num_pools - 1):
modules += [
Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
nn.LeakyReLU()
]
self.convs = nn.Sequential(*modules)
self.linear = EqualLinear(out_c, out_c, lr_mul=1)
def forward(self, x):
x = self.convs(x)
x = x.view(-1, self.out_c)
x = self.linear(x)
return x
class GradualStyleEncoder(Module):
def __init__(self, num_layers, mode='ir', opts=None):
super(GradualStyleEncoder, self).__init__()
assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
log_size = int(math.log(opts.stylegan_size, 2))
self.style_count = 2 * log_size - 2
self.coarse_ind = 3
self.middle_ind = 7
for i in range(self.style_count):
if i < self.coarse_ind:
style = GradualStyleBlock(512, 512, 16)
elif i < self.middle_ind:
style = GradualStyleBlock(512, 512, 32)
else:
style = GradualStyleBlock(512, 512, 64)
self.styles.append(style)
self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x = self.input_layer(x)
latents = []
modulelist = list(self.body._modules.values())
for i, l in enumerate(modulelist):
x = l(x)
if i == 6:
c1 = x
elif i == 20:
c2 = x
elif i == 23:
c3 = x
for j in range(self.coarse_ind):
latents.append(self.styles[j](c3))
p2 = _upsample_add(c3, self.latlayer1(c2))
for j in range(self.coarse_ind, self.middle_ind):
latents.append(self.styles[j](p2))
p1 = _upsample_add(p2, self.latlayer2(c1))
for j in range(self.middle_ind, self.style_count):
latents.append(self.styles[j](p1))
out = torch.stack(latents, dim=1)
return out
class Encoder4Editing(Module):
def __init__(self, num_layers, mode='ir', opts=None):
super(Encoder4Editing, self).__init__()
assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
self.styles = nn.ModuleList()
log_size = int(math.log(opts.stylegan_size, 2))
self.style_count = 2 * log_size - 2
self.coarse_ind = 3
self.middle_ind = 7
for i in range(self.style_count):
if i < self.coarse_ind:
style = GradualStyleBlock(512, 512, 16)
elif i < self.middle_ind:
style = GradualStyleBlock(512, 512, 32)
else:
style = GradualStyleBlock(512, 512, 64)
self.styles.append(style)
self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
self.progressive_stage = ProgressiveStage.Inference
def get_deltas_starting_dimensions(self):
''' Get a list of the initial dimensions from which each delta is applied '''
return list(range(self.style_count)) # Each dimension has a delta applied to it
def set_progressive_stage(self, new_stage: ProgressiveStage):
self.progressive_stage = new_stage
print('Changed progressive stage to: ', new_stage)
def forward(self, x):
x = self.input_layer(x)
modulelist = list(self.body._modules.values())
for i, l in enumerate(modulelist):
x = l(x)
if i == 6:
c1 = x
elif i == 20:
c2 = x
elif i == 23:
c3 = x
# Infer main W and duplicate it
w0 = self.styles[0](c3)
w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
stage = self.progressive_stage.value
features = c3
for i in range(1, min(stage + 1, self.style_count)): # Infer additional deltas
if i == self.coarse_ind:
p2 = _upsample_add(c3, self.latlayer1(c2)) # FPN's middle features
features = p2
elif i == self.middle_ind:
p1 = _upsample_add(p2, self.latlayer2(c1)) # FPN's fine features
features = p1
delta_i = self.styles[i](features)
w[:, i] += delta_i
return w
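# Progressive e4e inference, as implemented above: the base code w0 predicted
# from the coarsest feature map is duplicated across all style_count slots, and
# for every stage up to self.progressive_stage a per-slot delta is predicted
# from progressively finer FPN features and added to the corresponding w[:, i].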
| 076051.py | [
"Unknown"
] |
import json
import random
import subprocess
import threading
import time
from typing import NamedTuple
import libtmux
class InstructionSpec(NamedTuple):
instruction: str
time_from: float
time_to: float
class CliDirector:
def __init__(self):
self.record_start = None
self.pause_between_keys = 0.2
self.instructions: list[InstructionSpec] = []
def start(self, filename: str, width: int = 0, height: int = 0) -> libtmux.Session:
self.start_session(width, height)
self.start_recording(filename)
return self.tmux_session
def start_session(self, width: int = 0, height: int = 0) -> libtmux.Session:
self.tmux_server = libtmux.Server()
self.tmux_session = self.tmux_server.new_session(
session_name="asciinema_recorder", kill_session=True
)
self.tmux_pane = self.tmux_session.attached_window.attached_pane
self.tmux_version = self.tmux_pane.display_message("#{version}", True)
if width and height:
self.resize_window(width, height)
self.pause(3)
return self.tmux_session
def start_recording(self, filename: str) -> None:
self.asciinema_proc = subprocess.Popen(
[
"asciinema",
"rec",
"-y",
"--overwrite",
"-c",
"tmux attach -t asciinema_recorder",
filename,
]
)
self.pause(1.5)
self.record_start = time.time()
def resize_window(self, width: int, height: int) -> None:
subprocess.Popen(
["resize", "-s", str(height), str(width)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def end(self) -> None:
self.end_recording()
self.end_session()
def end_recording(self) -> None:
self.asciinema_proc.terminate()
self.asciinema_proc.wait(timeout=5)
self.record_start = None
self.instructions = []
def end_session(self) -> None:
self.tmux_session.kill_session()
def press_key(
self, keys: str, count=1, pause: float | None = None, target=None
) -> None:
if pause is None:
pause = self.pause_between_keys
if target is None:
target = self.tmux_pane
for i in range(count):
if keys == " ":
keys = "Space"
target.send_keys(cmd=keys, enter=False, suppress_history=False)
# inspired by https://github.com/dmotz/TuringType
real_pause = random.uniform(0, pause) + 0.4 * pause
if keys == "Space":
real_pause += 1.5 * pause
elif keys == ".":
real_pause += pause
elif random.random() > 0.75:
real_pause += pause
elif random.random() > 0.95:
real_pause += 2 * pause
self.pause(real_pause)
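# Typing-rhythm model used above (inspired by TuringType): each keystroke waits
# uniform(0, pause) + 0.4 * pause, spaces and full stops get extra delay, and a
# small random fraction of keys pause longer to mimic a human typist.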
def type(self, keys: str, pause: float | None = None, target=None) -> None:
if pause is None:
pause = self.pause_between_keys
if target is None:
target = self.tmux_pane
target.select_pane()
for key in keys:
self.press_key(key, pause=pause, target=target)
def exec(self, keys: str, target=None) -> None:
if target is None:
target = self.tmux_pane
self.type(keys, target=target)
self.pause(1.25)
self.press_key("Enter", target=target)
self.pause(0.5)
def focus_pane(self, pane: libtmux.Pane, set_active_pane: bool = True) -> None:
pane.select_pane()
if set_active_pane:
self.tmux_pane = pane
def pause(self, seconds: float) -> None:
time.sleep(seconds)
def run_external(self, command: str) -> None:
subprocess.run(command, shell=True)
def message(
self,
msg: str,
duration: int | None = None,
add_instruction: bool = True,
instruction_html: str = "",
) -> None:
if duration is None:
duration = len(msg) * 0.08 # seconds
self.tmux_session.set_option(
"display-time", int(duration * 1000)
) # milliseconds
self.tmux_pane.display_message(" " + msg)
if add_instruction or instruction_html:
if not instruction_html:
instruction_html = msg
self.instruction(instruction=instruction_html, duration=duration)
self.pause(duration + 0.5)
def popup(self, content: str, duration: int = 4) -> None:
# todo: check if installed tmux version supports display-popup
# tmux's display-popup is blocking, so we close it in a separate thread
t = threading.Thread(target=self.close_popup, args=[duration])
t.start()
lines = content.splitlines()
self.tmux_pane.cmd("display-popup", "", *lines)
t.join()
def close_popup(self, duration: float = 0) -> None:
self.pause(duration)
self.tmux_pane.cmd("display-popup", "-C")
def instruction(
self, instruction: str, duration: float = 3, time_from: float | None = None
) -> None:
if time_from is None:
time_from = self.current_time
self.instructions.append(
InstructionSpec(
instruction=str(len(self.instructions) + 1) + ". " + instruction,
time_from=round(time_from, 1),
time_to=round(time_from + duration, 1),
)
)
def save_instructions(self, output_path: str) -> None:
instr_as_dicts = []
for instr in self.instructions:
instr_as_dicts.append(instr._asdict())
with open(output_path, "w", encoding="utf-8") as f:
json.dump(instr_as_dicts, f, ensure_ascii=False, indent=4)
@property
def current_time(self) -> float:
now = time.time()
return round(now - self.record_start, 1)
@property
def current_pane(self) -> libtmux.Pane:
return self.tmux_pane
| 245327.py | [
"CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')"
] |
"""
This module manages and invokes typed commands.
"""
import functools
import inspect
import logging
import sys
import textwrap
import types
from collections.abc import Callable
from collections.abc import Iterable
from collections.abc import Sequence
from typing import Any
from typing import NamedTuple
import pyparsing
import mitmproxy.types
from mitmproxy import command_lexer
from mitmproxy import exceptions
from mitmproxy.command_lexer import unquote
def verify_arg_signature(f: Callable, args: Iterable[Any], kwargs: dict) -> None:
sig = inspect.signature(f, eval_str=True)
try:
sig.bind(*args, **kwargs)
except TypeError as v:
raise exceptions.CommandError("command argument mismatch: %s" % v.args[0])
def typename(t: type) -> str:
"""
Translates a type to an explanatory string.
"""
if t == inspect._empty: # type: ignore
raise exceptions.CommandError("missing type annotation")
to = mitmproxy.types.CommandTypes.get(t, None)
if not to:
raise exceptions.CommandError(
"unsupported type: %s" % getattr(t, "__name__", t)
)
return to.display
def _empty_as_none(x: Any) -> Any:
if x == inspect.Signature.empty:
return None
return x
class CommandParameter(NamedTuple):
name: str
type: type
kind: inspect._ParameterKind = inspect.Parameter.POSITIONAL_OR_KEYWORD
def __str__(self):
if self.kind is inspect.Parameter.VAR_POSITIONAL:
return f"*{self.name}"
else:
return self.name
class Command:
name: str
manager: "CommandManager"
signature: inspect.Signature
help: str | None
def __init__(self, manager: "CommandManager", name: str, func: Callable) -> None:
self.name = name
self.manager = manager
self.func = func
self.signature = inspect.signature(self.func, eval_str=True)
if func.__doc__:
txt = func.__doc__.strip()
self.help = "\n".join(textwrap.wrap(txt))
else:
self.help = None
# This fails with a CommandException if types are invalid
for name, parameter in self.signature.parameters.items():
t = parameter.annotation
if not mitmproxy.types.CommandTypes.get(parameter.annotation, None):
raise exceptions.CommandError(
f"Argument {name} has an unknown type {t} in {func}."
)
if self.return_type and not mitmproxy.types.CommandTypes.get(
self.return_type, None
):
raise exceptions.CommandError(
f"Return type has an unknown type ({self.return_type}) in {func}."
)
@property
def return_type(self) -> type | None:
return _empty_as_none(self.signature.return_annotation)
@property
def parameters(self) -> list[CommandParameter]:
"""Returns a list of CommandParameters."""
ret = []
for name, param in self.signature.parameters.items():
ret.append(CommandParameter(name, param.annotation, param.kind))
return ret
def signature_help(self) -> str:
params = " ".join(str(param) for param in self.parameters)
if self.return_type:
ret = f" -> {typename(self.return_type)}"
else:
ret = ""
return f"{self.name} {params}{ret}"
def prepare_args(self, args: Sequence[str]) -> inspect.BoundArguments:
try:
bound_arguments = self.signature.bind(*args)
except TypeError:
expected = f"Expected: {str(self.signature.parameters)}"
received = f"Received: {str(args)}"
raise exceptions.CommandError(
f"Command argument mismatch: \n {expected}\n {received}"
)
for name, value in bound_arguments.arguments.items():
param = self.signature.parameters[name]
convert_to = param.annotation
if param.kind == param.VAR_POSITIONAL:
bound_arguments.arguments[name] = tuple(
parsearg(self.manager, x, convert_to) for x in value
)
else:
bound_arguments.arguments[name] = parsearg(
self.manager, value, convert_to
)
bound_arguments.apply_defaults()
return bound_arguments
def call(self, args: Sequence[str]) -> Any:
"""
Call the command with a list of arguments. At this point, all
arguments are strings.
"""
bound_args = self.prepare_args(args)
ret = self.func(*bound_args.args, **bound_args.kwargs)
if ret is None and self.return_type is None:
return
typ = mitmproxy.types.CommandTypes.get(self.return_type)
assert typ
if not typ.is_valid(self.manager, typ, ret):
raise exceptions.CommandError(
f"{self.name} returned unexpected data - expected {typ.display}"
)
return ret
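# Invocation flow for a Command, as implemented above: the string arguments are
# bound against the inspected signature, each value is converted via parsearg()
# using the registered mitmproxy.types handlers, the wrapped function is called,
# and the return value is validated against the declared return type.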
class ParseResult(NamedTuple):
value: str
type: type
valid: bool
class CommandManager:
commands: dict[str, Command]
def __init__(self, master):
self.master = master
self.commands = {}
def collect_commands(self, addon):
for i in dir(addon):
if not i.startswith("__"):
o = getattr(addon, i)
try:
# hasattr is not enough, see https://github.com/mitmproxy/mitmproxy/issues/3794
is_command = isinstance(getattr(o, "command_name", None), str)
except Exception:
pass # getattr may raise if o implements __getattr__.
else:
if is_command:
try:
self.add(o.command_name, o)
except exceptions.CommandError as e:
logging.warning(
f"Could not load command {o.command_name}: {e}"
)
def add(self, path: str, func: Callable):
self.commands[path] = Command(self, path, func)
@functools.lru_cache(maxsize=128)
def parse_partial(
self, cmdstr: str
) -> tuple[Sequence[ParseResult], Sequence[CommandParameter]]:
"""
Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.
"""
parts: pyparsing.ParseResults = command_lexer.expr.parseString(
cmdstr, parseAll=True
)
parsed: list[ParseResult] = []
next_params: list[CommandParameter] = [
CommandParameter("", mitmproxy.types.Cmd),
CommandParameter("", mitmproxy.types.CmdArgs),
]
expected: CommandParameter | None = None
for part in parts:
if part.isspace():
parsed.append(
ParseResult(
value=part,
type=mitmproxy.types.Space,
valid=True,
)
)
continue
if expected and expected.kind is inspect.Parameter.VAR_POSITIONAL:
assert not next_params
elif next_params:
expected = next_params.pop(0)
else:
expected = CommandParameter("", mitmproxy.types.Unknown)
arg_is_known_command = (
expected.type == mitmproxy.types.Cmd and part in self.commands
)
arg_is_unknown_command = (
expected.type == mitmproxy.types.Cmd and part not in self.commands
)
command_args_following = (
next_params and next_params[0].type == mitmproxy.types.CmdArgs
)
if arg_is_known_command and command_args_following:
next_params = self.commands[part].parameters + next_params[1:]
if arg_is_unknown_command and command_args_following:
next_params.pop(0)
to = mitmproxy.types.CommandTypes.get(expected.type, None)
valid = False
if to:
try:
to.parse(self, expected.type, part)
except ValueError:
valid = False
else:
valid = True
parsed.append(
ParseResult(
value=part,
type=expected.type,
valid=valid,
)
)
return parsed, next_params
def call(self, command_name: str, *args: Any) -> Any:
"""
Call a command with native arguments. May raise CommandError.
"""
if command_name not in self.commands:
raise exceptions.CommandError("Unknown command: %s" % command_name)
return self.commands[command_name].func(*args)
def call_strings(self, command_name: str, args: Sequence[str]) -> Any:
"""
Call a command using a list of string arguments. May raise CommandError.
"""
if command_name not in self.commands:
raise exceptions.CommandError("Unknown command: %s" % command_name)
return self.commands[command_name].call(args)
def execute(self, cmdstr: str) -> Any:
"""
Execute a command string. May raise CommandError.
"""
parts, _ = self.parse_partial(cmdstr)
if not parts:
raise exceptions.CommandError(f"Invalid command: {cmdstr!r}")
command_name, *args = (
unquote(part.value) for part in parts if part.type != mitmproxy.types.Space
)
return self.call_strings(command_name, args)
def dump(self, out=sys.stdout) -> None:
cmds = list(self.commands.values())
cmds.sort(key=lambda x: x.signature_help())
for c in cmds:
for hl in (c.help or "").splitlines():
print("# " + hl, file=out)
print(c.signature_help(), file=out)
print(file=out)
def parsearg(manager: CommandManager, spec: str, argtype: type) -> Any:
"""
Convert a string to a argument to the appropriate type.
"""
t = mitmproxy.types.CommandTypes.get(argtype, None)
if not t:
raise exceptions.CommandError(f"Unsupported argument type: {argtype}")
try:
return t.parse(manager, argtype, spec)
except ValueError as e:
raise exceptions.CommandError(str(e)) from e
def command(name: str | None = None):
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
verify_arg_signature(function, args, kwargs)
return function(*args, **kwargs)
wrapper.__dict__["command_name"] = name or function.__name__.replace("_", ".")
return wrapper
return decorator
def argument(name, type):
"""
Set the type of a command argument at runtime. This is useful for more
specific types such as mitmproxy.types.Choice, which we cannot annotate
directly as mypy does not like that.
"""
def decorator(f: types.FunctionType) -> types.FunctionType:
assert name in f.__annotations__
f.__annotations__[name] = type
return f
return decorator
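# Minimal usage sketch (hypothetical addon, not part of this module):
#
#     class MyAddon:
#         @command("myaddon.greet")
#         def greet(self, name: str) -> str:
#             """Return a greeting."""
#             return f"hello {name}"
#
# Once the addon is loaded, CommandManager.execute('myaddon.greet "world"')
# parses the string arguments, converts them to the annotated types, and calls
# the wrapped function.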
| 527709.py | [
"CWE-95: Improper Neutralization of Directives in Dynamically Evaluated Code ('Eval Injection')"
] |
#!/usr/bin/env -S python3 -u
import datetime
import http.client
import json
import os
import re
import subprocess
import sys
import time
from pathlib import Path
# Security: No third-party dependencies here!
root = Path(__file__).absolute().parent.parent
def get(url: str) -> http.client.HTTPResponse:
assert url.startswith("https://")
host, path = re.split(r"(?=/)", url.removeprefix("https://"), maxsplit=1)
conn = http.client.HTTPSConnection(host)
conn.request("GET", path, headers={"User-Agent": "mitmproxy/release-bot"})
resp = conn.getresponse()
print(f"HTTP {resp.status} {resp.reason}")
return resp
def get_json(url: str) -> dict:
resp = get(url)
body = resp.read()
try:
return json.loads(body)
except Exception as e:
raise RuntimeError(f"{resp.status=} {body=}") from e
if __name__ == "__main__":
version = sys.argv[1]
assert re.match(r"^\d+\.\d+\.\d+$", version)
major_version = int(version.split(".")[0])
skip_branch_status_check = sys.argv[2] == "true"
# changing this is useful for testing on a fork.
repo = os.environ.get("GITHUB_REPOSITORY", "mitmproxy/mitmproxy")
print(f"{version=} {skip_branch_status_check=} {repo=}")
branch = subprocess.run(
["git", "branch", "--show-current"],
cwd=root,
check=True,
capture_output=True,
text=True,
).stdout.strip()
print("➡️ Working dir clean?")
assert not subprocess.run(["git", "status", "--porcelain"], capture_output=True).stdout
if skip_branch_status_check:
print(f"⚠️ Skipping status check for {branch}.")
else:
print(f"➡️ CI is passing for {branch}?")
assert (
get_json(f"https://api.github.com/repos/{repo}/commits/{branch}/status")[
"state"
]
== "success"
)
print("➡️ Updating CHANGELOG.md...")
changelog = root / "CHANGELOG.md"
date = datetime.date.today().strftime("%d %B %Y")
title = f"## {date}: mitmproxy {version}"
cl = changelog.read_text("utf8")
assert title not in cl
cl, ok = re.subn(r"(?<=## Unreleased: mitmproxy next)", f"\n\n\n{title}", cl)
assert ok == 1
changelog.write_text(cl, "utf8")
print("➡️ Updating web assets...")
subprocess.run(["npm", "ci"], cwd=root / "web", check=True, capture_output=True)
subprocess.run(
["npm", "start", "prod"], cwd=root / "web", check=True, capture_output=True
)
print("➡️ Updating version...")
version_py = root / "mitmproxy" / "version.py"
ver = version_py.read_text("utf8")
ver, ok = re.subn(r'(?<=VERSION = ")[^"]+', version, ver)
assert ok == 1
version_py.write_text(ver, "utf8")
print("➡️ Do release commit...")
subprocess.run(
["git", "config", "user.email", "[email protected]"], cwd=root, check=True
)
subprocess.run(
["git", "config", "user.name", "mitmproxy release bot"], cwd=root, check=True
)
subprocess.run(
["git", "commit", "-a", "-m", f"mitmproxy {version}"], cwd=root, check=True
)
tag_name = f"v{version}"
subprocess.run(["git", "tag", tag_name], cwd=root, check=True)
release_sha = subprocess.run(
["git", "rev-parse", "HEAD"],
cwd=root,
check=True,
capture_output=True,
text=True,
).stdout.strip()
if branch == "main":
print("➡️ Bump version...")
next_dev_version = f"{major_version + 1}.0.0.dev"
ver, ok = re.subn(r'(?<=VERSION = ")[^"]+', next_dev_version, ver)
assert ok == 1
version_py.write_text(ver, "utf8")
print("➡️ Reopen main for development...")
subprocess.run(
["git", "commit", "-a", "-m", f"reopen main for development"],
cwd=root,
check=True,
)
print("➡️ Pushing...")
subprocess.run(
["git", "push", "--atomic", "origin", branch, tag_name], cwd=root, check=True
)
print("➡️ Creating release on GitHub...")
subprocess.run(
[
"gh",
"release",
"create",
tag_name,
"--title",
f"mitmproxy {version}",
"--notes-file",
"release/github-release-notes.txt",
],
cwd=root,
check=True,
)
print("➡️ Dispatching release workflow...")
subprocess.run(
["gh", "workflow", "run", "main.yml", "--ref", tag_name], cwd=root, check=True
)
print("")
print("✅ CI is running now.")
while True:
print("⌛ Waiting for CI...")
workflows = get_json(
f"https://api.github.com/repos/{repo}/actions/runs?head_sha={release_sha}"
)["workflow_runs"]
all_done = True
if not workflows:
all_done = False # we expect to have at least one workflow.
for workflow in workflows:
if workflow["status"] != "completed":
all_done = False
if workflow["status"] == "waiting":
print(f"⚠️ CI is waiting for approval: {workflow['html_url']}")
if all_done:
for workflow in workflows:
if workflow["conclusion"] != "success":
print(f"⚠️ {workflow['display_title']} workflow run failed.")
break
else:
time.sleep(30) # relatively strict rate limits here.
print("➡️ Checking GitHub Releases...")
resp = get(f"https://api.github.com/repos/{repo}/releases/tags/{tag_name}")
assert resp.status == 200
print("➡️ Checking PyPI...")
pypi_data = get_json("https://pypi.org/pypi/mitmproxy/json")
assert version in pypi_data["releases"]
print("➡️ Checking docs archive...")
resp = get(f"https://docs.mitmproxy.org/archive/v{major_version}/")
assert resp.status == 200
print(f"➡️ Checking Docker ({version} tag)...")
resp = get(
f"https://hub.docker.com/v2/repositories/mitmproxy/mitmproxy/tags/{version}"
)
assert resp.status == 200
if branch == "main":
print("➡️ Checking Docker (latest tag)...")
docker_latest_data = get_json(
"https://hub.docker.com/v2/repositories/mitmproxy/mitmproxy/tags/latest"
)
docker_last_updated = datetime.datetime.fromisoformat(
docker_latest_data["last_updated"].replace("Z", "+00:00")
)
print(f"Last update: {docker_last_updated.isoformat(timespec='minutes')}")
assert docker_last_updated > datetime.datetime.now(
datetime.timezone.utc
) - datetime.timedelta(hours=2)
print("")
print("✅ All done. 🥳")
print("")
| 933962.py | [
"CWE-295: Improper Certificate Validation"
] |
import cv2
import math
import numpy as np
import os.path as osp
import torch
import torch.utils.data as data
from basicsr.data import degradations as degradations
from basicsr.data.data_util import paths_from_folder
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
from torchvision.transforms.functional import (adjust_brightness, adjust_contrast, adjust_hue, adjust_saturation,
normalize)
@DATASET_REGISTRY.register()
class FFHQDegradationDataset(data.Dataset):
"""FFHQ dataset for GFPGAN.
It reads high resolution images, and then generates low-quality (LQ) images on-the-fly.
Args:
opt (dict): Config for train datasets. It contains the following keys:
dataroot_gt (str): Data root path for gt.
io_backend (dict): IO backend type and other kwarg.
mean (list | tuple): Image mean.
std (list | tuple): Image std.
use_hflip (bool): Whether to horizontally flip.
Please see more options in the codes.
"""
def __init__(self, opt):
super(FFHQDegradationDataset, self).__init__()
self.opt = opt
# file client (io backend)
self.file_client = None
self.io_backend_opt = opt['io_backend']
self.gt_folder = opt['dataroot_gt']
self.mean = opt['mean']
self.std = opt['std']
self.out_size = opt['out_size']
self.crop_components = opt.get('crop_components', False) # facial components
self.eye_enlarge_ratio = opt.get('eye_enlarge_ratio', 1) # whether enlarge eye regions
if self.crop_components:
# load component list from a pre-process pth files
self.components_list = torch.load(opt.get('component_path'))
# file client (lmdb io backend)
if self.io_backend_opt['type'] == 'lmdb':
self.io_backend_opt['db_paths'] = self.gt_folder
if not self.gt_folder.endswith('.lmdb'):
raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
self.paths = [line.split('.')[0] for line in fin]
else:
# disk backend: scan file list from a folder
self.paths = paths_from_folder(self.gt_folder)
# degradation configurations
self.blur_kernel_size = opt['blur_kernel_size']
self.kernel_list = opt['kernel_list']
self.kernel_prob = opt['kernel_prob']
self.blur_sigma = opt['blur_sigma']
self.downsample_range = opt['downsample_range']
self.noise_range = opt['noise_range']
self.jpeg_range = opt['jpeg_range']
# color jitter
self.color_jitter_prob = opt.get('color_jitter_prob')
self.color_jitter_pt_prob = opt.get('color_jitter_pt_prob')
self.color_jitter_shift = opt.get('color_jitter_shift', 20)
# to gray
self.gray_prob = opt.get('gray_prob')
logger = get_root_logger()
logger.info(f'Blur: blur_kernel_size {self.blur_kernel_size}, sigma: [{", ".join(map(str, self.blur_sigma))}]')
logger.info(f'Downsample: downsample_range [{", ".join(map(str, self.downsample_range))}]')
logger.info(f'Noise: [{", ".join(map(str, self.noise_range))}]')
logger.info(f'JPEG compression: [{", ".join(map(str, self.jpeg_range))}]')
if self.color_jitter_prob is not None:
logger.info(f'Use random color jitter. Prob: {self.color_jitter_prob}, shift: {self.color_jitter_shift}')
if self.gray_prob is not None:
logger.info(f'Use random gray. Prob: {self.gray_prob}')
self.color_jitter_shift /= 255.
@staticmethod
def color_jitter(img, shift):
"""jitter color: randomly jitter the RGB values, in numpy formats"""
jitter_val = np.random.uniform(-shift, shift, 3).astype(np.float32)
img = img + jitter_val
img = np.clip(img, 0, 1)
return img
@staticmethod
def color_jitter_pt(img, brightness, contrast, saturation, hue):
"""jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats"""
fn_idx = torch.randperm(4)
for fn_id in fn_idx:
if fn_id == 0 and brightness is not None:
brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
img = adjust_brightness(img, brightness_factor)
if fn_id == 1 and contrast is not None:
contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
img = adjust_contrast(img, contrast_factor)
if fn_id == 2 and saturation is not None:
saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
img = adjust_saturation(img, saturation_factor)
if fn_id == 3 and hue is not None:
hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
img = adjust_hue(img, hue_factor)
return img
def get_component_coordinates(self, index, status):
"""Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file"""
components_bbox = self.components_list[f'{index:08d}']
if status[0]: # hflip
# exchange right and left eye
tmp = components_bbox['left_eye']
components_bbox['left_eye'] = components_bbox['right_eye']
components_bbox['right_eye'] = tmp
# modify the width coordinate
components_bbox['left_eye'][0] = self.out_size - components_bbox['left_eye'][0]
components_bbox['right_eye'][0] = self.out_size - components_bbox['right_eye'][0]
components_bbox['mouth'][0] = self.out_size - components_bbox['mouth'][0]
# get coordinates
locations = []
for part in ['left_eye', 'right_eye', 'mouth']:
mean = components_bbox[part][0:2]
half_len = components_bbox[part][2]
if 'eye' in part:
half_len *= self.eye_enlarge_ratio
loc = np.hstack((mean - half_len + 1, mean + half_len))
loc = torch.from_numpy(loc).float()
locations.append(loc)
return locations
def __getitem__(self, index):
if self.file_client is None:
self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
# load gt image
# Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
gt_path = self.paths[index]
img_bytes = self.file_client.get(gt_path)
img_gt = imfrombytes(img_bytes, float32=True)
# random horizontal flip
img_gt, status = augment(img_gt, hflip=self.opt['use_hflip'], rotation=False, return_status=True)
h, w, _ = img_gt.shape
# get facial component coordinates
if self.crop_components:
locations = self.get_component_coordinates(index, status)
loc_left_eye, loc_right_eye, loc_mouth = locations
# ------------------------ generate lq image ------------------------ #
# blur
kernel = degradations.random_mixed_kernels(
self.kernel_list,
self.kernel_prob,
self.blur_kernel_size,
self.blur_sigma,
self.blur_sigma, [-math.pi, math.pi],
noise_range=None)
img_lq = cv2.filter2D(img_gt, -1, kernel)
# downsample
scale = np.random.uniform(self.downsample_range[0], self.downsample_range[1])
img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR)
# noise
if self.noise_range is not None:
img_lq = degradations.random_add_gaussian_noise(img_lq, self.noise_range)
# jpeg compression
if self.jpeg_range is not None:
img_lq = degradations.random_add_jpg_compression(img_lq, self.jpeg_range)
# resize to original size
img_lq = cv2.resize(img_lq, (w, h), interpolation=cv2.INTER_LINEAR)
# random color jitter (only for lq)
if self.color_jitter_prob is not None and (np.random.uniform() < self.color_jitter_prob):
img_lq = self.color_jitter(img_lq, self.color_jitter_shift)
# random to gray (only for lq)
if self.gray_prob and np.random.uniform() < self.gray_prob:
img_lq = cv2.cvtColor(img_lq, cv2.COLOR_BGR2GRAY)
img_lq = np.tile(img_lq[:, :, None], [1, 1, 3])
if self.opt.get('gt_gray'): # whether convert GT to gray images
img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
img_gt = np.tile(img_gt[:, :, None], [1, 1, 3]) # repeat the color channels
# BGR to RGB, HWC to CHW, numpy to tensor
img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
# random color jitter (pytorch version) (only for lq)
if self.color_jitter_pt_prob is not None and (np.random.uniform() < self.color_jitter_pt_prob):
brightness = self.opt.get('brightness', (0.5, 1.5))
contrast = self.opt.get('contrast', (0.5, 1.5))
saturation = self.opt.get('saturation', (0, 1.5))
hue = self.opt.get('hue', (-0.1, 0.1))
img_lq = self.color_jitter_pt(img_lq, brightness, contrast, saturation, hue)
# round and clip
img_lq = torch.clamp((img_lq * 255.0).round(), 0, 255) / 255.
# normalize
normalize(img_gt, self.mean, self.std, inplace=True)
normalize(img_lq, self.mean, self.std, inplace=True)
if self.crop_components:
return_dict = {
'lq': img_lq,
'gt': img_gt,
'gt_path': gt_path,
'loc_left_eye': loc_left_eye,
'loc_right_eye': loc_right_eye,
'loc_mouth': loc_mouth
}
return return_dict
else:
return {'lq': img_lq, 'gt': img_gt, 'gt_path': gt_path}
def __len__(self):
return len(self.paths)
| 870482.py | [
"CWE-502: Deserialization of Untrusted Data"
] |
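The CWE-502 tag above points at the `torch.load(opt.get('component_path'))` call in `__init__`: the facial-component annotations are unpickled from whatever path the config supplies. A hedged sketch of a more defensive variant, assuming PyTorch >= 1.13 where `weights_only=True` restricts unpickling to tensors and basic containers (the `component_path` key is the one used above; if the file stores NumPy arrays they may need to be re-saved as tensors or allow-listed via `torch.serialization.add_safe_globals` on newer PyTorch):

import torch

def load_component_list(component_path):
    # Refuse pickle payloads that could execute arbitrary reduce callables.
    return torch.load(component_path, map_location="cpu", weights_only=True)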
import importlib
import inspect
import re
from typing import Any, Callable, Type, Union, get_type_hints
from pydantic import BaseModel, parse_raw_as
from pydantic.tools import parse_obj_as
def name_to_title(name: str) -> str:
"""Converts a camelCase or snake_case name to title case."""
# If camelCase -> convert to snake case
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
name = re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
# Convert to title case
return name.replace("_", " ").strip().title()
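# Illustrative examples of the conversion (not from the original source):
#   name_to_title("myCamelCaseFunc") -> "My Camel Case Func"
#   name_to_title("snake_case_name") -> "Snake Case Name"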
def is_compatible_type(type: Type) -> bool:
"""Returns `True` if the type is opyrator-compatible."""
try:
if issubclass(type, BaseModel):
return True
except Exception:
pass
try:
# valid list type
if type.__origin__ is list and issubclass(type.__args__[0], BaseModel):
return True
except Exception:
pass
return False
def get_input_type(func: Callable) -> Type:
"""Returns the input type of a given function (callable).
Args:
func: The function for which to get the input type.
Raises:
ValueError: If the function does not have a valid input type annotation.
"""
type_hints = get_type_hints(func)
if "input" not in type_hints:
raise ValueError(
"The callable MUST have a parameter with the name `input` with typing annotation. "
"For example: `def my_opyrator(input: InputModel) -> OutputModel:`."
)
input_type = type_hints["input"]
if not is_compatible_type(input_type):
raise ValueError(
"The `input` parameter MUST be a subclass of the Pydantic BaseModel or a list of Pydantic models."
)
# TODO: return warning if more than one input parameters
return input_type
def get_output_type(func: Callable) -> Type:
"""Returns the output type of a given function (callable).
Args:
func: The function for which to get the output type.
Raises:
ValueError: If the function does not have a valid output type annotation.
"""
type_hints = get_type_hints(func)
if "return" not in type_hints:
raise ValueError(
"The return type of the callable MUST be annotated with type hints."
"For example: `def my_opyrator(input: InputModel) -> OutputModel:`."
)
output_type = type_hints["return"]
if not is_compatible_type(output_type):
raise ValueError(
"The return value MUST be a subclass of the Pydantic BaseModel or a list of Pydantic models."
)
return output_type
def get_callable(import_string: str) -> Callable:
"""Import a callable from an string."""
callable_seperator = ":"
if callable_seperator not in import_string:
        # Use dot as separator
callable_seperator = "."
if callable_seperator not in import_string:
raise ValueError("The callable path MUST specify the function. ")
mod_name, callable_name = import_string.rsplit(callable_seperator, 1)
mod = importlib.import_module(mod_name)
return getattr(mod, callable_name)
class Opyrator:
def __init__(self, func: Union[Callable, str]) -> None:
if isinstance(func, str):
            # Try to load the function from its string notation
self.function = get_callable(func)
else:
self.function = func
self._action = "Execute"
self._input_type = None
self._output_type = None
if not callable(self.function):
raise ValueError("The provided function parameters is not a callable.")
if inspect.isclass(self.function):
raise ValueError(
"The provided callable is an uninitialized Class. This is not allowed."
)
if inspect.isfunction(self.function):
# The provided callable is a function
self._input_type = get_input_type(self.function)
self._output_type = get_output_type(self.function)
try:
# Get name
self._name = name_to_title(self.function.__name__)
except Exception:
pass
try:
# Get description from function
doc_string = inspect.getdoc(self.function)
if doc_string:
self._action = doc_string
except Exception:
pass
elif hasattr(self.function, "__call__"):
            # The provided callable is a callable class instance
self._input_type = get_input_type(self.function.__call__) # type: ignore
self._output_type = get_output_type(self.function.__call__) # type: ignore
try:
# Get name
self._name = name_to_title(type(self.function).__name__)
except Exception:
pass
try:
                # Get action (description) from the __call__ docstring
doc_string = inspect.getdoc(self.function.__call__) # type: ignore
if doc_string:
self._action = doc_string
if (
not self._action
or self._action == "Call"
):
# Get docstring from class instead of __call__ function
doc_string = inspect.getdoc(self.function)
if doc_string:
self._action = doc_string
except Exception:
pass
else:
raise ValueError("Unknown callable type.")
@property
def name(self) -> str:
return self._name
@property
def action(self) -> str:
return self._action
@property
def input_type(self) -> Any:
return self._input_type
@property
def output_type(self) -> Any:
return self._output_type
def __call__(self, input: Any, **kwargs: Any) -> Any:
input_obj = input
if isinstance(input, str):
# Allow json input
input_obj = parse_raw_as(self.input_type, input)
if isinstance(input, dict):
# Allow dict input
input_obj = parse_obj_as(self.input_type, input)
return self.function(input_obj, **kwargs)
| 365412.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
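The CWE-706 tag above refers to `get_callable`, which imports whatever module path the caller supplies and resolves an attribute on it. A hedged sketch of constraining the lookup to known module prefixes (the prefix below is hypothetical and would need to match the project's actual package layout):

import importlib
from typing import Callable

ALLOWED_PREFIXES = ("my_project.opyrators.",)  # hypothetical allowlist

def get_callable_restricted(import_string: str) -> Callable:
    """Resolve 'pkg.module:func' or 'pkg.module.func', but only inside allowed packages."""
    mod_name, _, callable_name = import_string.partition(":")
    if not callable_name:
        mod_name, _, callable_name = import_string.rpartition(".")
    if not mod_name.startswith(ALLOWED_PREFIXES):
        raise ValueError(f"Refusing to import callable from module {mod_name!r}")
    mod = importlib.import_module(mod_name)
    return getattr(mod, callable_name)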
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .utils.mol_attention import MOLAttention
from .utils.basic_layers import Linear
from .utils.vc_utils import get_mask_from_lengths
class DecoderPrenet(nn.Module):
def __init__(self, in_dim, sizes):
super().__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[Linear(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Decoder(nn.Module):
"""Mixture of Logistic (MoL) attention-based RNN Decoder."""
def __init__(
self,
enc_dim,
num_mels,
frames_per_step,
attention_rnn_dim,
decoder_rnn_dim,
prenet_dims,
num_mixtures,
encoder_down_factor=1,
num_decoder_rnn_layer=1,
use_stop_tokens=False,
concat_context_to_last=False,
):
super().__init__()
self.enc_dim = enc_dim
self.encoder_down_factor = encoder_down_factor
self.num_mels = num_mels
self.frames_per_step = frames_per_step
self.attention_rnn_dim = attention_rnn_dim
self.decoder_rnn_dim = decoder_rnn_dim
self.prenet_dims = prenet_dims
self.use_stop_tokens = use_stop_tokens
self.num_decoder_rnn_layer = num_decoder_rnn_layer
self.concat_context_to_last = concat_context_to_last
# Mel prenet
self.prenet = DecoderPrenet(num_mels, prenet_dims)
self.prenet_pitch = DecoderPrenet(num_mels, prenet_dims)
# Attention RNN
self.attention_rnn = nn.LSTMCell(
prenet_dims[-1] + enc_dim,
attention_rnn_dim
)
# Attention
self.attention_layer = MOLAttention(
attention_rnn_dim,
r=frames_per_step/encoder_down_factor,
M=num_mixtures,
)
# Decoder RNN
self.decoder_rnn_layers = nn.ModuleList()
for i in range(num_decoder_rnn_layer):
if i == 0:
self.decoder_rnn_layers.append(
nn.LSTMCell(
enc_dim + attention_rnn_dim,
decoder_rnn_dim))
else:
self.decoder_rnn_layers.append(
nn.LSTMCell(
decoder_rnn_dim,
decoder_rnn_dim))
# self.decoder_rnn = nn.LSTMCell(
# 2 * enc_dim + attention_rnn_dim,
# decoder_rnn_dim
# )
if concat_context_to_last:
self.linear_projection = Linear(
enc_dim + decoder_rnn_dim,
num_mels * frames_per_step
)
else:
self.linear_projection = Linear(
decoder_rnn_dim,
num_mels * frames_per_step
)
# Stop-token layer
if self.use_stop_tokens:
if concat_context_to_last:
self.stop_layer = Linear(
enc_dim + decoder_rnn_dim, 1, bias=True, w_init_gain="sigmoid"
)
else:
self.stop_layer = Linear(
decoder_rnn_dim, 1, bias=True, w_init_gain="sigmoid"
)
def get_go_frame(self, memory):
B = memory.size(0)
go_frame = torch.zeros((B, self.num_mels), dtype=torch.float,
device=memory.device)
return go_frame
def initialize_decoder_states(self, memory, mask):
device = next(self.parameters()).device
B = memory.size(0)
# attention rnn states
self.attention_hidden = torch.zeros(
(B, self.attention_rnn_dim), device=device)
self.attention_cell = torch.zeros(
(B, self.attention_rnn_dim), device=device)
# decoder rnn states
self.decoder_hiddens = []
self.decoder_cells = []
for i in range(self.num_decoder_rnn_layer):
self.decoder_hiddens.append(
torch.zeros((B, self.decoder_rnn_dim),
device=device)
)
self.decoder_cells.append(
torch.zeros((B, self.decoder_rnn_dim),
device=device)
)
# self.decoder_hidden = torch.zeros(
# (B, self.decoder_rnn_dim), device=device)
# self.decoder_cell = torch.zeros(
# (B, self.decoder_rnn_dim), device=device)
self.attention_context = torch.zeros(
(B, self.enc_dim), device=device)
self.memory = memory
# self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
"""Prepare decoder inputs, i.e. gt mel
Args:
            decoder_inputs: (B, T_out, n_mel_channels) inputs used for teacher-forced training.
"""
decoder_inputs = decoder_inputs.reshape(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.frames_per_step), -1)
# (B, T_out//r, r*num_mels) -> (T_out//r, B, r*num_mels)
decoder_inputs = decoder_inputs.transpose(0, 1)
# (T_out//r, B, num_mels)
decoder_inputs = decoder_inputs[:,:,-self.num_mels:]
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, alignments, stop_outputs):
""" Prepares decoder outputs for output
Args:
mel_outputs:
alignments:
"""
# (T_out//r, B, T_enc) -> (B, T_out//r, T_enc)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out//r, B) -> (B, T_out//r)
if stop_outputs is not None:
if alignments.size(0) == 1:
stop_outputs = torch.stack(stop_outputs).unsqueeze(0)
else:
stop_outputs = torch.stack(stop_outputs).transpose(0, 1)
stop_outputs = stop_outputs.contiguous()
# (T_out//r, B, num_mels*r) -> (B, T_out//r, num_mels*r)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
# (B, T_out, num_mels)
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.num_mels)
return mel_outputs, alignments, stop_outputs
def attend(self, decoder_input):
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_context, attention_weights = self.attention_layer(
self.attention_hidden, self.memory, None, self.mask)
decoder_rnn_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
return decoder_rnn_input, self.attention_context, attention_weights
def decode(self, decoder_input):
for i in range(self.num_decoder_rnn_layer):
if i == 0:
self.decoder_hiddens[i], self.decoder_cells[i] = self.decoder_rnn_layers[i](
decoder_input, (self.decoder_hiddens[i], self.decoder_cells[i]))
else:
self.decoder_hiddens[i], self.decoder_cells[i] = self.decoder_rnn_layers[i](
self.decoder_hiddens[i-1], (self.decoder_hiddens[i], self.decoder_cells[i]))
return self.decoder_hiddens[-1]
def forward(self, memory, mel_inputs, memory_lengths):
""" Decoder forward pass for training
Args:
memory: (B, T_enc, enc_dim) Encoder outputs
            mel_inputs: (B, T, num_mels) Ground-truth mel frames used for teacher forcing.
memory_lengths: (B, ) Encoder output lengths for attention masking.
Returns:
mel_outputs: (B, T, num_mels) mel outputs from the decoder
alignments: (B, T//r, T_enc) attention weights.
"""
# [1, B, num_mels]
go_frame = self.get_go_frame(memory).unsqueeze(0)
# [T//r, B, num_mels]
mel_inputs = self.parse_decoder_inputs(mel_inputs)
# [T//r + 1, B, num_mels]
mel_inputs = torch.cat((go_frame, mel_inputs), dim=0)
# [T//r + 1, B, prenet_dim]
decoder_inputs = self.prenet(mel_inputs)
# decoder_inputs_pitch = self.prenet_pitch(decoder_inputs__)
self.initialize_decoder_states(
memory, mask=~get_mask_from_lengths(memory_lengths),
)
self.attention_layer.init_states(memory)
# self.attention_layer_pitch.init_states(memory_pitch)
mel_outputs, alignments = [], []
if self.use_stop_tokens:
stop_outputs = []
else:
stop_outputs = None
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
# decoder_input_pitch = decoder_inputs_pitch[len(mel_outputs)]
decoder_rnn_input, context, attention_weights = self.attend(decoder_input)
decoder_rnn_output = self.decode(decoder_rnn_input)
if self.concat_context_to_last:
decoder_rnn_output = torch.cat(
(decoder_rnn_output, context), dim=1)
mel_output = self.linear_projection(decoder_rnn_output)
if self.use_stop_tokens:
stop_output = self.stop_layer(decoder_rnn_output)
stop_outputs += [stop_output.squeeze()]
mel_outputs += [mel_output.squeeze(1)] #? perhaps don't need squeeze
alignments += [attention_weights]
# alignments_pitch += [attention_weights_pitch]
mel_outputs, alignments, stop_outputs = self.parse_decoder_outputs(
mel_outputs, alignments, stop_outputs)
if stop_outputs is None:
return mel_outputs, alignments
else:
return mel_outputs, stop_outputs, alignments
def inference(self, memory, stop_threshold=0.5):
""" Decoder inference
Args:
memory: (1, T_enc, D_enc) Encoder outputs
Returns:
mel_outputs: mel outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
# [1, num_mels]
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
self.attention_layer.init_states(memory)
mel_outputs, alignments = [], []
# NOTE(sx): heuristic
max_decoder_step = memory.size(1)*self.encoder_down_factor//self.frames_per_step
min_decoder_step = memory.size(1)*self.encoder_down_factor // self.frames_per_step - 5
while True:
decoder_input = self.prenet(decoder_input)
decoder_input_final, context, alignment = self.attend(decoder_input)
#mel_output, stop_output, alignment = self.decode(decoder_input)
decoder_rnn_output = self.decode(decoder_input_final)
if self.concat_context_to_last:
decoder_rnn_output = torch.cat(
(decoder_rnn_output, context), dim=1)
mel_output = self.linear_projection(decoder_rnn_output)
stop_output = self.stop_layer(decoder_rnn_output)
mel_outputs += [mel_output.squeeze(1)]
alignments += [alignment]
if torch.sigmoid(stop_output.data) > stop_threshold and len(mel_outputs) >= min_decoder_step:
break
if len(mel_outputs) >= max_decoder_step:
# print("Warning! Decoding steps reaches max decoder steps.")
break
decoder_input = mel_output[:,-self.num_mels:]
mel_outputs, alignments, _ = self.parse_decoder_outputs(
mel_outputs, alignments, None)
return mel_outputs, alignments
def inference_batched(self, memory, stop_threshold=0.5):
""" Decoder inference
Args:
memory: (B, T_enc, D_enc) Encoder outputs
Returns:
mel_outputs: mel outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
# [1, num_mels]
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
self.attention_layer.init_states(memory)
mel_outputs, alignments = [], []
stop_outputs = []
# NOTE(sx): heuristic
max_decoder_step = memory.size(1)*self.encoder_down_factor//self.frames_per_step
min_decoder_step = memory.size(1)*self.encoder_down_factor // self.frames_per_step - 5
while True:
decoder_input = self.prenet(decoder_input)
decoder_input_final, context, alignment = self.attend(decoder_input)
#mel_output, stop_output, alignment = self.decode(decoder_input)
decoder_rnn_output = self.decode(decoder_input_final)
if self.concat_context_to_last:
decoder_rnn_output = torch.cat(
(decoder_rnn_output, context), dim=1)
mel_output = self.linear_projection(decoder_rnn_output)
# (B, 1)
stop_output = self.stop_layer(decoder_rnn_output)
stop_outputs += [stop_output.squeeze()]
# stop_outputs.append(stop_output)
mel_outputs += [mel_output.squeeze(1)]
alignments += [alignment]
# print(stop_output.shape)
if torch.all(torch.sigmoid(stop_output.squeeze().data) > stop_threshold) \
and len(mel_outputs) >= min_decoder_step:
break
if len(mel_outputs) >= max_decoder_step:
# print("Warning! Decoding steps reaches max decoder steps.")
break
decoder_input = mel_output[:,-self.num_mels:]
mel_outputs, alignments, stop_outputs = self.parse_decoder_outputs(
mel_outputs, alignments, stop_outputs)
mel_outputs_stacked = []
for mel, stop_logit in zip(mel_outputs, stop_outputs):
idx = np.argwhere(torch.sigmoid(stop_logit.cpu()) > stop_threshold)[0][0].item()
mel_outputs_stacked.append(mel[:idx,:])
mel_outputs = torch.cat(mel_outputs_stacked, dim=0).unsqueeze(0)
return mel_outputs, alignments
| 476533.py | [
"Unknown"
] |
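The reshaping in `parse_decoder_inputs` / `parse_decoder_outputs` is the part of this decoder that is easiest to misread, so here is a self-contained sketch of the same frames_per_step regrouping with hypothetical sizes (num_mels=80, frames_per_step=2); it only needs torch and mirrors the shape comments in those methods:

import torch

B, T_out, num_mels, r = 4, 10, 80, 2          # hypothetical shapes
mel = torch.randn(B, T_out, num_mels)         # ground-truth mel, as passed to forward()

grouped = mel.reshape(B, T_out // r, -1)      # (B, T_out//r, r*num_mels)
grouped = grouped.transpose(0, 1)             # (T_out//r, B, r*num_mels)
teacher_inputs = grouped[:, :, -num_mels:]    # keep only the last frame of each group

# The inverse regrouping, as applied to the decoder outputs:
outputs = grouped.transpose(0, 1).contiguous()          # (B, T_out//r, r*num_mels)
outputs = outputs.view(outputs.size(0), -1, num_mels)   # back to (B, T_out, num_mels)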
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Multi-Head Attention layer definition."""
import math
import numpy
import torch
from torch import nn
class MultiHeadedAttention(nn.Module):
"""Multi-Head Attention layer.
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an MultiHeadedAttention object."""
super(MultiHeadedAttention, self).__init__()
assert n_feat % n_head == 0
# We assume d_v always equals d_k
self.d_k = n_feat // n_head
self.h = n_head
self.linear_q = nn.Linear(n_feat, n_feat)
self.linear_k = nn.Linear(n_feat, n_feat)
self.linear_v = nn.Linear(n_feat, n_feat)
self.linear_out = nn.Linear(n_feat, n_feat)
self.attn = None
self.dropout = nn.Dropout(p=dropout_rate)
def forward_qkv(self, query, key, value):
"""Transform query, key and value.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:return torch.Tensor transformed query, key and value
"""
n_batch = query.size(0)
q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
q = q.transpose(1, 2) # (batch, head, time1, d_k)
k = k.transpose(1, 2) # (batch, head, time2, d_k)
v = v.transpose(1, 2) # (batch, head, time2, d_k)
return q, k, v
def forward_attention(self, value, scores, mask):
"""Compute attention context vector.
:param torch.Tensor value: (batch, head, time2, size)
:param torch.Tensor scores: (batch, head, time1, time2)
:param torch.Tensor mask: (batch, 1, time2) or (batch, time1, time2)
:return torch.Tensor transformed `value` (batch, time1, d_model)
weighted by the attention score (batch, time1, time2)
"""
n_batch = value.size(0)
if mask is not None:
mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)
min_value = float(
numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min
)
scores = scores.masked_fill(mask, min_value)
self.attn = torch.softmax(scores, dim=-1).masked_fill(
mask, 0.0
) # (batch, head, time1, time2)
else:
self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)
p_attn = self.dropout(self.attn)
x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)
x = (
x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)
) # (batch, time1, d_model)
return self.linear_out(x) # (batch, time1, d_model)
def forward(self, query, key, value, mask):
"""Compute 'Scaled Dot Product Attention'.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor mask: (batch, 1, time2) or (batch, time1, time2)
:param torch.nn.Dropout dropout:
:return torch.Tensor: attention output (batch, time1, d_model)
"""
q, k, v = self.forward_qkv(query, key, value)
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
return self.forward_attention(v, scores, mask)
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
"""Multi-Head Attention layer with relative position encoding.
Paper: https://arxiv.org/abs/1901.02860
    :param int n_head: the number of heads
:param int n_feat: the number of features
:param float dropout_rate: dropout rate
"""
def __init__(self, n_head, n_feat, dropout_rate):
"""Construct an RelPositionMultiHeadedAttention object."""
super().__init__(n_head, n_feat, dropout_rate)
        # linear transformation for positional encoding
self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
# these two learnable bias are used in matrix c and matrix d
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
torch.nn.init.xavier_uniform_(self.pos_bias_u)
torch.nn.init.xavier_uniform_(self.pos_bias_v)
def rel_shift(self, x, zero_triu=False):
"""Compute relative positinal encoding.
:param torch.Tensor x: (batch, time, size)
:param bool zero_triu: return the lower triangular part of the matrix
"""
zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=-1)
x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
x = x_padded[:, :, 1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(2), x.size(3)))
x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]
return x
def forward(self, query, key, value, pos_emb, mask):
"""Compute 'Scaled Dot Product Attention' with rel. positional encoding.
:param torch.Tensor query: (batch, time1, size)
:param torch.Tensor key: (batch, time2, size)
:param torch.Tensor value: (batch, time2, size)
:param torch.Tensor pos_emb: (batch, time1, size)
:param torch.Tensor mask: (batch, time1, time2)
:param torch.nn.Dropout dropout:
:return torch.Tensor: attention output (batch, time1, d_model)
"""
q, k, v = self.forward_qkv(query, key, value)
q = q.transpose(1, 2) # (batch, time1, head, d_k)
n_batch_pos = pos_emb.size(0)
p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
p = p.transpose(1, 2) # (batch, head, time1, d_k)
# (batch, head, time1, d_k)
q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)
# (batch, head, time1, d_k)
q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)
# compute attention score
# first compute matrix a and matrix c
# as described in https://arxiv.org/abs/1901.02860 Section 3.3
# (batch, head, time1, time2)
matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
# compute matrix b and matrix d
# (batch, head, time1, time2)
matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
matrix_bd = self.rel_shift(matrix_bd)
scores = (matrix_ac + matrix_bd) / math.sqrt(
self.d_k
) # (batch, head, time1, time2)
return self.forward_attention(v, scores, mask)
| 434281.py | [
"Unknown"
] |
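Both attention classes above depend only on torch/numpy, so a minimal usage sketch of the plain multi-head variant is straightforward (hypothetical sizes, assuming the class is importable from this module):

import torch

mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.1)
x = torch.randn(2, 50, 256)                    # (batch, time, n_feat)
mask = torch.ones(2, 1, 50, dtype=torch.bool)  # attend to every position
out = mha(x, x, x, mask)                       # self-attention -> (2, 50, 256)
print(out.shape, mha.attn.shape)               # torch.Size([2, 50, 256]) torch.Size([2, 4, 50, 50])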
import importlib
import logging
import types
from dataclasses import dataclass, field
from heapq import heappop, heappush
from typing import Type, TypeAlias
from quivr_core.files.file import FileExtension
from .processor_base import ProcessorBase
logger = logging.getLogger("quivr_core")
_LOWEST_PRIORITY = 100
_registry: dict[str, Type[ProcessorBase]] = {}
# external, read-only view. Contains the actual processor classes that are imported and ready to use
registry = types.MappingProxyType(_registry)
@dataclass(order=True)
class ProcEntry:
priority: int
cls_mod: str = field(compare=False)
err: str | None = field(compare=False)
ProcMapping: TypeAlias = dict[FileExtension | str, list[ProcEntry]]
# Register based on mimetypes
base_processors: ProcMapping = {
FileExtension.txt: [
ProcEntry(
cls_mod="quivr_core.processor.implementations.simple_txt_processor.SimpleTxtProcessor",
err=None,
priority=_LOWEST_PRIORITY,
)
],
FileExtension.pdf: [
ProcEntry(
cls_mod="quivr_core.processor.implementations.tika_processor.TikaProcessor",
err=None,
priority=_LOWEST_PRIORITY,
)
],
}
def _append_proc_mapping(
mapping: ProcMapping,
file_ext: FileExtension | str,
cls_mod: str,
errtxt: str,
priority: int | None,
):
if file_ext in mapping:
try:
prev_proc = heappop(mapping[file_ext])
proc_entry = ProcEntry(
priority=priority if priority is not None else prev_proc.priority - 1,
cls_mod=cls_mod,
err=errtxt,
)
# Push the previous processor back
heappush(mapping[file_ext], prev_proc)
heappush(mapping[file_ext], proc_entry)
except IndexError:
proc_entry = ProcEntry(
priority=priority if priority is not None else _LOWEST_PRIORITY,
cls_mod=cls_mod,
err=errtxt,
)
heappush(mapping[file_ext], proc_entry)
else:
proc_entry = ProcEntry(
priority=priority if priority is not None else _LOWEST_PRIORITY,
cls_mod=cls_mod,
err=errtxt,
)
mapping[file_ext] = [proc_entry]
def defaults_to_proc_entries(
base_processors: ProcMapping,
) -> ProcMapping:
# TODO(@aminediro) : how can a user change the order of the processor ?
# NOTE: order of this list is important as resolution of `get_processor_class` depends on it
# We should have a way to automatically add these at 'import' time
for supported_extensions, processor_name in [
([FileExtension.csv], "CSVProcessor"),
([FileExtension.txt], "TikTokenTxtProcessor"),
([FileExtension.docx, FileExtension.doc], "DOCXProcessor"),
([FileExtension.xls, FileExtension.xlsx], "XLSXProcessor"),
([FileExtension.pptx], "PPTProcessor"),
(
[FileExtension.markdown, FileExtension.md, FileExtension.mdx],
"MarkdownProcessor",
),
([FileExtension.epub], "EpubProcessor"),
([FileExtension.bib], "BibTexProcessor"),
([FileExtension.odt], "ODTProcessor"),
([FileExtension.html], "HTMLProcessor"),
([FileExtension.py], "PythonProcessor"),
([FileExtension.ipynb], "NotebookProcessor"),
]:
for ext in supported_extensions:
ext_str = ext.value if isinstance(ext, FileExtension) else ext
_append_proc_mapping(
mapping=base_processors,
file_ext=ext,
cls_mod=f"quivr_core.processor.implementations.default.{processor_name}",
errtxt=f"can't import {processor_name}. Please install quivr-core[{ext_str}] to access {processor_name}",
priority=None,
)
# TODO(@aminediro): Megaparse should register itself
# Append Megaparse
_append_proc_mapping(
mapping=base_processors,
file_ext=FileExtension.pdf,
cls_mod="quivr_core.processor.implementations.megaparse_processor.MegaparseProcessor",
errtxt=f"can't import MegaparseProcessor. Please install quivr-core[{ext_str}] to access MegaparseProcessor",
priority=None,
)
return base_processors
known_processors = defaults_to_proc_entries(base_processors)
def get_processor_class(file_extension: FileExtension | str) -> Type[ProcessorBase]:
"""Fetch processor class from registry
The dict ``known_processors`` maps file extensions to the locations
of processors that could process them.
Loading of these classes is *Lazy*. Appropriate import will happen
the first time we try to process some file type.
    Some processors need additional dependencies. If the import fails,
    the "err" field of the ProcEntry in ``known_processors`` is logged and the
    next available processor is tried.
"""
if file_extension not in registry:
# Either you registered it from module or it's in the known processors
if file_extension not in known_processors:
raise ValueError(f"Extension not known: {file_extension}")
entries = known_processors[file_extension]
while entries:
proc_entry = heappop(entries)
try:
register_processor(file_extension, _import_class(proc_entry.cls_mod))
break
except ImportError:
                logger.warning(
                    f"{proc_entry.err}. Falling back to the next available processor for {file_extension}"
)
if len(entries) == 0 and file_extension not in registry:
raise ImportError(f"can't find any processor for {file_extension}")
cls = registry[file_extension]
return cls
def register_processor(
file_ext: FileExtension | str,
proc_cls: str | Type[ProcessorBase],
append: bool = True,
override: bool = False,
errtxt: str | None = None,
priority: int | None = None,
):
if isinstance(proc_cls, str):
if file_ext in known_processors and append is False:
if all(proc_cls != proc.cls_mod for proc in known_processors[file_ext]):
raise ValueError(
f"Processor for ({file_ext}) already in the registry and append is False"
)
else:
if all(proc_cls != proc.cls_mod for proc in known_processors[file_ext]):
_append_proc_mapping(
known_processors,
file_ext=file_ext,
cls_mod=proc_cls,
errtxt=errtxt
or f"{proc_cls} import failed for processor of {file_ext}",
priority=priority,
)
else:
logger.info(f"{proc_cls} already in registry...")
else:
assert issubclass(
proc_cls, ProcessorBase
), f"{proc_cls} should be a subclass of quivr_core.processor.ProcessorBase"
if file_ext in registry and override is False:
if _registry[file_ext] is not proc_cls:
raise ValueError(
f"Processor for ({file_ext}) already in the registry and append is False"
)
else:
_registry[file_ext] = proc_cls
def _import_class(full_mod_path: str):
if ":" in full_mod_path:
mod_name, name = full_mod_path.rsplit(":", 1)
else:
mod_name, name = full_mod_path.rsplit(".", 1)
mod = importlib.import_module(mod_name)
for cls in name.split("."):
mod = getattr(mod, cls)
if not isinstance(mod, type):
raise TypeError(f"{full_mod_path} is not a class")
if not issubclass(mod, ProcessorBase):
raise TypeError(f"{full_mod_path} is not a subclass of ProcessorBase ")
return mod
def available_processors():
"""Return a list of the known processors."""
return list(known_processors)
| 554536.py | [
"CWE-706: Use of Incorrectly-Resolved Name or Reference"
] |
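A short usage sketch of the registry above. The processor class and its module path are hypothetical; registering by string keeps the import lazy (it is attempted the first time `get_processor_class` is called for that extension), and a lower `priority` number wins when several processors are registered for the same extension:

from quivr_core.files.file import FileExtension

# Lazy registration: only the dotted path is stored for now.
register_processor(
    FileExtension.txt,
    "my_pkg.processors.MyTxtProcessor",   # hypothetical module path
    priority=10,
    errtxt="install my_pkg to use MyTxtProcessor",
)

# The first lookup triggers the import; on ImportError the registry logs the
# errtxt and falls back to the next entry in the heap (e.g. the default txt processors).
proc_cls = get_processor_class(FileExtension.txt)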
import json
import os
import re
import string
from collections import Counter
from shutil import rmtree
from typing import Any, Dict, List, Optional, Tuple
import requests
import tqdm
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
from llama_index.core.utils import get_cache_dir
DEV_DISTRACTOR_URL = """http://curtis.ml.cmu.edu/datasets/\
hotpot/hotpot_dev_distractor_v1.json"""
class HotpotQAEvaluator:
"""
Refer to https://hotpotqa.github.io/ for more details on the dataset.
"""
def _download_datasets(self) -> Dict[str, str]:
cache_dir = get_cache_dir()
dataset_paths = {}
dataset = "hotpot_dev_distractor"
dataset_full_path = os.path.join(cache_dir, "datasets", "HotpotQA")
if not os.path.exists(dataset_full_path):
url = DEV_DISTRACTOR_URL
try:
os.makedirs(dataset_full_path, exist_ok=True)
                response = requests.get(url, stream=True)
                # Define the size of each chunk
                chunk_size = 1024
                # Stream the response to disk chunk by chunk
                with open(
                    os.path.join(dataset_full_path, "dev_distractor.json"), "wb"
                ) as save_file:
                    for chunk in tqdm.tqdm(response.iter_content(chunk_size=chunk_size)):
                        if chunk:
                            save_file.write(chunk)
except Exception as e:
if os.path.exists(dataset_full_path):
print(
"Dataset:", dataset, "not found at:", url, "Removing cached dir"
)
rmtree(dataset_full_path)
raise ValueError(f"could not download {dataset} dataset") from e
dataset_paths[dataset] = os.path.join(dataset_full_path, "dev_distractor.json")
print("Dataset:", dataset, "downloaded at:", dataset_full_path)
return dataset_paths
def run(
self,
query_engine: BaseQueryEngine,
queries: int = 10,
queries_fraction: Optional[float] = None,
show_result: bool = False,
) -> None:
dataset_paths = self._download_datasets()
dataset = "hotpot_dev_distractor"
dataset_path = dataset_paths[dataset]
print("Evaluating on dataset:", dataset)
print("-------------------------------------")
        with open(dataset_path) as f:
            query_objects = json.loads(f.read())
if queries_fraction:
queries_to_load = int(len(query_objects) * queries_fraction)
else:
queries_to_load = queries
queries_fraction = round(queries / len(query_objects), 5)
print(
f"Loading {queries_to_load} queries out of \
{len(query_objects)} (fraction: {queries_fraction})"
)
query_objects = query_objects[:queries_to_load]
assert isinstance(
query_engine, RetrieverQueryEngine
), "query_engine must be a RetrieverQueryEngine for this evaluation"
retriever = HotpotQARetriever(query_objects)
# Mock the query engine with a retriever
query_engine = query_engine.with_retriever(retriever=retriever)
scores = {"exact_match": 0.0, "f1": 0.0}
for query in query_objects:
query_bundle = QueryBundle(
query_str=query["question"]
+ " Give a short factoid answer (as few words as possible).",
custom_embedding_strs=[query["question"]],
)
response = query_engine.query(query_bundle)
em = int(
exact_match_score(
prediction=str(response), ground_truth=query["answer"]
)
)
f1, _, _ = f1_score(prediction=str(response), ground_truth=query["answer"])
scores["exact_match"] += em
scores["f1"] += f1
if show_result:
print("Question: ", query["question"])
print("Response:", response)
print("Correct answer: ", query["answer"])
print("EM:", em, "F1:", f1)
print("-------------------------------------")
for score in scores:
scores[score] /= len(query_objects)
print("Scores: ", scores)
class HotpotQARetriever(BaseRetriever):
"""
This is a mocked retriever for HotpotQA dataset. It is only meant to be used
with the hotpotqa dev dataset in the distractor setting. This is the setting that
does not require retrieval but requires identifying the supporting facts from
a list of 10 sources.
"""
def __init__(self, query_objects: Any) -> None:
assert isinstance(
query_objects,
list,
), f"query_objects must be a list, got: {type(query_objects)}"
self._queries = {}
        for obj in query_objects:
            self._queries[obj["question"]] = obj
def _retrieve(self, query: QueryBundle) -> List[NodeWithScore]:
if query.custom_embedding_strs:
query_str = query.custom_embedding_strs[0]
else:
query_str = query.query_str
contexts = self._queries[query_str]["context"]
node_with_scores = []
for ctx in contexts:
text_list = ctx[1]
text = "\n".join(text_list)
node = TextNode(text=text, metadata={"title": ctx[0]})
node_with_scores.append(NodeWithScore(node=node, score=1.0))
return node_with_scores
def __str__(self) -> str:
return "HotpotQARetriever"
"""
Utils from https://github.com/hotpotqa/hotpot/blob/master/hotpot_evaluate_v1.py
"""
def normalize_answer(s: str) -> str:
def remove_articles(text: str) -> str:
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text: str) -> str:
return " ".join(text.split())
def remove_punc(text: str) -> str:
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text: str) -> str:
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction: str, ground_truth: str) -> Tuple[float, float, float]:
normalized_prediction = normalize_answer(prediction)
normalized_ground_truth = normalize_answer(ground_truth)
ZERO_METRIC = (0, 0, 0)
if (
normalized_prediction in ["yes", "no", "noanswer"]
and normalized_prediction != normalized_ground_truth
):
return ZERO_METRIC
if (
normalized_ground_truth in ["yes", "no", "noanswer"]
and normalized_prediction != normalized_ground_truth
):
return ZERO_METRIC
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return ZERO_METRIC
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1, precision, recall
def exact_match_score(prediction: str, ground_truth: str) -> bool:
return normalize_answer(prediction) == normalize_answer(ground_truth)
| 930264.py | [
"CWE-319: Cleartext Transmission of Sensitive Information"
] |
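The CWE-319 tag above flags the plain-HTTP download URL; switching `DEV_DISTRACTOR_URL` to an https endpoint (if the host serves one) would be the usual fix. The scoring helpers at the bottom of the file are self-contained, so here is a toy check of how normalization and token-overlap F1 behave (values worked out by hand):

pred, gold = "The Eiffel Tower.", "eiffel tower"
print(exact_match_score(pred, gold))       # True: case, punctuation and articles are stripped
print(f1_score("tower in Paris", gold))    # (0.4, 0.333..., 0.5): one shared token out of 3 vs 2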
"""SQL wrapper around SQLDatabase in langchain."""
from typing import Any, Dict, Iterable, List, Optional, Tuple
from sqlalchemy import MetaData, create_engine, insert, inspect, text
from sqlalchemy.engine import Engine
from sqlalchemy.exc import OperationalError, ProgrammingError
class SQLDatabase:
"""SQL Database.
This class provides a wrapper around the SQLAlchemy engine to interact with a SQL
database.
It provides methods to execute SQL commands, insert data into tables, and retrieve
information about the database schema.
It also supports optional features such as including or excluding specific tables,
sampling rows for table info,
including indexes in table info, and supporting views.
Based on langchain SQLDatabase.
https://github.com/langchain-ai/langchain/blob/e355606b1100097665207ca259de6dc548d44c78/libs/langchain/langchain/utilities/sql_database.py#L39
Args:
engine (Engine): The SQLAlchemy engine instance to use for database operations.
schema (Optional[str]): The name of the schema to use, if any.
metadata (Optional[MetaData]): The metadata instance to use, if any.
ignore_tables (Optional[List[str]]): List of table names to ignore. If set,
include_tables must be None.
include_tables (Optional[List[str]]): List of table names to include. If set,
ignore_tables must be None.
sample_rows_in_table_info (int): The number of sample rows to include in table
info.
indexes_in_table_info (bool): Whether to include indexes in table info.
custom_table_info (Optional[dict]): Custom table info to use.
view_support (bool): Whether to support views.
max_string_length (int): The maximum string length to use.
"""
def __init__(
self,
engine: Engine,
schema: Optional[str] = None,
metadata: Optional[MetaData] = None,
ignore_tables: Optional[List[str]] = None,
include_tables: Optional[List[str]] = None,
sample_rows_in_table_info: int = 3,
indexes_in_table_info: bool = False,
custom_table_info: Optional[dict] = None,
view_support: bool = False,
max_string_length: int = 300,
):
"""Create engine from database URI."""
self._engine = engine
self._schema = schema
if include_tables and ignore_tables:
raise ValueError("Cannot specify both include_tables and ignore_tables")
self._inspector = inspect(self._engine)
# including view support by adding the views as well as tables to the all
# tables list if view_support is True
self._all_tables = set(
self._inspector.get_table_names(schema=schema)
+ (self._inspector.get_view_names(schema=schema) if view_support else [])
)
self._include_tables = set(include_tables) if include_tables else set()
if self._include_tables:
missing_tables = self._include_tables - self._all_tables
if missing_tables:
raise ValueError(
f"include_tables {missing_tables} not found in database"
)
self._ignore_tables = set(ignore_tables) if ignore_tables else set()
if self._ignore_tables:
missing_tables = self._ignore_tables - self._all_tables
if missing_tables:
raise ValueError(
f"ignore_tables {missing_tables} not found in database"
)
usable_tables = self.get_usable_table_names()
self._usable_tables = set(usable_tables) if usable_tables else self._all_tables
if not isinstance(sample_rows_in_table_info, int):
raise TypeError("sample_rows_in_table_info must be an integer")
self._sample_rows_in_table_info = sample_rows_in_table_info
self._indexes_in_table_info = indexes_in_table_info
self._custom_table_info = custom_table_info
if self._custom_table_info:
if not isinstance(self._custom_table_info, dict):
raise TypeError(
"table_info must be a dictionary with table names as keys and the "
"desired table info as values"
)
# only keep the tables that are also present in the database
intersection = set(self._custom_table_info).intersection(self._all_tables)
self._custom_table_info = {
table: info
for table, info in self._custom_table_info.items()
if table in intersection
}
self._max_string_length = max_string_length
self._metadata = metadata or MetaData()
# including view support if view_support = true
self._metadata.reflect(
views=view_support,
bind=self._engine,
only=list(self._usable_tables),
schema=self._schema,
)
@property
def engine(self) -> Engine:
"""Return SQL Alchemy engine."""
return self._engine
@property
def metadata_obj(self) -> MetaData:
"""Return SQL Alchemy metadata."""
return self._metadata
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> "SQLDatabase":
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
@property
def dialect(self) -> str:
"""Return string representation of dialect to use."""
return self._engine.dialect.name
def get_usable_table_names(self) -> Iterable[str]:
"""Get names of tables available."""
if self._include_tables:
return sorted(self._include_tables)
return sorted(self._all_tables - self._ignore_tables)
def get_table_columns(self, table_name: str) -> List[Any]:
"""Get table columns."""
return self._inspector.get_columns(table_name)
def get_single_table_info(self, table_name: str) -> str:
"""Get table info for a single table."""
# same logic as table_info, but with specific table names
template = "Table '{table_name}' has columns: {columns}, "
try:
# try to retrieve table comment
table_comment = self._inspector.get_table_comment(
table_name, schema=self._schema
)["text"]
if table_comment:
template += f"with comment: ({table_comment}) "
except NotImplementedError:
# get_table_comment raises NotImplementedError for a dialect that does not support comments.
pass
template += "{foreign_keys}."
columns = []
for column in self._inspector.get_columns(table_name, schema=self._schema):
if column.get("comment"):
columns.append(
f"{column['name']} ({column['type']!s}): "
f"'{column.get('comment')}'"
)
else:
columns.append(f"{column['name']} ({column['type']!s})")
column_str = ", ".join(columns)
foreign_keys = []
for foreign_key in self._inspector.get_foreign_keys(
table_name, schema=self._schema
):
foreign_keys.append(
f"{foreign_key['constrained_columns']} -> "
f"{foreign_key['referred_table']}.{foreign_key['referred_columns']}"
)
        foreign_key_str = (
            " and foreign keys: {}".format(", ".join(foreign_keys))
            if foreign_keys
            else ""
        )
return template.format(
table_name=table_name, columns=column_str, foreign_keys=foreign_key_str
)
def insert_into_table(self, table_name: str, data: dict) -> None:
"""Insert data into a table."""
table = self._metadata.tables[table_name]
stmt = insert(table).values(**data)
with self._engine.begin() as connection:
connection.execute(stmt)
def truncate_word(self, content: Any, *, length: int, suffix: str = "...") -> str:
"""
Truncate a string to a certain number of words, based on the max string
length.
"""
if not isinstance(content, str) or length <= 0:
return content
if len(content) <= length:
return content
return content[: length - len(suffix)].rsplit(" ", 1)[0] + suffix
def run_sql(self, command: str) -> Tuple[str, Dict]:
"""Execute a SQL statement and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.begin() as connection:
try:
if self._schema:
command = command.replace("FROM ", f"FROM {self._schema}.")
command = command.replace("JOIN ", f"JOIN {self._schema}.")
cursor = connection.execute(text(command))
except (ProgrammingError, OperationalError) as exc:
raise NotImplementedError(
f"Statement {command!r} is invalid SQL."
) from exc
if cursor.returns_rows:
result = cursor.fetchall()
# truncate the results to the max string length
# we can't use str(result) directly because it automatically truncates long strings
truncated_results = []
for row in result:
# truncate each column, then convert the row to a tuple
truncated_row = tuple(
self.truncate_word(column, length=self._max_string_length)
for column in row
)
truncated_results.append(truncated_row)
return str(truncated_results), {
"result": truncated_results,
"col_keys": list(cursor.keys()),
}
return "", {}
| 885715.py | [
"CWE-89: Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')"
] |
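The CWE-89 tag above reflects that `run_sql` executes whatever SQL string it is given (and splices the schema in with plain string replacement), so callers are expected to bind parameters via SQLAlchemy rather than formatting untrusted values into the command. A minimal usage sketch against an in-memory SQLite database (hypothetical table and data; SQLAlchemy keeps a single connection for ':memory:', so the table survives across `engine.begin()` blocks in one thread):

from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///:memory:")
with engine.begin() as conn:
    conn.execute(text("CREATE TABLE city (id INTEGER PRIMARY KEY, name TEXT)"))
    # Parameter binding keeps untrusted values out of the SQL text itself.
    conn.execute(text("INSERT INTO city (name) VALUES (:name)"), {"name": "Paris"})

db = SQLDatabase(engine)
print(db.get_usable_table_names())          # ['city']
print(db.run_sql("SELECT name FROM city"))  # ("[('Paris',)]", {'result': ..., 'col_keys': ['name']})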